From 90566ce8c64d5c82e080ef0660981daa3f433cb7 Mon Sep 17 00:00:00 2001
From: bnb32
Date: Thu, 14 Nov 2024 16:11:59 +0000
Subject: [PATCH] Merge pull request #243 from NREL/bnb/writer_cleanup

uv invert for netcdf writes
---
 906 files changed, 414084 insertions(+)
100644 _autosummary/sup3r.preprocessing.utilities.parse_ellipsis.html create mode 100644 _autosummary/sup3r.preprocessing.utilities.parse_features.html create mode 100644 _autosummary/sup3r.preprocessing.utilities.parse_keys.html create mode 100644 _autosummary/sup3r.preprocessing.utilities.parse_to_list.html create mode 100644 _autosummary/sup3r.qa.html create mode 100644 _autosummary/sup3r.qa.qa.Sup3rQa.html create mode 100644 _autosummary/sup3r.qa.qa.html create mode 100644 _autosummary/sup3r.qa.qa_cli.html create mode 100644 _autosummary/sup3r.qa.utilities.continuous_dist.html create mode 100644 _autosummary/sup3r.qa.utilities.direct_dist.html create mode 100644 _autosummary/sup3r.qa.utilities.frequency_spectrum.html create mode 100644 _autosummary/sup3r.qa.utilities.gradient_dist.html create mode 100644 _autosummary/sup3r.qa.utilities.html create mode 100644 _autosummary/sup3r.qa.utilities.time_derivative_dist.html create mode 100644 _autosummary/sup3r.qa.utilities.tke_frequency_spectrum.html create mode 100644 _autosummary/sup3r.qa.utilities.tke_wavenumber_spectrum.html create mode 100644 _autosummary/sup3r.qa.utilities.wavenumber_spectrum.html create mode 100644 _autosummary/sup3r.solar.html create mode 100644 _autosummary/sup3r.solar.solar.Solar.html create mode 100644 _autosummary/sup3r.solar.solar.html create mode 100644 _autosummary/sup3r.solar.solar_cli.html create mode 100644 _autosummary/sup3r.solar.solar_cli.kickoff_local_job.html create mode 100644 _autosummary/sup3r.solar.solar_cli.kickoff_slurm_job.html create mode 100644 _autosummary/sup3r.utilities.ModuleName.html create mode 100644 _autosummary/sup3r.utilities.cli.BaseCLI.html create mode 100644 _autosummary/sup3r.utilities.cli.SlurmManager.html create mode 100644 _autosummary/sup3r.utilities.cli.html create mode 100644 _autosummary/sup3r.utilities.era_downloader.EraDownloader.html create mode 100644 _autosummary/sup3r.utilities.era_downloader.html create mode 100644 _autosummary/sup3r.utilities.html create mode 100644 _autosummary/sup3r.utilities.interpolation.Interpolator.html create mode 100644 _autosummary/sup3r.utilities.interpolation.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.ExpLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.LowResLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.MmdLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.MseExpLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.html create mode 100644 _autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.html create mode 100644 
_autosummary/sup3r.utilities.loss_metrics.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.DummyData.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.DummySampler.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.SamplerTester.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.html create mode 100644 _autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.html create mode 100644 _autosummary/sup3r.utilities.pytest.html create mode 100644 _autosummary/sup3r.utilities.utilities.Timer.html create mode 100644 _autosummary/sup3r.utilities.utilities.generate_random_string.html create mode 100644 _autosummary/sup3r.utilities.utilities.html create mode 100644 _autosummary/sup3r.utilities.utilities.nn_fill_array.html create mode 100644 _autosummary/sup3r.utilities.utilities.pd_date_range.html create mode 100644 _autosummary/sup3r.utilities.utilities.preprocess_datasets.html create mode 100644 _autosummary/sup3r.utilities.utilities.safe_cast.html create mode 100644 _autosummary/sup3r.utilities.utilities.safe_serialize.html create mode 100644 _autosummary/sup3r.utilities.utilities.spatial_coarsening.html create mode 100644 _autosummary/sup3r.utilities.utilities.temporal_coarsening.html create mode 100644 _autosummary/sup3r.utilities.utilities.xr_open_mfdataset.html create mode 100644 _cli/cli.html create mode 100644 _cli/sup3r.html create mode 100644 _modules/index.html create mode 100644 _modules/sup3r/bias/base.html create mode 100644 _modules/sup3r/bias/bias_calc.html create mode 100644 _modules/sup3r/bias/bias_calc_cli.html create mode 100644 _modules/sup3r/bias/bias_calc_vortex.html create mode 100644 _modules/sup3r/bias/bias_transforms.html create mode 100644 _modules/sup3r/bias/mixins.html create mode 100644 _modules/sup3r/bias/presrat.html create mode 100644 _modules/sup3r/bias/qdm.html create mode 100644 _modules/sup3r/bias/utilities.html create mode 100644 _modules/sup3r/models/abstract.html create mode 100644 _modules/sup3r/models/base.html create mode 100644 _modules/sup3r/models/conditional.html create mode 100644 _modules/sup3r/models/dc.html create mode 100644 _modules/sup3r/models/linear.html create mode 100644 _modules/sup3r/models/multi_step.html create mode 100644 _modules/sup3r/models/solar_cc.html create mode 100644 _modules/sup3r/models/surface.html create mode 100644 _modules/sup3r/models/utilities.html create mode 100644 _modules/sup3r/pipeline/forward_pass.html create mode 100644 _modules/sup3r/pipeline/forward_pass_cli.html create mode 100644 
_modules/sup3r/pipeline/slicer.html create mode 100644 _modules/sup3r/pipeline/strategy.html create mode 100644 _modules/sup3r/pipeline/utilities.html create mode 100644 _modules/sup3r/postprocessing/collectors/base.html create mode 100644 _modules/sup3r/postprocessing/collectors/h5.html create mode 100644 _modules/sup3r/postprocessing/collectors/nc.html create mode 100644 _modules/sup3r/postprocessing/data_collect_cli.html create mode 100644 _modules/sup3r/postprocessing/writers/base.html create mode 100644 _modules/sup3r/postprocessing/writers/h5.html create mode 100644 _modules/sup3r/postprocessing/writers/nc.html create mode 100644 _modules/sup3r/preprocessing/accessor.html create mode 100644 _modules/sup3r/preprocessing/base.html create mode 100644 _modules/sup3r/preprocessing/batch_handlers/dc.html create mode 100644 _modules/sup3r/preprocessing/batch_handlers/factory.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/abstract.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/base.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/conditional.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/dc.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/dual.html create mode 100644 _modules/sup3r/preprocessing/batch_queues/utilities.html create mode 100644 _modules/sup3r/preprocessing/cachers/base.html create mode 100644 _modules/sup3r/preprocessing/collections/base.html create mode 100644 _modules/sup3r/preprocessing/collections/stats.html create mode 100644 _modules/sup3r/preprocessing/data_handlers/exo.html create mode 100644 _modules/sup3r/preprocessing/data_handlers/factory.html create mode 100644 _modules/sup3r/preprocessing/data_handlers/nc_cc.html create mode 100644 _modules/sup3r/preprocessing/derivers/base.html create mode 100644 _modules/sup3r/preprocessing/derivers/methods.html create mode 100644 _modules/sup3r/preprocessing/derivers/utilities.html create mode 100644 _modules/sup3r/preprocessing/loaders.html create mode 100644 _modules/sup3r/preprocessing/loaders/base.html create mode 100644 _modules/sup3r/preprocessing/loaders/h5.html create mode 100644 _modules/sup3r/preprocessing/loaders/nc.html create mode 100644 _modules/sup3r/preprocessing/loaders/utilities.html create mode 100644 _modules/sup3r/preprocessing/names.html create mode 100644 _modules/sup3r/preprocessing/rasterizers/base.html create mode 100644 _modules/sup3r/preprocessing/rasterizers/dual.html create mode 100644 _modules/sup3r/preprocessing/rasterizers/exo.html create mode 100644 _modules/sup3r/preprocessing/rasterizers/extended.html create mode 100644 _modules/sup3r/preprocessing/samplers/base.html create mode 100644 _modules/sup3r/preprocessing/samplers/cc.html create mode 100644 _modules/sup3r/preprocessing/samplers/dc.html create mode 100644 _modules/sup3r/preprocessing/samplers/dual.html create mode 100644 _modules/sup3r/preprocessing/samplers/utilities.html create mode 100644 _modules/sup3r/preprocessing/utilities.html create mode 100644 _modules/sup3r/qa/qa.html create mode 100644 _modules/sup3r/qa/utilities.html create mode 100644 _modules/sup3r/solar/solar.html create mode 100644 _modules/sup3r/solar/solar_cli.html create mode 100644 _modules/sup3r/utilities.html create mode 100644 _modules/sup3r/utilities/cli.html create mode 100644 _modules/sup3r/utilities/era_downloader.html create mode 100644 _modules/sup3r/utilities/interpolation.html create mode 100644 _modules/sup3r/utilities/loss_metrics.html create mode 100644 
_modules/sup3r/utilities/pytest/helpers.html create mode 100644 _modules/sup3r/utilities/utilities.html create mode 100644 _sources/_autosummary/sup3r.batch.batch_cli.rst create mode 100644 _sources/_autosummary/sup3r.batch.rst create mode 100644 _sources/_autosummary/sup3r.bias.base.DataRetrievalBase.rst create mode 100644 _sources/_autosummary/sup3r.bias.base.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.LinearCorrection.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.ScalarCorrection.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.SkillAssessment.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_local_job.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_slurm_job.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_cli.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_vortex.BiasCorrectUpdate.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_vortex.VortexMeanPrepper.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_calc_vortex.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.global_linear_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.local_linear_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.local_presrat_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.local_qdm_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.monthly_local_linear_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.bias_transforms.rst create mode 100644 _sources/_autosummary/sup3r.bias.mixins.FillAndSmoothMixin.rst create mode 100644 _sources/_autosummary/sup3r.bias.mixins.ZeroRateMixin.rst create mode 100644 _sources/_autosummary/sup3r.bias.mixins.rst create mode 100644 _sources/_autosummary/sup3r.bias.presrat.PresRat.rst create mode 100644 _sources/_autosummary/sup3r.bias.presrat.rst create mode 100644 _sources/_autosummary/sup3r.bias.qdm.QuantileDeltaMappingCorrection.rst create mode 100644 _sources/_autosummary/sup3r.bias.qdm.rst create mode 100644 _sources/_autosummary/sup3r.bias.rst create mode 100644 _sources/_autosummary/sup3r.bias.utilities.bias_correct_feature.rst create mode 100644 _sources/_autosummary/sup3r.bias.utilities.bias_correct_features.rst create mode 100644 _sources/_autosummary/sup3r.bias.utilities.lin_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.utilities.qdm_bc.rst create mode 100644 _sources/_autosummary/sup3r.bias.utilities.rst create mode 100644 _sources/_autosummary/sup3r.cli.rst create mode 100644 _sources/_autosummary/sup3r.models.abstract.AbstractInterface.rst create mode 100644 _sources/_autosummary/sup3r.models.abstract.AbstractSingleModel.rst create mode 100644 _sources/_autosummary/sup3r.models.abstract.TensorboardMixIn.rst create mode 100644 _sources/_autosummary/sup3r.models.abstract.rst create mode 100644 _sources/_autosummary/sup3r.models.base.Sup3rGan.rst create mode 100644 _sources/_autosummary/sup3r.models.base.rst create mode 100644 _sources/_autosummary/sup3r.models.conditional.Sup3rCondMom.rst create mode 100644 _sources/_autosummary/sup3r.models.conditional.rst create mode 100644 
_sources/_autosummary/sup3r.models.dc.Sup3rGanDC.rst create mode 100644 _sources/_autosummary/sup3r.models.dc.rst create mode 100644 _sources/_autosummary/sup3r.models.linear.LinearInterp.rst create mode 100644 _sources/_autosummary/sup3r.models.linear.rst create mode 100644 _sources/_autosummary/sup3r.models.multi_step.MultiStepGan.rst create mode 100644 _sources/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.rst create mode 100644 _sources/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.rst create mode 100644 _sources/_autosummary/sup3r.models.multi_step.rst create mode 100644 _sources/_autosummary/sup3r.models.rst create mode 100644 _sources/_autosummary/sup3r.models.solar_cc.SolarCC.rst create mode 100644 _sources/_autosummary/sup3r.models.solar_cc.rst create mode 100644 _sources/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.rst create mode 100644 _sources/_autosummary/sup3r.models.surface.rst create mode 100644 _sources/_autosummary/sup3r.models.utilities.TrainingSession.rst create mode 100644 _sources/_autosummary/sup3r.models.utilities.get_optimizer_class.rst create mode 100644 _sources/_autosummary/sup3r.models.utilities.rst create mode 100644 _sources/_autosummary/sup3r.models.utilities.st_interp.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.forward_pass.ForwardPass.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.forward_pass.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_local_job.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_slurm_job.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.forward_pass_cli.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.pipeline_cli.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.slicer.ForwardPassSlicer.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.slicer.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.strategy.ForwardPassChunk.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.strategy.ForwardPassStrategy.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.strategy.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.utilities.get_chunk_slices.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.utilities.get_model.rst create mode 100644 _sources/_autosummary/sup3r.pipeline.utilities.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.base.BaseCollector.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.base.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.h5.CollectorH5.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.h5.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.nc.CollectorNC.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.nc.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.collectors.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_local_job.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_slurm_job.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.data_collect_cli.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.base.OutputHandler.rst create mode 100644 
_sources/_autosummary/sup3r.postprocessing.writers.base.OutputMixin.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.base.RexOutputs.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.base.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.h5.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.nc.rst create mode 100644 _sources/_autosummary/sup3r.postprocessing.writers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.accessor.Sup3rX.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.accessor.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.base.Container.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.base.Sup3rDataset.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.base.Sup3rMeta.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_handlers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.rst create mode 100644 
_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.dc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.dual.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.cachers.base.Cacher.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.cachers.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.cachers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.cachers.utilities.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.collections.base.Collection.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.collections.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.collections.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.collections.stats.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.exo.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.factory.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.data_handlers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.base.Deriver.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.base.rst create mode 100644 
_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.Sza.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.Tas.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.USolar.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.UWind.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.VWind.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.methods.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.Loader.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.h5.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.nc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.utilities.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.names.Dimension.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.names.rst create 
mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.dual.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.exo.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.extended.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rasterizers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.base.Sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.base.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.cc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.dc.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.dual.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.check_signatures.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.composite_info.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.composite_sig.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.expand_paths.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.rst create mode 100644 
_sources/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.get_obj_params.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.get_source_type.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.is_type_of.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.log_args.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.lower_names.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.lowered.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.ordered_array.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.ordered_dims.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.parse_features.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.parse_keys.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.parse_to_list.rst create mode 100644 _sources/_autosummary/sup3r.preprocessing.utilities.rst create mode 100644 _sources/_autosummary/sup3r.qa.qa.Sup3rQa.rst create mode 100644 _sources/_autosummary/sup3r.qa.qa.rst create mode 100644 _sources/_autosummary/sup3r.qa.qa_cli.rst create mode 100644 _sources/_autosummary/sup3r.qa.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.continuous_dist.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.direct_dist.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.frequency_spectrum.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.gradient_dist.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.time_derivative_dist.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.tke_wavenumber_spectrum.rst create mode 100644 _sources/_autosummary/sup3r.qa.utilities.wavenumber_spectrum.rst create mode 100644 _sources/_autosummary/sup3r.rst create mode 100644 _sources/_autosummary/sup3r.solar.rst create mode 100644 _sources/_autosummary/sup3r.solar.solar.Solar.rst create mode 100644 _sources/_autosummary/sup3r.solar.solar.rst create mode 100644 _sources/_autosummary/sup3r.solar.solar_cli.kickoff_local_job.rst create mode 100644 _sources/_autosummary/sup3r.solar.solar_cli.kickoff_slurm_job.rst create mode 100644 _sources/_autosummary/sup3r.solar.solar_cli.rst create mode 100644 _sources/_autosummary/sup3r.utilities.ModuleName.rst create mode 100644 _sources/_autosummary/sup3r.utilities.cli.BaseCLI.rst create mode 100644 _sources/_autosummary/sup3r.utilities.cli.SlurmManager.rst create mode 100644 _sources/_autosummary/sup3r.utilities.cli.rst create mode 100644 _sources/_autosummary/sup3r.utilities.era_downloader.EraDownloader.rst create mode 100644 _sources/_autosummary/sup3r.utilities.era_downloader.rst create mode 100644 _sources/_autosummary/sup3r.utilities.interpolation.Interpolator.rst create mode 100644 _sources/_autosummary/sup3r.utilities.interpolation.rst 
create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.rst create mode 100644 _sources/_autosummary/sup3r.utilities.loss_metrics.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.DummyData.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.rst create mode 100644 _sources/_autosummary/sup3r.utilities.pytest.rst create mode 100644 _sources/_autosummary/sup3r.utilities.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.Timer.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.generate_random_string.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.nn_fill_array.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.pd_date_range.rst create mode 100644 
_sources/_autosummary/sup3r.utilities.utilities.preprocess_datasets.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.safe_cast.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.safe_serialize.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.spatial_coarsening.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.temporal_coarsening.rst create mode 100644 _sources/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.rst create mode 100644 _sources/_cli/cli.rst create mode 100644 _sources/_cli/sup3r.rst create mode 100644 _sources/api.rst create mode 100644 _sources/examples/examples.rst create mode 100644 _sources/examples/sup3rcc.rst create mode 100644 _sources/examples/sup3rwind.rst create mode 100644 _sources/index.rst create mode 100644 _sources/misc/installation.rst create mode 100644 _sources/misc/installation_usage.rst create mode 100644 _static/basic.css create mode 100644 _static/check-solid.svg create mode 100644 _static/clipboard.min.js create mode 100644 _static/copy-button.svg create mode 100644 _static/copybutton.css create mode 100644 _static/copybutton.js create mode 100644 _static/copybutton_funcs.js create mode 100644 _static/custom.css create mode 100644 _static/doctools.js create mode 100644 _static/documentation_options.js create mode 100644 _static/file.png create mode 100644 _static/images/logo_binder.svg create mode 100644 _static/images/logo_colab.png create mode 100644 _static/images/logo_deepnote.svg create mode 100644 _static/images/logo_jupyterhub.svg create mode 100644 _static/language_data.js create mode 100644 _static/locales/ar/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ar/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/bg/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/bg/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/bn/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/bn/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ca/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ca/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/cs/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/cs/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/da/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/da/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/de/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/de/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/el/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/el/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/eo/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/eo/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/es/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/es/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/et/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/et/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/fi/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/fi/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/fr/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/fr/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/hr/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/hr/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/id/LC_MESSAGES/booktheme.mo create mode 100644 
_static/locales/id/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/it/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/it/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/iw/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/iw/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ja/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ja/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ko/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ko/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/lt/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/lt/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/lv/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/lv/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ml/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ml/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/mr/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/mr/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ms/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ms/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/nl/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/nl/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/no/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/no/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/pl/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/pl/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/pt/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/pt/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ro/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ro/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ru/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ru/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/sk/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/sk/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/sl/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/sl/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/sr/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/sr/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/sv/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/sv/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ta/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ta/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/te/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/te/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/tg/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/tg/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/th/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/th/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/tl/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/tl/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/tr/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/tr/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/uk/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/uk/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/ur/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/ur/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/vi/LC_MESSAGES/booktheme.mo create mode 100644 
_static/locales/vi/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/zh_CN/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/zh_CN/LC_MESSAGES/booktheme.po create mode 100644 _static/locales/zh_TW/LC_MESSAGES/booktheme.mo create mode 100644 _static/locales/zh_TW/LC_MESSAGES/booktheme.po create mode 100644 _static/minus.png create mode 100644 _static/plus.png create mode 100644 _static/pygments.css create mode 100644 _static/sbt-webpack-macros.html create mode 100644 _static/scripts/bootstrap.js create mode 100644 _static/scripts/bootstrap.js.LICENSE.txt create mode 100644 _static/scripts/bootstrap.js.map create mode 100644 _static/scripts/fontawesome.js create mode 100644 _static/scripts/fontawesome.js.LICENSE.txt create mode 100644 _static/scripts/fontawesome.js.map create mode 100644 _static/scripts/pydata-sphinx-theme.js create mode 100644 _static/scripts/pydata-sphinx-theme.js.map create mode 100644 _static/scripts/sphinx-book-theme.js create mode 100644 _static/scripts/sphinx-book-theme.js.map create mode 100644 _static/searchtools.js create mode 100644 _static/sphinx_highlight.js create mode 100644 _static/styles/pydata-sphinx-theme.css create mode 100644 _static/styles/pydata-sphinx-theme.css.map create mode 100644 _static/styles/sphinx-book-theme.css create mode 100644 _static/styles/sphinx-book-theme.css.map create mode 100644 _static/styles/theme.css create mode 100644 _static/tabs.css create mode 100644 _static/tabs.js create mode 100644 _static/vendor/fontawesome/webfonts/fa-brands-400.ttf create mode 100644 _static/vendor/fontawesome/webfonts/fa-brands-400.woff2 create mode 100644 _static/vendor/fontawesome/webfonts/fa-regular-400.ttf create mode 100644 _static/vendor/fontawesome/webfonts/fa-regular-400.woff2 create mode 100644 _static/vendor/fontawesome/webfonts/fa-solid-900.ttf create mode 100644 _static/vendor/fontawesome/webfonts/fa-solid-900.woff2 create mode 100644 _static/webpack-macros.html create mode 100644 api.html create mode 100644 examples/examples.html create mode 100644 examples/sup3rcc.html create mode 100644 examples/sup3rwind.html create mode 100644 genindex.html create mode 100644 index.html create mode 100644 misc/installation.html create mode 100644 misc/installation_usage.html create mode 100644 objects.inv create mode 100644 py-modindex.html create mode 100644 search.html create mode 100644 searchindex.js
diff --git a/.buildinfo b/.buildinfo
new file mode 100644
index 000000000..e9038fbd1
--- /dev/null
+++ b/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 88b6c2e6e33c626c9f2b1c507f795d33
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 000000000..e69de29bb
diff --git a/_autosummary/sup3r.batch.batch_cli.html b/_autosummary/sup3r.batch.batch_cli.html
new file mode 100644
index 000000000..cf17c2dfd
--- /dev/null
+++ b/_autosummary/sup3r.batch.batch_cli.html
@@ -0,0 +1,730 @@
+sup3r.batch.batch_cli — sup3r 0.2.1.dev62 documentation
[generated page head, theme assets, and navigation markup stripped]
sup3r.batch.batch_cli

Batch Job CLI entry points.

\ No newline at end of file
diff --git a/_autosummary/sup3r.batch.html b/_autosummary/sup3r.batch.html
new file mode 100644
index 000000000..62a0a63d3
--- /dev/null
+++ b/_autosummary/sup3r.batch.html
@@ -0,0 +1,738 @@
+sup3r.batch — sup3r 0.2.1.dev62 documentation
[generated page head, theme assets, and navigation markup stripped]

sup3r.batch

sup3r batch utilities based on the GAPS batch module.

Modules

• batch_cli – Batch Job CLI entry points.

diff --git a/_autosummary/sup3r.bias.base.DataRetrievalBase.html b/_autosummary/sup3r.bias.base.DataRetrievalBase.html
new file mode 100644
index 000000000..11e143a85
--- /dev/null
+++ b/_autosummary/sup3r.bias.base.DataRetrievalBase.html
@@ -0,0 +1,1084 @@

sup3r.bias.base.DataRetrievalBase — sup3r 0.2.1.dev62 documentation

sup3r.bias.base.DataRetrievalBase

class DataRetrievalBase(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)

Bases: object

Base class to handle data retrieval for the biased data and the baseline data.

Parameters:
• base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
• bias_fps (list | str) – One or more biased .nc or .h5 filepaths representing the biased data to be corrected based on the baseline data. This is typically several years of GCM .nc files.
• base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be u_100m or v_100m, which will retrieve windspeed and winddirection and derive the U/V component.
• bias_feature (str) – The biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
• distance_upper_bound (float) – Upper bound on the nearest neighbor distance in decimal degrees. This should be the approximate resolution of the low-resolution bias data. None (default) will calculate this based on the median distance between points in bias_fps.
• target (tuple) – (lat, lon) lower left corner of the raster to retrieve from bias_fps. If None, the lower left corner of the full domain will be used.
• shape (tuple) – (rows, cols) grid size to retrieve from bias_fps. If None, the full domain shape will be used.
• base_handler (str) – Name of the rex resource handler or sup3r.preprocessing class to be retrieved from the rex/sup3r library. If a sup3r.preprocessing class is used, all data will be loaded in this class' initialization and the subsequent bias calculation will be done in serial.
• bias_handler (str) – Name of the bias data handler class to be retrieved from the sup3r.preprocessing library.
• base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
• bias_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class.
• decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
• match_zero_rate (bool) – Option to fix the frequency of zero values in the biased data. The lowest percentile of values in the biased data will be set to zero to match the percentile of zeros in the base data. If SkillAssessment is being run and this is True, the distributions will not be mean-centered. This helps resolve the issue where global climate models produce too many days with small precipitation totals, e.g., the “drizzle problem” [Polade2014].
• pre_load (bool) – Flag to preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.

References

[Polade2014] Polade, S. D., Pierce, D. W., Cayan, D. R., Gershunov, A., & Dettinger, M. D. (2014). The key role of dry days in changing regional climate and precipitation regimes. Scientific Reports, 4(1), 4364. https://doi.org/10.1038/srep04364
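For orientation, a minimal instantiation sketch using the signature documented above; the file paths, feature names, and target/shape values below are hypothetical:

    # hypothetical usage sketch of the constructor documented above
    dr = DataRetrievalBase(
        base_fps=['wtk_2012.h5', 'wtk_2013.h5'],  # hypothetical baseline WTK files
        bias_fps=['gcm_hist.nc'],                 # hypothetical biased GCM file
        base_dset='u_100m',
        bias_feature='u_100m',
        target=(39.0, -105.5),                    # (lat, lon) lower left corner
        shape=(20, 20),                           # (rows, cols) raster size
    )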

Methods

• compare_dists(base_data, bias_data[, adder, ...]) – Compare two distributions using the two-sample Kolmogorov-Smirnov test.
• get_base_data(base_fps, base_dset, base_gid, ...) – Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.
• get_base_gid(bias_gid) – Get one or more base gid(s) corresponding to a bias gid.
• get_bias_data(bias_gid[, bias_dh]) – Get data from the biased data source for a single gid.
• get_bias_gid(coord) – Get the bias gid from a coordinate.
• get_data_pair(coord[, daily_reduction]) – Get base and bias data observations based on a single bias gid.
• get_node_cmd(config) – Get a CLI call to call cls.run() on a single node based on an input config.
• pre_load() – Preload all data needed for bias correction.

Attributes

• distance_upper_bound – Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.
• meta – Get a metadata dictionary on how these bias factors were calculated.
pre_load()

Preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.

property meta

Get a metadata dictionary on how these bias factors were calculated.

property distance_upper_bound

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.
static compare_dists(base_data, bias_data, adder=0, scalar=1)

Compare two distributions using the two-sample Kolmogorov-Smirnov test. When the output is minimized, the two distributions are similar.

Parameters:
• base_data (np.ndarray) – 1D array of base data observations.
• bias_data (np.ndarray) – 1D array of biased data observations.
• adder (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder.
• scalar (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder.

Returns:
out (float) – KS test statistic
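What this comparison computes can be sketched with scipy directly (ks_2samp is the standard two-sample KS test; the sample arrays here are synthetic):

    import numpy as np
    from scipy.stats import ks_2samp

    rng = np.random.default_rng(0)
    base_data = rng.gamma(2.0, 1.0, size=1000)  # synthetic base observations
    bias_data = rng.gamma(2.5, 1.2, size=1000)  # synthetic biased observations

    # adjust the biased data before comparing, as documented:
    # bias_data * scalar + adder
    scalar, adder = 0.9, -0.1
    stat, _ = ks_2samp(base_data, bias_data * scalar + adder)
    # a smaller stat means the two distributions are more similar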
classmethod get_node_cmd(config)

Get a CLI call to call cls.run() on a single node based on an input config.

Parameters:
config (dict) – sup3r bias calc config with all necessary args and kwargs to initialize the class and call run() on a single node.

get_bias_gid(coord)

Get the bias gid from a coordinate.

Parameters:
coord (tuple) – (lat, lon) to get data for.

Returns:
• bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
• d (float) – Distance in decimal degrees from coord to the bias gid.

get_base_gid(bias_gid)

Get one or more base gid(s) corresponding to a bias gid.

Parameters:
bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.

Returns:
• dist (np.ndarray) – Array of nearest neighbor distances with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.
• base_gid (np.ndarray) – Array of base gids that are the nearest neighbors of bias_gid, with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.
get_data_pair(coord, daily_reduction='avg')

Get base and bias data observations based on a single bias gid.

Parameters:
• coord (tuple) – (lat, lon) to get data for.
• daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), “avg” (daily average), “max” (daily max), “min” (daily min), or “sum” (daily sum/total).

Returns:
• base_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
• bias_data (np.ndarray) – 1D array of temporal data at the requested gid.
• base_dist (np.ndarray) – Array of nearest neighbor distances from coord to the base data sites, with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.
• bias_dist (float) – Nearest neighbor distance from coord to the bias data site.
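Continuing the hypothetical dr instance from the sketch further up, a call might look like:

    base_data, bias_data, base_dist, bias_dist = dr.get_data_pair(
        (39.7, -105.2),         # hypothetical (lat, lon) coordinate
        daily_reduction='avg',  # daily averages of the hourly base data
    )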
get_bias_data(bias_gid, bias_dh=None)

Get data from the biased data source for a single gid.

Parameters:
• bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
• bias_dh (DataHandler, default=self.bias_dh) – Any DataHandler from sup3r.preprocessing. This optional argument allows an alternative handler other than the usual bias_dh. For instance, the derived QuantileDeltaMappingCorrection uses it to access the reference biased dataset as well as the target biased dataset.

Returns:
bias_data (np.ndarray) – 1D array of temporal data at the requested gid.
classmethod get_base_data(base_fps, base_dset, base_gid, base_handler, base_handler_kwargs=None, daily_reduction='avg', decimals=None, base_dh_inst=None)

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

Parameters:
• base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
• base_dset (str) – A single dataset from the base_fps to retrieve.
• base_gid (int | np.ndarray) – One or more spatial gids to retrieve from base_fps. The data will be spatially averaged across all of these sites.
• base_handler (rex.Resource) – A rex data handler similar to rex.Resource or sup3r.DataHandler classes (if using the latter, must also input base_dh_inst).
• base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
• daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), “avg” (daily average), “max” (daily max), “min” (daily min), or “sum” (daily sum/total).
• decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
• base_dh_inst (sup3r.DataHandler) – Instantiated DataHandler class that has already loaded the base data (required if base files are .nc and are not being opened by a rex Resource handler).

Returns:
• out_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
• out_ti (pd.DatetimeIndex) – DatetimeIndex object of datetimes corresponding to the output data.
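A call sketch for this classmethod (hypothetical file and dataset names; rex.Resource is the handler type the docstring names):

    import numpy as np
    from rex import Resource

    out_data, out_ti = DataRetrievalBase.get_base_data(
        base_fps=['wtk_2012.h5'],         # hypothetical baseline file
        base_dset='windspeed_100m',
        base_gid=np.array([10, 11, 12]),  # high-res sites averaged together
        base_handler=Resource,
        daily_reduction='avg',
    )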
diff --git a/_autosummary/sup3r.bias.base.html b/_autosummary/sup3r.bias.base.html
new file mode 100644
index 000000000..6e2eddb7b
--- /dev/null
+++ b/_autosummary/sup3r.bias.base.html
@@ -0,0 +1,741 @@

sup3r.bias.base — sup3r 0.2.1.dev62 documentation

sup3r.bias.base

Data retrieval class for performing / evaluating bias correction.

TODO: This can likely leverage the new data handling objects. Refactor accordingly.

Classes

• DataRetrievalBase(base_fps, bias_fps, ...[, ...]) – Base class to handle data retrieval for the biased data and the baseline data.
diff --git a/_autosummary/sup3r.bias.bias_calc.LinearCorrection.html b/_autosummary/sup3r.bias.bias_calc.LinearCorrection.html
new file mode 100644
index 000000000..569225408
--- /dev/null
+++ b/_autosummary/sup3r.bias.bias_calc.LinearCorrection.html
@@ -0,0 +1,1232 @@

sup3r.bias.bias_calc.LinearCorrection — sup3r 0.2.1.dev62 documentation

sup3r.bias.bias_calc.LinearCorrection

class LinearCorrection(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)

Bases: FillAndSmoothMixin, DataRetrievalBase

Calculate linear correction scalar and adder factors (applied as bias_data * scalar + adder) to bias correct data.

This calculation operates on single bias sites for the full time series of available data (no seasonal bias correction).

Parameters and references are the same as DataRetrievalBase, documented above.
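A hypothetical end-to-end sketch, using the constructor documented above and the run() method documented below:

    lc = LinearCorrection(
        base_fps=['wtk_2012.h5'],  # hypothetical baseline file
        bias_fps=['gcm_hist.nc'],  # hypothetical biased GCM file
        base_dset='u_100m',
        bias_feature='u_100m',
    )
    out = lc.run(fp_out='bias_correction.h5', max_workers=1)
    # out holds mean/std diagnostics and the scalar/adder factor arrays,
    # each of shape (lat, lon, time), per the run() docs below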

Methods

• compare_dists(base_data, bias_data[, adder, ...]) – Compare two distributions using the two-sample Kolmogorov-Smirnov test.
• fill_and_smooth(out[, fill_extend, ...]) – For a given set of parameters, fill and extend missing positions.
• get_base_data(base_fps, base_dset, base_gid, ...) – Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.
• get_base_gid(bias_gid) – Get one or more base gid(s) corresponding to a bias gid.
• get_bias_data(bias_gid[, bias_dh]) – Get data from the biased data source for a single gid.
• get_bias_gid(coord) – Get the bias gid from a coordinate.
• get_data_pair(coord[, daily_reduction]) – Get base and bias data observations based on a single bias gid.
• get_linear_correction(bias_data, base_data, ...) – Get the linear correction factors based on 1D bias and base datasets.
• get_node_cmd(config) – Get a CLI call to call cls.run() on a single node based on an input config.
• pre_load() – Preload all data needed for bias correction.
• run([fp_out, max_workers, daily_reduction, ...]) – Run linear correction factor calculations for every site in the bias dataset.
• write_outputs(fp_out, out) – Write outputs to an .h5 file.

Attributes

• NT – Size of the time dimension; 1 means no time-based bias correction.
• distance_upper_bound – Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.
• meta – Get a metadata dictionary on how these bias factors were calculated.
NT = 1

Size of the time dimension; 1 means no time-based bias correction.

static get_linear_correction(bias_data, base_data, bias_feature, base_dset)

Get the linear correction factors based on 1D bias and base datasets.

Parameters:
• bias_data (np.ndarray) – 1D array of biased data observations.
• base_data (np.ndarray) – 1D array of base data observations.
• bias_feature (str) – The biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
• base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be u_100m or v_100m, which will retrieve windspeed and winddirection and derive the U/V component.

Returns:
out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder
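The factors are applied as bias_data * scalar + adder, and the output records the mean/std of both datasets. One standard mean/variance-matching choice of such factors, shown here as a sketch of the idea rather than necessarily the exact internal formula, is:

    import numpy as np

    rng = np.random.default_rng(0)
    bias_data = rng.normal(5.0, 2.0, 1000)  # synthetic biased sample
    base_data = rng.normal(6.0, 1.5, 1000)  # synthetic base sample

    scalar = base_data.std() / bias_data.std()
    adder = base_data.mean() - bias_data.mean() * scalar

    corrected = bias_data * scalar + adder
    # corrected now has (approximately) the base sample's mean and std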
write_outputs(fp_out, out)

Write outputs to an .h5 file.

Parameters:
• fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
• out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
run(fp_out=None, max_workers=None, daily_reduction='avg', fill_extend=True, smooth_extend=0, smooth_interior=0)

Run linear correction factor calculations for every site in the bias dataset.

Parameters:
• fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
• max_workers (int) – Number of workers to run in parallel. 1 is serial and None is all available.
• daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), “avg” (daily average), “max” (daily max), “min” (daily min), or “sum” (daily sum/total).
• fill_extend (bool) – Flag to fill data past distance_upper_bound using spatial nearest neighbor. If False, the extended domain will be left as NaN.
• smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the distance_upper_bound input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
• smooth_interior (float) – Option to smooth the scalar/adder data within the valid spatial domain. This can reduce the effect of extreme values within aggregations over a large number of pixels.

Returns:
out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
static compare_dists(base_data, bias_data, adder=0, scalar=1)

Inherited from DataRetrievalBase; documented above.

property distance_upper_bound

Inherited from DataRetrievalBase; documented above.
fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)

For a given set of parameters, fill and extend missing positions.

Fill data extending beyond the base meta data extent by doing a nearest neighbor gap fill. Smooth the interior and extended region with the given smoothing values. Interior smoothing can reduce the effect of extreme values within aggregations over a large number of pixels. The interior is assumed to be defined by the region without NaN values; the extended region is assumed to be the region with NaN values.

Parameters:
• out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
• fill_extend (bool) – Whether to fill data extending beyond the base meta data with nearest neighbor values.
• smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the threshold input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
• smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial domain set by the threshold input. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.

Returns:
out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
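The documented behavior (nearest neighbor gap fill of the NaN region, then gaussian smoothing) can be sketched with scipy directly; the array shape and sigma here are illustrative:

    import numpy as np
    from scipy.ndimage import gaussian_filter
    from scipy.interpolate import NearestNDInterpolator

    scalar = np.random.rand(10, 10)  # hypothetical (lat, lon) factor field
    scalar[:, 7:] = np.nan           # NaNs mark the extended region

    # nearest neighbor gap fill of the NaN (extended) region
    yy, xx = np.nonzero(~np.isnan(scalar))
    interp = NearestNDInterpolator(np.c_[yy, xx], scalar[yy, xx])
    gy, gx = np.mgrid[0:10, 0:10]
    filled = interp(np.c_[gy.ravel(), gx.ravel()]).reshape(10, 10)

    # smooth with a gaussian kernel; sigma plays the role of smooth_extend /
    # smooth_interior (the standard deviation of the gaussian_filter kernel)
    smoothed = gaussian_filter(filled, sigma=1.0)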
The remaining methods and properties (get_base_data, get_base_gid, get_bias_data, get_bias_gid, get_data_pair, get_node_cmd, meta, pre_load) are inherited verbatim from DataRetrievalBase and documented above.
diff --git a/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.html b/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.html
new file mode 100644
index 000000000..a5c9ae285
--- /dev/null
+++ b/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.html
@@ -0,0 +1,1231 @@

sup3r.bias.bias_calc.MonthlyLinearCorrection — sup3r 0.2.1.dev62 documentation

sup3r.bias.bias_calc.MonthlyLinearCorrection

class MonthlyLinearCorrection(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)

Bases: LinearCorrection

Calculate linear correction scalar and adder factors (applied as bias_data * scalar + adder) to bias correct data.

This calculation operates on single bias sites on a monthly basis.

Parameters and references are the same as DataRetrievalBase, documented above.
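Since NT = 12 here, one set of correction factors is resolved per calendar month. A sketch of that grouping (synthetic data; pandas supplies the month index):

    import numpy as np
    import pandas as pd

    ti = pd.date_range('2012-01-01', '2012-12-31', freq='D')  # hypothetical daily index
    bias_data = np.random.rand(len(ti))
    base_data = np.random.rand(len(ti))

    for month in range(1, 13):  # one (scalar, adder) pair per month
        mask = ti.month == month
        # ...derive correction factors from bias_data[mask] vs base_data[mask]...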

The methods and attributes summary is the same as LinearCorrection, listed above; the only difference is NT, documented below.
NT = 12

Size of the time dimension; 12 means monthly bias correction.
+

Compare two distributions using the two-sample Kolmogorov-Smirnov. +When the output is minimized, the two distributions are similar.

+
+
Parameters:
+
    +
  • base_data (np.ndarray) – 1D array of base data observations.

  • +
  • bias_data (np.ndarray) – 1D array of biased data observations.

  • +
  • adder (float) – Factor to adjust the biased data before comparing distributions: +bias_data * scalar + adder

  • +
  • scalar (float) – Factor to adjust the biased data before comparing distributions: +bias_data * scalar + adder

  • +
+
+
Returns:
+

out (float) – KS test statistic

+
+
+
+ +
+
+property distance_upper_bound#
+

Maximum distance (float) to map high-resolution data from exo_source +to the low-resolution file_paths input.

+
+ +
+
+fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)#
+

For a given set of parameters, fill and extend missing positions

+

Fill data extending beyond the base meta data extent by doing a +nearest neighbor gap fill. Smooth interior and extended region with +given smoothing values. +Interior smoothing can reduce the affect of extreme values +within aggregations over large number of pixels. +The interior is assumed to be defined by the region without nan values. +The extended region is assumed to be the region with nan values.

+
+
Parameters:
+
    +
  • out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

  • +
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with +nearest neighbor values.

  • +
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial +domain set by the threshold input. This alleviates the weird seams +far from the domain of interest. This value is the standard +deviation for the gaussian_filter kernel

  • +
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial +domain set by the threshold input. This can reduce the effect of +extreme values within aggregations over large number of pixels. +This value is the standard deviation for the gaussian_filter +kernel.

  • +
+
+
Returns:
+

out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

+
+
+
+ +
+
+classmethod get_base_data(base_fps, base_dset, base_gid, base_handler, base_handler_kwargs=None, daily_reduction='avg', decimals=None, base_dh_inst=None)#
+

Get data from the baseline data source, possibly for many high-res +base gids corresponding to a single coarse low-res bias gid.

+
+
Parameters:
+
    +
  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to +use to correct the biased dataset. This is typically several years +of WTK or NSRDB files.

  • +
  • base_dset (str) – A single dataset from the base_fps to retrieve.

  • +
  • base_gid (int | np.ndarray) – One or more spatial gids to retrieve from base_fps. The data will +be spatially averaged across all of these sites.

  • +
  • base_handler (rex.Resource) – A rex data handler similar to rex.Resource or sup3r.DataHandler +classes (if using the latter, must also input base_dh_inst)

  • +
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler +class

  • +
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily +data. Can be None (no reduction, keep source time frequency), “avg” +(daily average), “max” (daily max), “min” (daily min), +“sum” (daily sum/total)

  • +
  • decimals (int | None) – Option to round bias and base data to this number of +decimals, this gets passed to np.around(). If decimals +is negative, it specifies the number of positions to +the left of the decimal point.

  • +
  • base_dh_inst (sup3r.DataHandler) – Instantiated DataHandler class that has already loaded the base +data (required if base files are .nc and are not being opened by a +rex Resource handler).

  • +
+
+
Returns:
+

    +
  • out_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input +and possibly daily-averaged or min/max’d as well.

  • +
  • out_ti (pd.DatetimeIndex) – DatetimeIndex object of datetimes corresponding to the +output data.

  • +
+

+
+
+
+ +
+
+get_base_gid(bias_gid)#
+

Get one or more base gid(s) corresponding to a bias gid.

+
+
Parameters:
+

bias_gid (int) – gid of the data to retrieve in the bias data source raster data. +The gids for this data source are the enumerated indices of the +flattened coordinate array.

+
+
Returns:
+

    +
  • dist (np.ndarray) – Array of nearest neighbor distances with length equal to the number +of high-resolution baseline gids that map to the low resolution +bias gid pixel.

  • +
  • base_gid (np.ndarray) – Array of base gids that are the nearest neighbors of bias_gid with +length equal to the number of high-resolution baseline gids that +map to the low resolution bias gid pixel.

  • +
+

+
+
+
+ +
+
+get_bias_data(bias_gid, bias_dh=None)#
+

Get data from the biased data source for a single gid

+
+
Parameters:
+
    +
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. +The gids for this data source are the enumerated indices of the +flattened coordinate array.

  • +
  • bias_dh (DataHandler, default=self.bias_dh) – Any DataHandler from sup3r.preprocessing. This optional +argument allows an alternative handler other than the usual +bias_dh. For instance, the derived +QuantileDeltaMappingCorrection uses it to access the +reference biased dataset as well as the target biased dataset.

  • +
+
+
Returns:
+

bias_data (np.ndarray) – 1D array of temporal data at the requested gid.

+
+
+
+ +
+
+get_bias_gid(coord)#
+

Get the bias gid from a coordinate.

+
+
Parameters:
+

coord (tuple) – (lat, lon) to get data for.

+
+
Returns:
+

    +
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. +The gids for this data source are the enumerated indices of the +flattened coordinate array.

  • +
  • d (float) – Distance in decimal degrees from coord to bias gid

  • +
+

+
+
+
+ +
+
+get_data_pair(coord, daily_reduction='avg')#
+

Get base and bias data observations based on a single bias gid.

+
+
Parameters:
+
    +
  • coord (tuple) – (lat, lon) to get data for.

  • +
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily +data. Can be None (no reduction, keep source time frequency), “avg” +(daily average), “max” (daily max), “min” (daily min), +“sum” (daily sum/total)

  • +
+
+
Returns:
+

    +
  • base_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input +and possibly daily-averaged or min/max’d as well.

  • +
  • bias_data (np.ndarray) – 1D array of temporal data at the requested gid.

  • +
  • base_dist (np.ndarray) – Array of nearest neighbor distances from coord to the base data +sites with length equal to the number of high-resolution baseline +gids that map to the low resolution bias gid pixel.

  • +
  • bias_dist (Float) – Nearest neighbor distance from coord to the bias data site

  • +
+

+
+
+
+ +
+
+static get_linear_correction(bias_data, base_data, bias_feature, base_dset)#
+

Get the linear correction factors based on 1D bias and base datasets

+
+
Parameters:
+
    +
  • bias_data (np.ndarray) – 1D array of biased data observations.

  • +
  • base_data (np.ndarray) – 1D array of base data observations.

  • +
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should +be a single feature name corresponding to base_dset

  • +
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind +components, this can be u_100m or v_100m which will retrieve +windspeed and winddirection and derive the U/V component.

  • +
+
+
Returns:
+

out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder

+
+
+
+ +
+
+classmethod get_node_cmd(config)#
+

Get a CLI call to call cls.run() on a single node based on an input +config.

+
+
Parameters:
+

config (dict) – sup3r bias calc config with all necessary args and kwargs to +initialize the class and call run() on a single node.

+
+
+
+ +
+
+property meta#
+

Get a meta data dictionary on how these bias factors were +calculated

+
+ +
+
+pre_load()#
+

Preload all data needed for bias correction. This is currently +recommended to improve performance with the new sup3r data handler +access patterns

+
+ +
+
+run(fp_out=None, max_workers=None, daily_reduction='avg', fill_extend=True, smooth_extend=0, smooth_interior=0)#
+

Run linear correction factor calculations for every site in the bias +dataset

+
+
Parameters:
+
    +
  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.

  • +
  • max_workers (int) – Number of workers to run in parallel. 1 is serial and None is all +available.

  • +
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily +data. Can be None (no reduction, keep source time frequency), “avg” +(daily average), “max” (daily max), “min” (daily min), +“sum” (daily sum/total)

  • +
  • fill_extend (bool) – Flag to fill data past distance_upper_bound using spatial nearest +neighbor. If False, the extended domain will be left as NaN.

  • +
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial +domain set by the distance_upper_bound input. This alleviates the +weird seams far from the domain of interest. This value is the +standard deviation for the gaussian_filter kernel

  • +
  • smooth_interior (float) – Option to smooth the scalar/adder data within the valid spatial +domain. This can reduce the affect of extreme values within +aggregations over large number of pixels.

  • +
+
+
Returns:
+

out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

+
+
+
+ +
+
+write_outputs(fp_out, out)#
+

Write outputs to an .h5 file.

+
+
Parameters:
+
    +
  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.

  • +
  • out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

  • +
+
+
+
+ +
+ +
diff --git a/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.html b/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.html
new file mode 100644
index 000000000..fb50b1479
--- /dev/null
+++ b/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.html
@@ -0,0 +1,1230 @@

sup3r.bias.bias_calc.MonthlyScalarCorrection — sup3r 0.2.1.dev62 documentation

sup3r.bias.bias_calc.MonthlyScalarCorrection

class MonthlyScalarCorrection(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)

Bases: MonthlyLinearCorrection, ScalarCorrection

Calculate linear correction scalar factors for each month.

Parameters and references are the same as DataRetrievalBase, documented above.
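With a scalar-only correction, the factor can be chosen to match means; a sketch of that idea (not necessarily the exact internal formula):

    import numpy as np

    rng = np.random.default_rng(0)
    bias_month = rng.gamma(2.0, 1.0, 31)  # synthetic biased data for one month
    base_month = rng.gamma(2.2, 1.1, 31)  # synthetic base data for one month

    scalar = base_month.mean() / bias_month.mean()
    corrected = bias_month * scalar  # no adder term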

The methods and attributes summary is the same as MonthlyLinearCorrection, listed above.
NT = 12

Size of the time dimension; 12 means monthly bias correction.
+static compare_dists(base_data, bias_data, adder=0, scalar=1)#
+

Compare two distributions using the two-sample Kolmogorov-Smirnov. +When the output is minimized, the two distributions are similar.

+
+
Parameters:
+
    +
  • base_data (np.ndarray) – 1D array of base data observations.

  • +
  • bias_data (np.ndarray) – 1D array of biased data observations.

  • +
  • adder (float) – Factor to adjust the biased data before comparing distributions: +bias_data * scalar + adder

  • +
  • scalar (float) – Factor to adjust the biased data before comparing distributions: +bias_data * scalar + adder

  • +
+
+
Returns:
+

out (float) – KS test statistic

+
+
+
+ +
+
+property distance_upper_bound#
+

Maximum distance (float) to map high-resolution data from exo_source +to the low-resolution file_paths input.

+
+ +
+
+fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)#
+

For a given set of parameters, fill and extend missing positions

+

Fill data extending beyond the base meta data extent by doing a +nearest neighbor gap fill. Smooth interior and extended region with +given smoothing values. +Interior smoothing can reduce the affect of extreme values +within aggregations over large number of pixels. +The interior is assumed to be defined by the region without nan values. +The extended region is assumed to be the region with nan values.

+
+
Parameters:
+
    +
  • out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

  • +
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with +nearest neighbor values.

  • +
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial +domain set by the threshold input. This alleviates the weird seams +far from the domain of interest. This value is the standard +deviation for the gaussian_filter kernel

  • +
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial +domain set by the threshold input. This can reduce the effect of +extreme values within aggregations over large number of pixels. +This value is the standard deviation for the gaussian_filter +kernel.

  • +
+
+
Returns:
+

out (dict) – Dictionary of values defining the mean/std of the bias + base +data and the scalar + adder factors to correct the biased data +like: bias_data * scalar + adder. Each value is of shape +(lat, lon, time).

+
+
+
+ +
classmethod get_base_data(base_fps, base_dset, base_gid, base_handler, base_handler_kwargs=None, daily_reduction='avg', decimals=None, base_dh_inst=None)#

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

Parameters:
  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
  • base_dset (str) – A single dataset from the base_fps to retrieve.
  • base_gid (int | np.ndarray) – One or more spatial gids to retrieve from base_fps. The data will be spatially averaged across all of these sites.
  • base_handler (rex.Resource) – A rex data handler similar to rex.Resource or sup3r.DataHandler classes (if using the latter, must also input base_dh_inst).
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total).
  • decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • base_dh_inst (sup3r.DataHandler) – Instantiated DataHandler class that has already loaded the base data (required if base files are .nc and are not being opened by a rex Resource handler).
Returns:
  • out_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • out_ti (pd.DatetimeIndex) – DatetimeIndex object of datetimes corresponding to the output data.
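
The daily_reduction options map naturally onto a pandas resample. Roughly, and as a sketch only, assuming `data` is the spatially averaged series and `ti` its DatetimeIndex:

```python
import pandas as pd

def daily_reduce(data, ti, daily_reduction='avg'):
    """Reduce hourly+ data to daily values per the documented options."""
    if daily_reduction is None:
        return data, ti  # keep source time frequency
    agg = {'avg': 'mean', 'max': 'max', 'min': 'min', 'sum': 'sum'}
    daily = pd.Series(data, index=ti).resample('1D').agg(agg[daily_reduction])
    return daily.to_numpy(), daily.index
```
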
get_base_gid(bias_gid)#

Get one or more base gid(s) corresponding to a bias gid.

Parameters:
  bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
Returns:
  • dist (np.ndarray) – Array of nearest neighbor distances with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.
  • base_gid (np.ndarray) – Array of base gids that are the nearest neighbors of bias_gid with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.

get_bias_data(bias_gid, bias_dh=None)#

Get data from the biased data source for a single gid.

Parameters:
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • bias_dh (DataHandler, default=self.bias_dh) – Any DataHandler from sup3r.preprocessing. This optional argument allows an alternative handler other than the usual bias_dh. For instance, the derived QuantileDeltaMappingCorrection uses it to access the reference biased dataset as well as the target biased dataset.
Returns:
  bias_data (np.ndarray) – 1D array of temporal data at the requested gid.

get_bias_gid(coord)#

Get the bias gid from a coordinate.

Parameters:
  coord (tuple) – (lat, lon) to get data for.
Returns:
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • d (float) – Distance in decimal degrees from coord to the bias gid.

get_data_pair(coord, daily_reduction='avg')#

Get base and bias data observations based on a single bias gid.

Parameters:
  • coord (tuple) – (lat, lon) to get data for.
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total).
Returns:
  • base_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • bias_data (np.ndarray) – 1D array of temporal data at the requested gid.
  • base_dist (np.ndarray) – Array of nearest neighbor distances from coord to the base data sites with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.
  • bias_dist (float) – Nearest neighbor distance from coord to the bias data site.

static get_linear_correction(bias_data, base_data, bias_feature, base_dset)#

Get the linear correction factors based on 1D bias and base datasets.

Parameters:
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be u_100m or v_100m which will retrieve windspeed and winddirection and derive the U/V component.
Returns:
  out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder
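
The documented scalar/adder pair is consistent with conventional mean/std matching, so that bias_data * scalar + adder takes on the base data's mean and standard deviation. A sketch follows; the dictionary keys the real method writes are not shown, and the actual implementation may treat NaNs and edge cases differently:

```python
import numpy as np

def linear_factors(bias_data, base_data):
    """Mean/std-matching linear correction factors (sketch)."""
    scalar = np.nanstd(base_data) / np.nanstd(bias_data)
    adder = np.nanmean(base_data) - np.nanmean(bias_data) * scalar
    return scalar, adder
```
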
classmethod get_node_cmd(config)#

Get a CLI call to call cls.run() on a single node based on an input config.

Parameters:
  config (dict) – sup3r bias calc config with all necessary args and kwargs to initialize the class and call run() on a single node.

property meta#

Get a metadata dictionary on how these bias factors were calculated.

pre_load()#

Preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.

run(fp_out=None, max_workers=None, daily_reduction='avg', fill_extend=True, smooth_extend=0, smooth_interior=0)#

Run linear correction factor calculations for every site in the bias dataset.

Parameters:
  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
  • max_workers (int) – Number of workers to run in parallel. 1 is serial and None is all available.
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total).
  • fill_extend (bool) – Flag to fill data past distance_upper_bound using spatial nearest neighbor. If False, the extended domain will be left as NaN.
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the distance_upper_bound input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
  • smooth_interior (float) – Option to smooth the scalar/adder data within the valid spatial domain. This can reduce the effect of extreme values within aggregations over large numbers of pixels.
Returns:
  out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
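
A hypothetical end-to-end invocation of this workflow, assuming the LinearCorrection class from this module; all file paths and feature names below are placeholders:

```python
lc = LinearCorrection(
    base_fps=['/data/wtk_2012.h5', '/data/wtk_2013.h5'],  # placeholder paths
    bias_fps='/data/gcm_historical.nc',
    base_dset='u_100m',
    bias_feature='u_100m',
)
out = lc.run(fp_out='./bias_factors.h5', max_workers=4,
             daily_reduction='avg', fill_extend=True, smooth_extend=1.0)
```
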
write_outputs(fp_out, out)#

Write outputs to an .h5 file.

Parameters:
  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
  • out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).

sup3r.bias.bias_calc.ScalarCorrection#

class ScalarCorrection(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)[source]#

Bases: LinearCorrection

Calculate annual linear correction *scalar factors to bias correct data. This is typically used when the base data is just monthly or annual means and standard deviations cannot be computed. This is the case for vortex data, for example. Thus, just scalar factors are computed as mean(base_data) / mean(bias_data). Adder factors are still written but are exactly zero.

This calculation operates on single bias sites on a monthly basis.

Parameters:
  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
  • bias_fps (list | str) – One or more biased .nc or .h5 filepaths representing the biased data to be corrected based on the baseline data. This is typically several years of GCM .nc files.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be u_100m or v_100m which will retrieve windspeed and winddirection and derive the U/V component.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • distance_upper_bound (float) – Upper bound on the nearest neighbor distance in decimal degrees. This should be the approximate resolution of the low-resolution bias data. None (default) will calculate this based on the median distance between points in bias_fps.
  • target (tuple) – (lat, lon) lower left corner of raster to retrieve from bias_fps. If None then the lower left corner of the full domain will be used.
  • shape (tuple) – (rows, cols) grid size to retrieve from bias_fps. If None then the full domain shape will be used.
  • base_handler (str) – Name of rex resource handler or sup3r.preprocessing class to be retrieved from the rex/sup3r library. If a sup3r.preprocessing class is used, all data will be loaded in this class' initialization and the subsequent bias calculation will be done in serial.
  • bias_handler (str) – Name of the bias data handler class to be retrieved from the sup3r.preprocessing library.
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
  • bias_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class.
  • decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • match_zero_rate (bool) – Option to fix the frequency of zero values in the biased data. The lowest percentile of values in the biased data will be set to zero to match the percentile of zeros in the base data. If SkillAssessment is being run and this is True, the distributions will not be mean-centered. This helps resolve the issue where global climate models produce too many days with small precipitation totals, e.g., the "drizzle problem" [Polade2014].
  • pre_load (bool) – Flag to preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.

References

[Polade2014]
Polade, S. D., Pierce, D. W., Cayan, D. R., Gershunov, A., & Dettinger, M. D. (2014). The key role of dry days in changing regional climate and precipitation regimes. Scientific Reports, 4(1), 4364. https://doi.org/10.1038/srep04364

Methods


compare_dists(base_data, bias_data[, adder, ...])

Compare two distributions using the two-sample Kolmogorov-Smirnov.

fill_and_smooth(out[, fill_extend, ...])

For a given set of parameters, fill and extend missing positions

get_base_data(base_fps, base_dset, base_gid, ...)

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

get_base_gid(bias_gid)

Get one or more base gid(s) corresponding to a bias gid.

get_bias_data(bias_gid[, bias_dh])

Get data from the biased data source for a single gid

get_bias_gid(coord)

Get the bias gid from a coordinate.

get_data_pair(coord[, daily_reduction])

Get base and bias data observations based on a single bias gid.

get_linear_correction(bias_data, base_data, ...)

Get the linear correction factors based on 1D bias and base datasets

get_node_cmd(config)

Get a CLI call to call cls.run() on a single node based on an input config.

pre_load()

Preload all data needed for bias correction.

run([fp_out, max_workers, daily_reduction, ...])

Run linear correction factor calculations for every site in the bias dataset

write_outputs(fp_out, out)

Write outputs to an .h5 file.


Attributes


NT

size of the time dimension, 1 is no time-based bias correction

distance_upper_bound

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.

meta

Get a metadata dictionary on how these bias factors were calculated

static get_linear_correction(bias_data, base_data, bias_feature, base_dset)[source]#

Get the linear correction factors based on 1D bias and base datasets.

Parameters:
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be u_100m or v_100m which will retrieve windspeed and winddirection and derive the U/V component.
Returns:
  out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder
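
Per the class description above, this override reduces to a scalar-only correction. Schematically, with base_data and bias_data being the 1D arrays from the method parameters:

```python
import numpy as np

# ScalarCorrection: scalar-only factors; adders are written but are zero
scalar = np.nanmean(base_data) / np.nanmean(bias_data)
adder = 0.0
corrected = bias_data * scalar + adder
```
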
NT = 1#

size of the time dimension, 1 is no time-based bias correction

All remaining methods and properties (compare_dists, distance_upper_bound, fill_and_smooth, get_base_data, get_base_gid, get_bias_data, get_bias_gid, get_data_pair, get_node_cmd, meta, pre_load, run, and write_outputs) are inherited unchanged from LinearCorrection and are documented in full above.

sup3r.bias.bias_calc.SkillAssessment#

class SkillAssessment(base_fps, bias_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, decimals=None, match_zero_rate=False, pre_load=True)[source]#

Bases: MonthlyLinearCorrection

Calculate historical skill of one dataset compared to another.

The constructor parameters and references are identical to those documented for ScalarCorrection above (base_fps through pre_load, including the [Polade2014] reference for match_zero_rate). The inherited method summaries are likewise identical to those listed for ScalarCorrection.

Attributes

NT

size of the time dimension, 12 is monthly bias correction

PERCENTILES

Data percentiles to report.

distance_upper_bound

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.

meta

Get a metadata dictionary on how these bias factors were calculated

PERCENTILES = (1, 5, 25, 50, 75, 95, 99)#

Data percentiles to report.
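
Schematically, this attribute drives which distribution quantiles get reported for each site. The output key names below are hypothetical and the real skill-output naming may differ:

```python
import numpy as np

for p in SkillAssessment.PERCENTILES:
    # Hypothetical output keys for the reported percentiles
    out[f'base_percentile_{p}'] = np.percentile(base_data, p)
    out[f'bias_percentile_{p}'] = np.percentile(bias_data, p)
```
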
NT = 12#

size of the time dimension, 12 is monthly bias correction

All remaining methods and properties (compare_dists, distance_upper_bound, fill_and_smooth, get_base_data, get_base_gid, get_bias_data, get_bias_gid, get_data_pair, get_linear_correction, get_node_cmd, meta, pre_load, run, and write_outputs) are inherited unchanged from the parent classes and are documented in full above.

sup3r.bias.bias_calc#

Utilities to calculate the bias correction factors for biased data that is going to be fed into the sup3r downscaling models. This is typically used to bias correct GCM data vs. some historical record like the WTK or NSRDB.

TODO: Generalize the `with ProcessPoolExecutor() as exe: ...` pattern so we don't need to duplicate it wherever we kick off a process or thread pool.
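
One possible shape for that generalization is sketched below, assuming workloads can be expressed as (func, args) pairs; this illustrates the idea and is not the library's code:

```python
from concurrent.futures import ProcessPoolExecutor

def map_in_pool(func, args_list, max_workers=None):
    """Run func over a list of argument tuples; serial if max_workers == 1."""
    if max_workers == 1:
        return [func(*args) for args in args_list]
    with ProcessPoolExecutor(max_workers=max_workers) as exe:
        futures = [exe.submit(func, *args) for args in args_list]
        return [fut.result() for fut in futures]
```
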

Classes

LinearCorrection(base_fps, bias_fps, ...[, ...])

Calculate linear correction *scalar +adder factors to bias correct data

MonthlyLinearCorrection(base_fps, bias_fps, ...)

Calculate linear correction *scalar +adder factors to bias correct data

MonthlyScalarCorrection(base_fps, bias_fps, ...)

Calculate linear correction *scalar factors for each month

ScalarCorrection(base_fps, bias_fps, ...[, ...])

Calculate annual linear correction *scalar factors to bias correct data. This is typically used when the base data is just monthly or annual means and standard deviations cannot be computed. This is the case for vortex data, for example. Thus, just scalar factors are computed as mean(base_data) / mean(bias_data). Adder factors are still written but are exactly zero.

SkillAssessment(base_fps, bias_fps, ...[, ...])

Calculate historical skill of one dataset compared to another.

sup3r.bias.bias_calc_cli#

sup3r bias correction calculation CLI entry points.

Functions

kickoff_local_job(ctx, cmd[, pipeline_step])

Run sup3r bias calc locally.

kickoff_slurm_job(ctx, cmd[, pipeline_step, ...])

Run sup3r on HPC via SLURM job submission.


sup3r.bias.bias_calc_cli.kickoff_local_job#

kickoff_local_job(ctx, cmd, pipeline_step=None)[source]#

Run sup3r bias calc locally.

Parameters:
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • cmd (str) – Command to be submitted in shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'
  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

sup3r.bias.bias_calc_cli.kickoff_slurm_job#

kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', memory=None, walltime=4, feature=None, stdout_path='./stdout/')[source]#

Run sup3r on HPC via SLURM job submission.

Parameters:
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • cmd (str) – Command to be submitted in SLURM shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'
  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.
  • alloc (str) – HPC project (allocation) handle. Example: 'sup3r'.
  • memory (int) – Node memory request in GB.
  • walltime (float) – Node walltime request in hours.
  • feature (str) – Additional flags for the SLURM job. Format is "--qos=high" or "--depend=[state:job_id]". Default is None.
  • stdout_path (str) – Path to print .stdout and .stderr files.
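
A hypothetical call mirroring the documented parameters; the config file name is a placeholder and the memory value is illustrative:

```python
kickoff_slurm_job(
    ctx,
    cmd='python -m sup3r.cli forward_pass -c <config_file>',
    alloc='sup3r',
    memory=80,            # GB; placeholder value
    walltime=4,           # hours
    feature='--qos=high',
    stdout_path='./stdout/',
)
```
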

sup3r.bias.bias_calc_vortex.BiasCorrectUpdate#

class BiasCorrectUpdate[source]#

Bases: object

Class for bias correcting existing files and writing corrected files.

Methods

get_bc_factors(bc_file, dset, month[, ...])

Get bias correction factors for the given dset and month

run(in_file, out_file, dset, bc_file[, ...])

Run bias correction update.

update_file(in_file, out_file, dset, bc_file)

Update the in_file with bias corrected values for the given dset and write to out_file.

classmethod get_bc_factors(bc_file, dset, month, global_scalar=1)[source]#

Get bias correction factors for the given dset and month.

Parameters:
  • bc_file (str) – Name of h5 file containing bias correction factors
  • dset (str) – Name of dataset to apply bias correction factors for
  • month (int) – Index of month to bias correct
  • global_scalar (float) – Optional global scalar to multiply all bias correction factors. This can be used to correct for systematic bias relative to observation data.
Returns:
  factors (ndarray) – Array of bias correction factors for the given dset and month.

classmethod update_file(in_file, out_file, dset, bc_file, global_scalar=1, max_workers=None)[source]#

Update the in_file with bias corrected values for the given dset and write to out_file.

Parameters:
  • in_file (str) – Name of h5 file containing data to bias correct
  • out_file (str) – Name of h5 file to write the bias corrected data to
  • dset (str) – Name of dataset to bias correct
  • bc_file (str) – Name of file containing bias correction factors for the given dset
  • global_scalar (float) – Optional global scalar to multiply all bias correction factors. This can be used to correct for systematic bias relative to observation data.
  • max_workers (int | None) – Number of workers to use for parallel processing.

classmethod run(in_file, out_file, dset, bc_file, overwrite=False, global_scalar=1, max_workers=None)[source]#

Run bias correction update.

Parameters:
  • in_file (str) – Name of h5 file containing data to bias correct
  • out_file (str) – Name of h5 file to write the bias corrected data to
  • dset (str) – Name of dataset to bias correct
  • bc_file (str) – Name of file containing bias correction factors for the given dset
  • overwrite (bool) – Whether to overwrite the output file if it already exists.
  • global_scalar (float) – Optional global scalar to multiply all bias correction factors. This can be used to correct for systematic bias relative to observation data.
  • max_workers (int | None) – Number of workers to use for parallel processing.
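
A hypothetical invocation following the documented signature; file and dataset names are placeholders:

```python
BiasCorrectUpdate.run(
    in_file='./sup3r_output.h5',       # placeholder paths
    out_file='./sup3r_output_bc.h5',
    dset='windspeed_100m',             # placeholder dataset name
    bc_file='./bc_factors.h5',
    overwrite=True,
    global_scalar=1.0,
    max_workers=4,
)
```
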

sup3r.bias.bias_calc_vortex.VortexMeanPrepper#

class VortexMeanPrepper(path_pattern, in_heights, out_heights, overwrite=False)[source]#

Bases: object

Class for converting monthly vortex tif files for each height to a single h5 file containing all monthly means for all requested output heights.

Parameters:
  • path_pattern (str) – Pattern for input tif files. Needs to include {month} and {height} format keys.
  • in_heights (list) – List of heights for input files.
  • out_heights (list) – List of output heights used for interpolation.
  • overwrite (bool) – Whether to overwrite intermediate netcdf files containing the interpolated masked monthly means.

Methods


convert_all_tifs()

Write netcdf files for all heights for all months.

convert_month_height_tif(month, height)

Get windspeed mean for the given month and hub height from the corresponding input file and write this to a netcdf file.

convert_month_tif(month)

Write netcdf files for all heights for the given month.

get_all_data()

Get interpolated monthly means for all out heights as a dictionary to use for h5 writing.

get_height_files(month)

Get set of netcdf files for given month

get_input_file(month, height)

Get vortex tif file for given month and height.

get_lat_lon()

Get lat lon grid

get_month(month)

Get interpolated means for all hub heights for the given month.

get_output_file(month)

Get name of netcdf file for a given month.

interp(data)

Interpolate data to requested output heights.

run(path_pattern, in_heights, out_heights, ...)

Read vortex tif files, convert these to monthly netcdf files for all input heights, interpolate this data to requested output heights, mask fill values, and write all data to h5 file.

write_data(fp_out, out)

Write monthly means for all heights to h5 file


Attributes


global_attrs

Get dictionary on how this data is prepared

in_features

List of features corresponding to input heights.

input_files

Get list of all input files used for h5 meta.

mask

Mask coordinates without data

meta

Get meta with latitude/longitude

out_features

List of features corresponding to output heights

output_files

List of monthly output files, each containing windspeed for all input heights

time_index

Get time index so output conforms to standard format

property in_features#

List of features corresponding to input heights.

property out_features#

List of features corresponding to output heights.

get_input_file(month, height)[source]#

Get vortex tif file for given month and height.

get_height_files(month)[source]#

Get set of netcdf files for given month.

property input_files#

Get list of all input files used for h5 meta.

get_output_file(month)[source]#

Get name of netcdf file for a given month.

property output_files#

List of monthly output files, each containing windspeed for all input heights.

convert_month_height_tif(month, height)[source]#

Get windspeed mean for the given month and hub height from the corresponding input file and write this to a netcdf file.

convert_month_tif(month)[source]#

Write netcdf files for all heights for the given month.

convert_all_tifs()[source]#

Write netcdf files for all heights for all months.

property mask#

Mask coordinates without data.

get_month(month)[source]#

Get interpolated means for all hub heights for the given month.

Parameters:
  month (str) – Name of month to get data for.
Returns:
  data (xarray.Dataset) – xarray dataset object containing interpolated monthly windspeed means for all input and output heights.

interp(data)[source]#

Interpolate data to requested output heights.

Parameters:
  data (xarray.Dataset) – xarray dataset object containing windspeed for all input heights.
Returns:
  data (xarray.Dataset) – xarray dataset object containing windspeed for all input and output heights.

get_lat_lon()[source]#

Get lat lon grid.

property meta#

Get meta with latitude/longitude.

property time_index#

Get time index so output conforms to standard format.

+get_all_data()[source]#
+

Get interpolated monthly means for all out heights as a dictionary +to use for h5 writing.

+
+
Returns:
+

out (dict) – Dictionary of arrays containing monthly means for each hub height. +Also includes latitude and longitude. Spatial dimensions are +flattened

+
+
+
+ +
+
property global_attrs#

Get dictionary describing how this data was prepared.

write_data(fp_out, out)[source]#

Write monthly means for all heights to h5 file.

classmethod run(path_pattern, in_heights, out_heights, fp_out, overwrite=False)[source]#

Read vortex tif files, convert these to monthly netcdf files for all input heights, interpolate this data to requested output heights, mask fill values, and write all data to h5 file.

Parameters:

  • path_pattern (str) – Pattern for input tif files. Needs to include {month} and {height} format keys.
  • in_heights (list) – List of heights for input files.
  • out_heights (list) – List of output heights used for interpolation.
  • fp_out (str) – Name of final h5 output file to write with means.
  • overwrite (bool) – Whether to overwrite intermediate netcdf files containing the interpolated masked monthly means.
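For orientation, a minimal usage sketch of the run classmethod follows; the file layout, heights, and output path are hypothetical placeholders, not values from this reference:

from sup3r.bias.bias_calc_vortex import VortexMeanPrepper

# Hypothetical layout: one tif per month and hub height; the {month} and
# {height} format keys are filled in by the prepper.
path_pattern = './vortex/{month}/{height}m.tif'

VortexMeanPrepper.run(
    path_pattern=path_pattern,
    in_heights=[10, 100, 200],   # heights available as tif inputs
    out_heights=[40, 80, 120],   # heights to interpolate to
    fp_out='./vortex_monthly_means.h5',
    overwrite=False,
)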

sup3r.bias.bias_calc_vortex#

Classes to compute means from vortex and ERA data and compute bias correction factors.

Vortex mean files can be downloaded from IRENA: https://globalatlas.irena.org/workspace

Classes


BiasCorrectUpdate()

Class for bias correcting existing files and writing corrected files.

VortexMeanPrepper(path_pattern, in_heights, ...)

Class for converting monthly vortex tif files for each height to a single h5 file containing all monthly means for all requested output heights.


sup3r.bias.bias_transforms.global_linear_bc#

global_linear_bc(data, scalar, adder, out_range=None)[source]#

Bias correct data using a simple global *scalar +adder method.

Parameters:

  • data (np.ndarray) – Sup3r input data to be bias corrected, assumed to be 3D with shape (spatial, spatial, temporal) for a single feature.
  • scalar (float) – Scalar (multiplicative) value to apply to input data.
  • adder (float) – Adder value to apply to input data.
  • out_range (None | tuple) – Option to set floor/ceiling values on the output data.

Returns:

out (np.ndarray) – out = data * scalar + adder
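The transform documented above is simple enough to restate in a few lines of numpy; this is an illustrative sketch of out = data * scalar + adder, not the library implementation:

import numpy as np

def global_linear_bc_sketch(data, scalar, adder, out_range=None):
    """Illustrative transform: out = data * scalar + adder, optionally clipped."""
    out = data * scalar + adder
    if out_range is not None:
        out = np.clip(out, out_range[0], out_range[1])  # floor/ceiling
    return out

# 3D (spatial, spatial, temporal) chunk for a single feature
data = np.random.rand(4, 4, 24)
corrected = global_linear_bc_sketch(data, scalar=1.1, adder=-0.2, out_range=(0, 2))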

sup3r.bias.bias_transforms#

Bias correction transformation functions.

TODO: These methods need to be refactored to use lazy calculations. They currently slow down the forward pass runs when operating on full input data volume.

We should write bc factor files in a format compatible with Loaders / Rasterizers so we can use those class methods to match factors with locations.

Functions


global_linear_bc(data, scalar, adder[, ...])

Bias correct data using a simple global *scalar +adder method.

local_linear_bc(data, lat_lon, feature_name, ...)

Bias correct data using a simple annual (or multi-year) *scalar +adder method on a site-by-site basis.

local_presrat_bc(data, lat_lon, base_dset, ...)

Bias correction using PresRat

local_qdm_bc(data, lat_lon, base_dset, ...)

Bias correction using QDM

monthly_local_linear_bc(data, lat_lon, ...)

Bias correct data using a simple monthly *scalar +adder method on a site-by-site basis.


sup3r.bias.bias_transforms.local_linear_bc#

local_linear_bc(data, lat_lon, feature_name, bias_fp, lr_padded_slice=None, out_range=None, smoothing=0)[source]#

Bias correct data using a simple annual (or multi-year) *scalar +adder method on a site-by-site basis.

Parameters:

  • data (np.ndarray) – Sup3r input data to be bias corrected, assumed to be 3D with shape (spatial, spatial, temporal) for a single feature.
  • lat_lon (ndarray) – Array of latitudes and longitudes for the domain to bias correct (n_lats, n_lons, 2)
  • feature_name (str) – Name of feature that is being corrected. Datasets with names "{feature_name}_scalar" and "{feature_name}_adder" will be retrieved from bias_fp.
  • bias_fp (str) – Filepath to bias correction file from the bias calc module. Must have datasets "{feature_name}_scalar" and "{feature_name}_adder" that are the full low-resolution shape of the forward pass input that will be sliced using lr_padded_slice for the current chunk.
  • lr_padded_slice (tuple | None) – Tuple of length four that slices (spatial_1, spatial_2, temporal, features) where each tuple entry is a slice object for that axis. Note that if this method is called as part of a sup3r forward pass, the lr_padded_slice will be included in the kwargs for the active chunk. If this is None, no slicing will be done and the full bias correction source shape will be used.
  • out_range (None | tuple) – Option to set floor/ceiling values on the output data.
  • smoothing (float) – Value to use to smooth the scalar/adder data. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.

Returns:

out (np.ndarray) – out = data * scalar + adder
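A usage sketch follows; the feature name, bias file path, and grid values are hypothetical, but the array shapes match the parameter descriptions above:

import numpy as np
from sup3r.bias.bias_transforms import local_linear_bc

data = np.random.rand(10, 10, 48)          # (spatial, spatial, temporal)

# Build an (n_lats, n_lons, 2) lat/lon array with [lat, lon] pairs
lat = np.linspace(40.0, 39.0, 10)
lon = np.linspace(-105.0, -104.0, 10)
lon2d, lat2d = np.meshgrid(lon, lat)
lat_lon = np.dstack([lat2d, lon2d])

# './bias_factors.h5' must contain '<feature>_scalar' and '<feature>_adder'
out = local_linear_bc(data, lat_lon, 'windspeed_100m', './bias_factors.h5',
                      out_range=(0, 30), smoothing=1.0)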

sup3r.bias.bias_transforms.local_presrat_bc#

local_presrat_bc(data: ndarray, lat_lon: ndarray, base_dset: str, feature_name: str, bias_fp, date_range_kwargs: dict, lr_padded_slice=None, threshold=0.1, relative=True, no_trend=False, delta_denom_min=None, delta_range=None, k_range=None, out_range=None, max_workers=1)[source]#

Bias correction using PresRat

Parameters:

  • data (np.ndarray) – Sup3r input data to be bias corrected, assumed to be 3D with shape (spatial, spatial, temporal) for a single feature.
  • lat_lon (np.ndarray) – Array of latitudes and longitudes for the domain to bias correct (n_lats, n_lons, 2)
  • base_dset – Name of feature that is used as (historical) reference. Dataset with name "base_{base_dset}_params" will be retrieved.
  • feature_name (str) – Name of feature that is being corrected. Datasets with names "bias_{feature_name}_params" and "bias_fut_{feature_name}_params" will be retrieved.
  • bias_fp (str) – Filepath to statistical distributions file from the bias calc module. Must have datasets "bias_{feature_name}_params", "bias_fut_{feature_name}_params", and "base_{base_dset}_params" that are the parameters to define the statistical distributions to be used to correct the given data.
  • date_range_kwargs (dict) – Keyword args for pd.date_range to produce a DatetimeIndex object associated with the input data temporal axis (assumed 3rd axis, e.g. axis=2). Note that if this method is called as part of a sup3r resolution forward pass, the date_range_kwargs will be included automatically for the current chunk.
  • lr_padded_slice (tuple | None) – Tuple of length four that slices (spatial_1, spatial_2, temporal, features) where each tuple entry is a slice object for that axis. Note that if this method is called as part of a sup3r forward pass, the lr_padded_slice will be included automatically in the kwargs for the active chunk. If this is None, no slicing will be done and the full bias correction source shape will be used.
  • threshold (float) – Nearest neighbor euclidean distance threshold. If the coordinates are more than this value away from the bias correction lat/lon, an error is raised.
  • relative (bool) – Apply QDM correction as a relative factor (product); otherwise, it is applied as an offset (sum).
  • no_trend (bool, default=False) – An option to ignore the trend component of the correction, thus resulting in an ordinary Quantile Mapping, i.e. corrects the bias by comparing the distributions of the biased dataset with a reference dataset, without reinforcing the zero rate or applying the k-factor. See params_mf of rex.utilities.bc_utils.QuantileDeltaMapping. Note that this assumes that params_mh is the data distribution representative for the target data.
  • delta_denom_min (float | None) – Option to specify a minimum value for the denominator term in the calculation of a relative delta value. This prevents division by a very small number, which makes the delta blow up and results in very large output bias corrected values. See equation 4 of Cannon et al., 2015 for the delta term. If this is not set, the zero_rate_threshold calculated as part of the presrat bias calculation will be used.
  • delta_range (tuple | None) – Option to set a (min, max) on the delta term in QDM. This can help prevent QDM from making non-realistic increases/decreases in otherwise physical values. See equation 4 of Cannon et al., 2015 for the delta term.
  • k_range (tuple | None) – Option to set a (min, max) value for the k-factor multiplier.
  • out_range (None | tuple) – Option to set floor/ceiling values on the output data.
  • max_workers (int | None) – Max number of workers to use for QDM process pool.
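To illustrate how date_range_kwargs pairs with the temporal axis of data, a hypothetical call is sketched below; the dataset names and file path are placeholders:

# `data` is (spatial, spatial, 365) daily values for one feature; the
# date_range_kwargs below describe exactly those 365 steps.
date_range_kwargs = {'start': '2050-01-01', 'end': '2050-12-31', 'freq': 'D'}

out = local_presrat_bc(
    data, lat_lon,
    base_dset='precipitation',      # placeholder reference dataset name
    feature_name='pr',              # placeholder biased feature name
    bias_fp='./dist_params.h5',     # file written by the PresRat bias calc
    date_range_kwargs=date_range_kwargs,
)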

sup3r.bias.bias_transforms.local_qdm_bc#

local_qdm_bc(data: ndarray, lat_lon: ndarray, base_dset: str, feature_name: str, bias_fp, date_range_kwargs: dict, lr_padded_slice=None, threshold=0.1, relative=True, no_trend=False, delta_denom_min=None, delta_denom_zero=None, delta_range=None, out_range=None, max_workers=1)[source]#

Bias correction using QDM

Apply QDM to correct bias on the given data. It assumes that the required statistical distributions were previously estimated and saved in bias_fp.

Assume the CDF for the nearest day of year (doy) is representative.

Parameters:

  • data (Union[np.ndarray, da.core.Array]) – Sup3r input data to be bias corrected, assumed to be 3D with shape (spatial, spatial, temporal) for a single feature.
  • lat_lon (np.ndarray) – Array of latitudes and longitudes for the domain to bias correct (n_lats, n_lons, 2)
  • base_dset – Name of feature that is used as (historical) reference. Dataset with name "base_{base_dset}_params" will be retrieved.
  • feature_name (str) – Name of feature that is being corrected. Datasets with names "bias_{feature_name}_params" and "bias_fut_{feature_name}_params" will be retrieved.
  • date_range_kwargs (dict) – Keyword args for pd.date_range to produce a DatetimeIndex object associated with the input data temporal axis (assumed 3rd axis, e.g. axis=2). Note that if this method is called as part of a sup3r resolution forward pass, the date_range_kwargs will be included automatically for the current chunk.
  • bias_fp (str) – Filepath to statistical distributions file from the bias calc module. Must have datasets "bias_{feature_name}_params", "bias_fut_{feature_name}_params", and "base_{base_dset}_params" that are the parameters to define the statistical distributions to be used to correct the given data.
  • lr_padded_slice (tuple | None) – Tuple of length four that slices (spatial_1, spatial_2, temporal, features) where each tuple entry is a slice object for that axis. Note that if this method is called as part of a sup3r forward pass, the lr_padded_slice will be included automatically in the kwargs for the active chunk. If this is None, no slicing will be done and the full bias correction source shape will be used.
  • no_trend (bool, default=False) – An option to ignore the trend component of the correction, thus resulting in an ordinary Quantile Mapping, i.e. corrects the bias by comparing the distributions of the biased dataset with a reference dataset. See params_mf of rex.utilities.bc_utils.QuantileDeltaMapping. Note that this assumes that params_mh is the data distribution representative for the target data.
  • delta_denom_min (float | None) – Option to specify a minimum value for the denominator term in the calculation of a relative delta value. This prevents division by a very small number, which makes the delta blow up and results in very large output bias corrected values. See equation 4 of Cannon et al., 2015 for the delta term.
  • delta_denom_zero (float | None) – Option to specify a value to replace zeros in the denominator term in the calculation of a relative delta value. This prevents division by a very small number, which makes the delta blow up and results in very large output bias corrected values. See equation 4 of Cannon et al., 2015 for the delta term.
  • delta_range (tuple | None) – Option to set a (min, max) on the delta term in QDM. This can help prevent QDM from making non-realistic increases/decreases in otherwise physical values. See equation 4 of Cannon et al., 2015 for the delta term.
  • out_range (None | tuple) – Option to set floor/ceiling values on the output data.
  • max_workers (int | None) – Max number of workers to use for QDM process pool.

Returns:

out (np.ndarray) – The input data corrected by QDM. Its shape is the same as the input (spatial, spatial, time_window, temporal). The dimension time_window aligns with the number of time windows defined in a year, while temporal aligns with the time of the data.
See also

sup3r.bias.qdm.QuantileDeltaMappingCorrection
    Estimate probability distributions required by the QDM method.

Notes

Be careful selecting bias_fp. Usually, the input data used here would be related to the dataset used to estimate "bias_fut_{feature_name}_params".

The arguments are kept as consistent as possible with local_linear_bc(): a 4D data array (spatial, spatial, time_window, temporal) and a lat_lon of shape (n_lats, n_lons, [lat, lon]). But QuantileDeltaMapping() from the rex library expects an array of shape (time, space), so the input is re-organized to match that and brought back to (spatial, spatial, time_window, temporal) at the end. This is still better than maintaining the same functionality in two libraries.

Also, rex.utilities.bc_utils.QuantileDeltaMapping expects params to be 2D (space, N-params).

See also

rex.utilities.bc_utils.QuantileDeltaMapping
    Core QDM transformation.

Examples

>>> unbiased = local_qdm_bc(biased_array, lat_lon_array, "ghi", "rsds",
...                         "./dist_params.hdf")

sup3r.bias.bias_transforms.monthly_local_linear_bc#

monthly_local_linear_bc(data, lat_lon, feature_name, bias_fp, date_range_kwargs, lr_padded_slice=None, temporal_avg=True, out_range=None, smoothing=0, scalar_range=None, adder_range=None)[source]#

Bias correct data using a simple monthly *scalar +adder method on a site-by-site basis.

Parameters:

  • data (np.ndarray) – Sup3r input data to be bias corrected, assumed to be 3D with shape (spatial, spatial, temporal) for a single feature.
  • lat_lon (ndarray) – Array of latitudes and longitudes for the domain to bias correct (n_lats, n_lons, 2)
  • feature_name (str) – Name of feature that is being corrected. Datasets with names "{feature_name}_scalar" and "{feature_name}_adder" will be retrieved from bias_fp.
  • bias_fp (str) – Filepath to bias correction file from the bias calc module. Must have datasets "{feature_name}_scalar" and "{feature_name}_adder" that are the full low-resolution shape of the forward pass input that will be sliced using lr_padded_slice for the current chunk.
  • date_range_kwargs (dict) – Keyword args for pd.date_range to produce a DatetimeIndex object associated with the input data temporal axis (assumed 3rd axis, e.g. axis=2). Note that if this method is called as part of a sup3r resolution forward pass, the date_range_kwargs will be included automatically for the current chunk.
  • lr_padded_slice (tuple | None) – Tuple of length four that slices (spatial_1, spatial_2, temporal, features) where each tuple entry is a slice object for that axis. Note that if this method is called as part of a sup3r forward pass, the lr_padded_slice will be included automatically in the kwargs for the active chunk. If this is None, no slicing will be done and the full bias correction source shape will be used.
  • temporal_avg (bool) – Take the average scalars and adders for the chunk's time index; this will smooth the transition of scalars/adders from month to month if processing small chunks. If processing the full annual time index, set this to False.
  • out_range (None | tuple) – Option to set floor/ceiling values on the output data.
  • smoothing (float) – Value to use to smooth the scalar/adder data. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.
  • scalar_range (tuple | None) – Allowed range for the scalar term in the linear bias correction.
  • adder_range (tuple | None) – Allowed range for the adder term in the linear bias correction.

Returns:

out (np.ndarray) – out = data * scalar + adder
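A hypothetical call for a one-month chunk of hourly data is sketched below; the feature name and file path are placeholders:

# One month of hourly data: 31 days * 24 h = 744 steps, so `data` has
# shape (spatial, spatial, 744) and date_range_kwargs describes those steps.
date_range_kwargs = {'start': '2020-01-01', 'end': '2020-01-31 23:00',
                     'freq': 'h'}

out = monthly_local_linear_bc(data, lat_lon, 'windspeed_100m',
                              './bias_factors.h5',
                              date_range_kwargs=date_range_kwargs,
                              temporal_avg=True)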

sup3r.bias#

Bias calculation and correction modules.

base

Data retrieval class for performing / evaluating bias correction.

bias_calc

Utilities to calculate the bias correction factors for biased data that is going to be fed into the sup3r downscaling models.

bias_calc_cli

sup3r bias correction calculation CLI entry points.

bias_calc_vortex

Classes to compute means from vortex and ERA data and compute bias correction factors.

bias_transforms

Bias correction transformation functions.

mixins

Mixin classes to support bias calculation

presrat

QDM related methods to correct bias and trend

qdm

QDM related methods to correct bias and trend

utilities

Bias correction methods which can be applied to data handler data.


sup3r.bias.mixins.FillAndSmoothMixin#

class FillAndSmoothMixin[source]#

Bases: object

Fill and extend parameters for calibration on missing positions

TODO: replace nn_fill_array call with Sup3rX.interpolate_na method

Methods

fill_and_smooth(out[, fill_extend, ...])

For a given set of parameters, fill and extend missing positions

fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)[source]#

For a given set of parameters, fill and extend missing positions

Fill data extending beyond the base meta data extent by doing a nearest neighbor gap fill. Smooth interior and extended region with given smoothing values. Interior smoothing can reduce the effect of extreme values within aggregations over a large number of pixels. The interior is assumed to be defined by the region without nan values. The extended region is assumed to be the region with nan values.

Parameters:

  • out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with nearest neighbor values.
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the threshold input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial domain set by the threshold input. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.

Returns:

out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
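The documented fill-and-smooth behavior can be sketched with scipy primitives; this simplified 2D illustration (nearest-neighbor fill of the nan region, then separate smoothing of the extended and interior regions) is not the sup3r implementation:

import numpy as np
from scipy.ndimage import distance_transform_edt, gaussian_filter

def fill_and_smooth_sketch(arr2d, smooth_extend=1.0, smooth_interior=0.0):
    """Nearest-neighbor fill NaNs, then smooth extended/interior regions."""
    nan_mask = np.isnan(arr2d)

    # Nearest neighbor gap fill: index of the closest non-NaN cell
    idx = distance_transform_edt(nan_mask, return_distances=False,
                                 return_indices=True)
    filled = arr2d[tuple(idx)]

    if smooth_extend > 0:  # smooth only the filled (extended) region
        smoothed = gaussian_filter(filled, smooth_extend, mode='nearest')
        filled[nan_mask] = smoothed[nan_mask]
    if smooth_interior > 0:  # optionally smooth the valid interior too
        smoothed = gaussian_filter(filled, smooth_interior, mode='nearest')
        filled[~nan_mask] = smoothed[~nan_mask]
    return filled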

sup3r.bias.mixins.ZeroRateMixin#

class ZeroRateMixin[source]#

Bases: object

Estimate zero rate [Pierce2015].

References

[Pierce2015] (1,2,3)
Pierce, D. W., Cayan, D. R., Maurer, E. P., Abatzoglou, J. T., & Hegewisch, K. C. (2015). Improved bias correction techniques for hydrological simulations of climate change. Journal of Hydrometeorology, 16(6), 2421-2442.

Methods

zero_precipitation_rate(arr[, threshold])

Rate of (nearly) zero precipitation days

static zero_precipitation_rate(arr: ndarray, threshold: float = 0.0)[source]#

Rate of (nearly) zero precipitation days

Estimate the rate of values less than a given threshold. In concept the threshold would be zero (thus the name zero precipitation rate), but a small threshold is often used to truncate negligible values. For instance, [Pierce2015] uses 0.01 mm/day for PresRat correction.

Parameters:

  • arr (np.ndarray) – An array of values to be analyzed. Usually precipitation, but it could be applied to other quantities.
  • threshold (float) – Minimum value accepted. Less than that is assumed to be zero.

Returns:

rate (float) – Rate of days with negligible precipitation (see Z_gf in [Pierce2015]).

Notes

NaNs are ignored for the rate estimate. Therefore, a large number of NaNs might compromise the confidence of the estimator.

If the input values are all non-finite, it returns NaN.

Examples

>>> ZeroRateMixin().zero_precipitation_rate([2, 3, 4], 1)
0.0

>>> ZeroRateMixin().zero_precipitation_rate([0, 1, 2, 3], 1)
0.25

>>> ZeroRateMixin().zero_precipitation_rate([0, 1, 2, 3, np.nan], 1)
0.25
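The documented behavior (strict threshold comparison, NaNs ignored, all-non-finite input yields NaN) can be reproduced in a few lines of numpy; a sketch consistent with the examples above, not the library code:

import numpy as np

def zero_rate_sketch(arr, threshold=0.0):
    """Fraction of finite values strictly below threshold; NaN if none finite."""
    arr = np.asarray(arr, dtype=float)
    finite = arr[np.isfinite(arr)]
    if finite.size == 0:
        return np.nan
    return float(np.mean(finite < threshold))

assert zero_rate_sketch([2, 3, 4], 1) == 0.0
assert zero_rate_sketch([0, 1, 2, 3], 1) == 0.25
assert zero_rate_sketch([0, 1, 2, 3, np.nan], 1) == 0.25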

sup3r.bias.mixins#

Mixin classes to support bias calculation

Classes

FillAndSmoothMixin()

Fill and extend parameters for calibration on missing positions

ZeroRateMixin()

Estimate zero rate


sup3r.bias.presrat.PresRat#

class PresRat(base_fps, bias_fps, bias_fut_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, bias_fut_handler_kwargs=None, decimals=None, match_zero_rate=False, n_quantiles=101, dist='empirical', relative=True, sampling='linear', log_base=10, n_time_steps=24, window_size=120, pre_load=True)[source]#

Bases: ZeroRateMixin, QuantileDeltaMappingCorrection

PresRat bias correction method (precipitation)

The PresRat correction [Pierce2015] is defined as the combination of three steps:

  • Use the model-predicted change ratio (with the CDFs);
  • The treatment of zero-precipitation days (with the fraction of dry days);
  • The final correction factor (K) to preserve the mean (ratio between both estimated means).

To keep consistency with the full sup3r pipeline, PresRat was implemented as follows:

1. Define zero rate from observations (oh)

Using the historical observations, estimate the zero rate precipitation for each gridpoint. A long time series is expected here, such as decadal or longer. A threshold larger than zero is an option here.

The result is a 2D (space) zero_rate (non-dimensional).

2. Find the threshold for each gridpoint (mh)

Using the zero rate from the previous step, identify the magnitude threshold for each gridpoint that satisfies that dry days rate.

Note that Pierce (2015) imposes tau >= 0.01 mm/day for precipitation.

The result is a 2D (space) threshold tau with the same dimensions as the data being corrected. For instance, it could be mm/day for precipitation.

3. Define Z_fg using tau (mf)

The tau that was defined with the modeled historical is now used as a threshold on the modeled future, before any correction, to define the equivalent zero rate in the future.

The result is a 2D (space) rate (non-dimensional).

4. Estimate tau_fut using Z_fg

Since sup3r processes data in smaller chunks, it wouldn't be possible to apply the rate Z_fg directly. To address that, all modeled future data is corrected with QDM, and applying Z_fg to it defines tau_fut.

References

[Pierce2015] (1,2,3)
Pierce, D. W., Cayan, D. R., Maurer, E. P., Abatzoglou, J. T., & Hegewisch, K. C. (2015). Improved bias correction techniques for hydrological simulations of climate change. Journal of Hydrometeorology, 16(6), 2421-2442.
Parameters:

  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset (observed historical in Cannon et. al. (2015)). This is typically several years of WTK or NSRDB files.
  • bias_fps (list | str) – One or more biased .nc or .h5 filepaths representing the biased data to be compared with the baseline data (modeled historical in Cannon et. al. (2015)). This is typically several years of GCM .nc files.
  • bias_fut_fps (list | str) – Consistent data to bias_fps but for a different time period (modeled future in Cannon et. al. (2015)). This is the dataset that would be corrected, while bias_fps is used to provide a transformation map with the baseline data.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be U_100m or V_100m which will retrieve windspeed and winddirection and derive the U/V component.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset
  • distance_upper_bound (float) – Upper bound on the nearest neighbor distance in decimal degrees. This should be the approximate resolution of the low-resolution bias data. None (default) will calculate this based on the median distance between points in bias_fps
  • target (tuple) – (lat, lon) lower left corner of raster to retrieve from bias_fps. If None then the lower left corner of the full domain will be used.
  • shape (tuple) – (rows, cols) grid size to retrieve from bias_fps. If None then the full domain shape will be used.
  • base_handler (str) – Name of rex resource handler or sup3r.preprocessing.data_handlers class to be retrieved from the rex/sup3r library. If a sup3r.preprocessing.data_handlers class is used, all data will be loaded in this class' initialization and the subsequent bias calculation will be done in serial
  • bias_handler (str) – Name of the bias data handler class to be retrieved from the sup3r.preprocessing.data_handlers library.
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class
  • bias_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class with the bias_fps
  • bias_fut_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class with the bias_fut_fps
  • decimals (int | None) – Option to round bias and base data to this number of decimals, this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • match_zero_rate (bool) – Option to fix the frequency of zero values in the biased data. The lowest percentile of values in the biased data will be set to zero to match the percentile of zeros in the base data. If SkillAssessment is being run and this is True, the distributions will not be mean-centered. This helps resolve the issue where global climate models produce too many days with small precipitation totals, e.g. the "drizzle problem" [Polade2014].
  • dist (str, default="empirical") – Define the type of distribution, which can be "empirical" or any parametric distribution defined in "scipy".
  • n_quantiles (int, default=101) – Defines the number of quantiles (between 0 and 1) for an empirical distribution.
  • sampling (str, default="linear") – Defines how the quantiles are sampled. For instance, 'linear' will result in linearly spaced quantiles. Other options are: 'log' and 'invlog'.
  • log_base (int or float, default=10) – Log base value if sampling is "log" or "invlog".
  • n_time_steps (int) – Number of times to calculate QDM parameters equally distributed along a year. For instance, n_time_steps=1 results in a single set of parameters while n_time_steps=12 is approximately every month.
  • window_size (int) – Total time window period in days to be considered for each time QDM is calculated. For instance, window_size=30 with n_time_steps=12 would result in approximately monthly estimates.
  • pre_load (bool) – Flag to preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns

See also

sup3r.bias.bias_transforms.local_qdm_bc
    Bias correction using QDM.

sup3r.preprocessing.data_handlers.DataHandler
    Bias correction using QDM directly from a derived handler.

rex.utilities.bc_utils.QuantileDeltaMapping
    Quantile Delta Mapping method and support functions. Since rex.utilities.bc_utils is used here, the arguments dist, n_quantiles, sampling, and log_base must be consistent with that package/module.

Notes

One way of using this class is by saving the distribution definitions obtained here with the method write_outputs() and then using that file with local_qdm_bc() or through a derived DataHandler. ATTENTION: be careful handling that file of parameters. There is no checking process and one could misuse the correction estimated for the wrong dataset.

References

[Cannon2015]
Cannon, A. J., Sobie, S. R., & Murdock, T. Q. (2015). Bias correction of GCM precipitation by quantile mapping: how well do methods preserve changes in quantiles and extremes?. Journal of Climate, 28(17), 6938-6959.

Methods


calc_k_factor(base_data, bias_data, ...)

Calculate the K factor at a single spatial location that will preserve the original model-predicted mean change in precipitation

calc_tau_fut(base_data, bias_data, ...[, ...])

Calculate a precipitation threshold (tau) that preserves the model-predicted changes in fraction of dry days at a single spatial location.

compare_dists(base_data, bias_data[, adder, ...])

Compare two distributions using the two-sample Kolmogorov-Smirnov test.

fill_and_smooth(out[, fill_extend, ...])

For a given set of parameters, fill and extend missing positions

get_base_data(base_fps, base_dset, base_gid, ...)

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

get_base_gid(bias_gid)

Get one or more base gid(s) corresponding to a bias gid.

get_bias_data(bias_gid[, bias_dh])

Get data from the biased data source for a single gid

get_bias_gid(coord)

Get the bias gid from a coordinate.

get_data_pair(coord[, daily_reduction])

Get base and bias data observations based on a single bias gid.

get_node_cmd(config)

Get a CLI call to call cls.run() on a single node based on an input config.

get_qdm_params(bias_data, bias_fut_data, ...)

Get quantiles' cut point for given datasets

pre_load()

Preload all data needed for bias correction.

run([fp_out, max_workers, daily_reduction, ...])

Estimate the required information for PresRat correction

window_mask(doy, d0, window_size)

An index of elements within a given time window

write_outputs(fp_out[, out, extra_attrs])

Write outputs to an .h5 file.

zero_precipitation_rate(arr[, threshold])

Rate of (nearly) zero precipitation days


Attributes


distance_upper_bound

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.

meta

Get a meta data dictionary on how these bias factors were calculated

classmethod calc_tau_fut(base_data, bias_data, bias_fut_data, corrected_fut_data, zero_rate_threshold=1.157e-07)[source]#

Calculate a precipitation threshold (tau) that preserves the model-predicted changes in fraction of dry days at a single spatial location.

Parameters:

  • base_data (np.ndarray) – A 1D array of (usually) daily precipitation observations from a historical dataset like Daymet
  • bias_data (np.ndarray) – A 1D array of (usually) daily precipitation historical climate simulation outputs from a GCM dataset from CMIP6
  • bias_fut_data (np.ndarray) – A 1D array of (usually) daily precipitation future (e.g., ssp245) climate simulation outputs from a GCM dataset from CMIP6
  • corrected_fut_data (np.ndarray) – Bias corrected bias_fut_data, usually with relative QDM
  • zero_rate_threshold (float, default=1.157e-7) – Threshold value used to determine the zero rate in the observed historical dataset. For instance, 0.01 means that anything less than that will be considered negligible, hence equal to zero. Dai 2006 defined this as 1mm/day. Pierce 2015 used 0.01mm/day. We recommend 0.01mm/day (1.157e-7 kg/m2/s).

Returns:

  • tau_fut (float) – Precipitation threshold that will preserve the model predicted changes in fraction of dry days. Precipitation less than this value in the modeled future data can be set to zero.
  • obs_zero_rate (float) – Rate of dry days in the observed historical data.
classmethod calc_k_factor(base_data, bias_data, bias_fut_data, corrected_fut_data, base_ti, bias_ti, bias_fut_ti, window_center, window_size, n_time_steps, zero_rate_threshold)[source]#

Calculate the K factor at a single spatial location that will preserve the original model-predicted mean change in precipitation

Parameters:

  • base_data (np.ndarray) – A 1D array of (usually) daily precipitation observations from a historical dataset like Daymet
  • bias_data (np.ndarray) – A 1D array of (usually) daily precipitation historical climate simulation outputs from a GCM dataset from CMIP6
  • bias_fut_data (np.ndarray) – A 1D array of (usually) daily precipitation future (e.g., ssp245) climate simulation outputs from a GCM dataset from CMIP6
  • corrected_fut_data (np.ndarray) – Bias corrected bias_fut_data, usually with relative QDM
  • base_ti (pd.DatetimeIndex) – Datetime index associated with base_data and of the same length
  • bias_fut_ti (pd.DatetimeIndex) – Datetime index associated with bias_fut_data and of the same length
  • window_center (np.ndarray) – Sequence of days of the year equally spaced and shifted by half window size, thus `ntimes`=12 results in approximately [15, 45, ...]. It includes the fraction of a day, thus 15.5 is equivalent to January 15th, 12:00h. Shape is (N,)
  • window_size (int) – Total time window period in days to be considered for each time QDM is calculated. For instance, window_size=30 with n_time_steps=12 would result in approximately monthly estimates.
  • n_time_steps (int) – Number of times to calculate QDM parameters equally distributed along a year. For instance, n_time_steps=1 results in a single set of parameters while n_time_steps=12 is approximately every month.
  • zero_rate_threshold (float, default=1.157e-7) – Threshold value used to determine the zero rate in the observed historical dataset. For instance, 0.01 means that anything less than that will be considered negligible, hence equal to zero. Dai 2006 defined this as 1mm/day. Pierce 2015 used 0.01mm/day. We recommend 0.01mm/day (1.157e-7 kg/m2/s).

Returns:

k (np.ndarray) – K factor from the Pierce 2015 paper with shape (n_time_steps,) for a single spatial location.
run(fp_out=None, max_workers=None, daily_reduction='avg', fill_extend=True, smooth_extend=0, smooth_interior=0, zero_rate_threshold=1.157e-07)[source]#

Estimate the required information for PresRat correction

Parameters:

  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
  • max_workers (int, optional) – Number of workers to run in parallel. 1 is serial and None is all available.
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total)
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with nearest neighbor values.
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the threshold input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial domain set by the threshold input. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.
  • zero_rate_threshold (float, default=1.157e-7) – Threshold value used to determine the zero rate in the observed historical dataset. For instance, 0.01 means that anything less than that will be considered negligible, hence equal to zero. Dai 2006 defined this as 1mm/day. Pierce 2015 used 0.01mm/day. We recommend 0.01mm/day (1.157e-7 kg/m2/s).

Returns:

out (dict) – Dictionary with parameters defining the statistical distributions for each of the three given datasets. Each value has dimensions (lat, lon, n-parameters).
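An end-to-end usage sketch follows; every file path and dataset name here is a hypothetical placeholder:

from sup3r.bias.presrat import PresRat

calc = PresRat(
    base_fps=['./daymet_2000.h5', './daymet_2001.h5'],  # baseline observations
    bias_fps=['./gcm_historical_pr.nc'],                # modeled historical
    bias_fut_fps=['./gcm_ssp245_pr.nc'],                # modeled future
    base_dset='precipitation',
    bias_feature='pr',
    target=(39.0, -105.0),
    shape=(20, 20),
)
out = calc.run(fp_out='./presrat_params.h5', max_workers=4)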
write_outputs(fp_out: str, out: dict | None = None, extra_attrs: dict | None = None)[source]#

Write outputs to an .h5 file.

Parameters:

  • fp_out (str | None) – An HDF5 filename to write the estimated statistical distributions.
  • out (dict, optional) – A dictionary with the three statistical distribution parameters. If not given, it uses the class attribute out.
  • extra_attrs (dict, optional) – Extra attributes to be exported together with the dataset.

Examples

>>> mycalc = PresRat(...)
>>> mycalc.write_outputs(fp_out="myfile.h5", out=mydictdataset,
...   extra_attrs={'zero_rate_threshold': 0.01})
static compare_dists(base_data, bias_data, adder=0, scalar=1)#

Compare two distributions using the two-sample Kolmogorov-Smirnov test. When the output is minimized, the two distributions are similar.

Parameters:

  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • adder (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder
  • scalar (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder

Returns:

out (float) – KS test statistic
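The comparison described above maps directly onto scipy's two-sample KS test; a minimal sketch assuming 1D numpy inputs, not the library implementation:

import numpy as np
from scipy.stats import ks_2samp

def compare_dists_sketch(base_data, bias_data, adder=0, scalar=1):
    """KS statistic between base data and linearly adjusted biased data."""
    return ks_2samp(base_data, bias_data * scalar + adder).statistic

base = np.random.gamma(2.0, 1.0, 1000)
bias = np.random.gamma(2.0, 1.2, 1000)
stat = compare_dists_sketch(base, bias, scalar=1 / 1.2)  # smaller is more similar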
property distance_upper_bound#

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.

fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)#

For a given set of parameters, fill and extend missing positions

Fill data extending beyond the base meta data extent by doing a nearest neighbor gap fill. Smooth interior and extended region with given smoothing values. Interior smoothing can reduce the effect of extreme values within aggregations over a large number of pixels. The interior is assumed to be defined by the region without nan values. The extended region is assumed to be the region with nan values.

Parameters:

  • out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with nearest neighbor values.
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the threshold input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial domain set by the threshold input. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.

Returns:

out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
classmethod get_base_data(base_fps, base_dset, base_gid, base_handler, base_handler_kwargs=None, daily_reduction='avg', decimals=None, base_dh_inst=None)#

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

Parameters:

  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
  • base_dset (str) – A single dataset from the base_fps to retrieve.
  • base_gid (int | np.ndarray) – One or more spatial gids to retrieve from base_fps. The data will be spatially averaged across all of these sites.
  • base_handler (rex.Resource) – A rex data handler similar to rex.Resource or sup3r.DataHandler classes (if using the latter, must also input base_dh_inst)
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total)
  • decimals (int | None) – Option to round bias and base data to this number of decimals, this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • base_dh_inst (sup3r.DataHandler) – Instantiated DataHandler class that has already loaded the base data (required if base files are .nc and are not being opened by a rex Resource handler).

Returns:

  • out_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • out_ti (pd.DatetimeIndex) – DatetimeIndex object of datetimes corresponding to the output data.

get_base_gid(bias_gid)#

Get one or more base gid(s) corresponding to a bias gid.

Parameters:

bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.

Returns:

  • dist (np.ndarray) – Array of nearest neighbor distances with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.
  • base_gid (np.ndarray) – Array of base gids that are the nearest neighbors of bias_gid with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.
get_bias_data(bias_gid, bias_dh=None)#

Get data from the biased data source for a single gid

Parameters:

  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • bias_dh (DataHandler, default=self.bias_dh) – Any DataHandler from sup3r.preprocessing. This optional argument allows an alternative handler other than the usual bias_dh. For instance, the derived QuantileDeltaMappingCorrection uses it to access the reference biased dataset as well as the target biased dataset.

Returns:

bias_data (np.ndarray) – 1D array of temporal data at the requested gid.

get_bias_gid(coord)#

Get the bias gid from a coordinate.

Parameters:

coord (tuple) – (lat, lon) to get data for.

Returns:

  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • d (float) – Distance in decimal degrees from coord to bias gid
get_data_pair(coord, daily_reduction='avg')#

Get base and bias data observations based on a single bias gid.

Parameters:

  • coord (tuple) – (lat, lon) to get data for.
  • daily_reduction (None | str) – Option to do a reduction of the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), "sum" (daily sum/total)

Returns:

  • base_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • bias_data (np.ndarray) – 1D array of temporal data at the requested gid.
  • base_dist (np.ndarray) – Array of nearest neighbor distances from coord to the base data sites with length equal to the number of high-resolution baseline gids that map to the low resolution bias gid pixel.
  • bias_dist (float) – Nearest neighbor distance from coord to the bias data site

classmethod get_node_cmd(config)#

Get a CLI call to call cls.run() on a single node based on an input config.

Parameters:

config (dict) – sup3r bias calc config with all necessary args and kwargs to initialize the class and call run() on a single node.
+
static get_qdm_params(bias_data, bias_fut_data, base_data, bias_feature, base_dset, sampling, n_samples, log_base)#

Get quantiles' cut points for the given datasets.

Estimate the quantiles' cut points for each of the three given datasets. Lacking a good analytical approximation, such as one of the parametric distributions, those quantiles can be used to approximate the statistical distribution of those datasets.

Parameters:
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • bias_fut_data (np.ndarray) – 1D array of biased future data observations.
  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be U_100m or V_100m, which will retrieve windspeed and winddirection and derive the U/V component.
  • sampling (str) – Defines how the quantiles are sampled. For instance, 'linear' will result in linearly spaced quantiles. Other options are 'log' and 'invlog'.
  • n_samples (int) – Number of points to sample between 0 and 1, i.e. the number of quantiles.
  • log_base (int | float) – Log base value.

Returns:
  out (dict) – Dictionary of the quantiles' cut points. Note that to make sense of those cut point values, one needs to know the given arguments, such as log_base. For instance, the sequence [-1, 0, 2] would be, if sampling was linear, the minimum, median, and maximum values, respectively. The expected keys are "bias_{bias_feature}_params", "bias_fut_{bias_feature}_params", and "base_{base_dset}_params".

See also

rex.utilities.bc_utils
    Sampling scales, such as sample_q_linear()
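A hedged sketch of what a call might look like, using synthetic arrays; Calc stands in for a concrete subclass, and the feature names are placeholders:

import numpy as np

# Synthetic 1D samples standing in for real retrievals
rng = np.random.default_rng(0)
bias_data = rng.gamma(2.0, 1.0, size=1000)
bias_fut_data = rng.gamma(2.2, 1.0, size=1000)
base_data = rng.gamma(1.8, 1.0, size=1000)

# Hypothetical call on a concrete subclass `Calc`
params = Calc.get_qdm_params(
    bias_data, bias_fut_data, base_data,
    bias_feature='rsds', base_dset='ghi',
    sampling='linear', n_samples=101, log_base=10,
)
# Expected keys per the docs:
# 'bias_rsds_params', 'bias_fut_rsds_params', 'base_ghi_params'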
property meta#

Get a metadata dictionary on how these bias factors were calculated.

pre_load()#

Preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.
static window_mask(doy, d0, window_size)#

An index of elements within a given time window.

Create an index of days of the year within the target time window. It only considers the day of the year (doy), hence it is limited to time scales smaller than annual.

Parameters:
  • doy (np.ndarray) – An unordered array of days of the year, i.e. January 1st is 1.
  • d0 (int) – Center point of the target window [day of year].
  • window_size (float) – Total size of the target window, i.e. the window covers half this value on each side of d0. Note that it has the same units as doy, thus it is equal to the number of points only if doy is daily.

Returns:
  np.array – A boolean array with the same shape as the given doy, where True means that position is within the target window.

Notes

Leap years have day 366 included in the output index, but a precise shift is not calculated, resulting in a negligible error in large datasets.
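A small sketch of the masking semantics described above, using a plain NumPy re-implementation for illustration; the helper below is hypothetical, not the library's own code, and it omits the wrap-around at year boundaries for brevity:

import numpy as np

def window_mask_sketch(doy, d0, window_size):
    # Illustrative only: True where doy falls within +/- window_size/2
    # of d0. The real implementation also handles windows that wrap
    # around the start/end of the year, which this sketch omits.
    half = window_size / 2
    return (doy >= d0 - half) & (doy <= d0 + half)

doy = np.arange(1, 366)
mask = window_mask_sketch(doy, d0=180, window_size=30)
print(doy[mask][[0, -1]])  # -> [165 195]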
static zero_precipitation_rate(arr: ndarray, threshold: float = 0.0)#

Rate of (nearly) zero precipitation days.

Estimate the rate of values less than a given threshold. In concept the threshold would be zero (thus the name zero precipitation rate), but a small threshold is often used to truncate negligible values. For instance, [Pierce2015] uses 0.01 mm/day for the PresRat correction.

Parameters:
  • arr (np.ndarray) – An array of values to be analyzed. Usually precipitation, but it could be applied to other quantities.
  • threshold (float) – Minimum value accepted. Anything less than that is assumed to be zero.

Returns:
  rate (float) – Rate of days with negligible precipitation (see Z_gf in [Pierce2015]).

Notes

NaNs are ignored for the rate estimate. Therefore, a large number of NaNs might compromise the confidence of the estimator.

If the input values are all non-finite, it returns NaN.

Examples

>>> ZeroRateMixin().zero_precipitation_rate([2, 3, 4], 1)
0.0

>>> ZeroRateMixin().zero_precipitation_rate([0, 1, 2, 3], 1)
0.25

>>> ZeroRateMixin().zero_precipitation_rate([0, 1, 2, 3, np.nan], 1)
0.25
sup3r.bias.presrat#

QDM-related methods to correct bias and trend.

Procedures to apply Quantile Delta Method correction and derived methods such as PresRat.

Classes

PresRat(base_fps, bias_fps, bias_fut_fps, ...)    PresRat bias correction method (precipitation)

sup3r.bias.qdm.QuantileDeltaMappingCorrection#

class QuantileDeltaMappingCorrection(base_fps, bias_fps, bias_fut_fps, base_dset, bias_feature, distance_upper_bound=None, target=None, shape=None, base_handler='Resource', bias_handler='DataHandlerNCforCC', base_handler_kwargs=None, bias_handler_kwargs=None, bias_fut_handler_kwargs=None, decimals=None, match_zero_rate=False, n_quantiles=101, dist='empirical', relative=True, sampling='linear', log_base=10, n_time_steps=24, window_size=120, pre_load=True)[source]#

Bases: FillAndSmoothMixin, DataRetrievalBase

Estimate probability distributions required by Quantile Delta Mapping.

The main purpose of this class is to estimate the probability distributions required by the Quantile Delta Mapping (QDM) technique ([Cannon2015]). The name 'Correction' can therefore be misleading, since this class is not the correction per se; the name was kept for consistency within this module.

The QDM technique corrects bias and trend by comparing the data distributions of three datasets: a historical reference, a biased reference, and a biased target to correct (in Cannon et al. (2015): historical observed, historical modeled, and future modeled, respectively). The three probability distributions provided here can be used, for instance, by local_qdm_bc() to actually correct a dataset.
Parameters:
  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset (observed historical in Cannon et al. (2015)). This is typically several years of WTK or NSRDB files.
  • bias_fps (list | str) – One or more biased .nc or .h5 filepaths representing the biased data to be compared with the baseline data (modeled historical in Cannon et al. (2015)). This is typically several years of GCM .nc files.
  • bias_fut_fps (list | str) – Data consistent with bias_fps but for a different time period (modeled future in Cannon et al. (2015)). This is the dataset that would be corrected, while bias_fps is used to provide a transformation map with the baseline data.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be U_100m or V_100m, which will retrieve windspeed and winddirection and derive the U/V component.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • distance_upper_bound (float) – Upper bound on the nearest neighbor distance in decimal degrees. This should be the approximate resolution of the low-resolution bias data. None (default) will calculate this based on the median distance between points in bias_fps.
  • target (tuple) – (lat, lon) lower left corner of raster to retrieve from bias_fps. If None, the lower left corner of the full domain will be used.
  • shape (tuple) – (rows, cols) grid size to retrieve from bias_fps. If None, the full domain shape will be used.
  • base_handler (str) – Name of a rex resource handler or sup3r.preprocessing.data_handlers class to be retrieved from the rex/sup3r library. If a sup3r.preprocessing.data_handlers class is used, all data will be loaded in this class' initialization and the subsequent bias calculation will be done in serial.
  • bias_handler (str) – Name of the bias data handler class to be retrieved from the sup3r.preprocessing.data_handlers library.
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
  • bias_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class with the bias_fps.
  • bias_fut_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the bias_handler class with the bias_fut_fps.
  • decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • match_zero_rate (bool) – Option to fix the frequency of zero values in the biased data. The lowest percentile of values in the biased data will be set to zero to match the percentile of zeros in the base data. If SkillAssessment is being run and this is True, the distributions will not be mean-centered. This helps resolve the issue where global climate models produce too many days with small precipitation totals, e.g. the "drizzle problem" [Polade2014].
  • dist (str, default="empirical") – Define the type of distribution, which can be "empirical" or any parametric distribution defined in scipy.
  • n_quantiles (int, default=101) – Defines the number of quantiles (between 0 and 1) for an empirical distribution.
  • sampling (str, default="linear") – Defines how the quantiles are sampled. For instance, 'linear' will result in linearly spaced quantiles. Other options are 'log' and 'invlog'.
  • log_base (int or float, default=10) – Log base value if sampling is "log" or "invlog".
  • n_time_steps (int) – Number of times to calculate QDM parameters equally distributed along a year. For instance, n_time_steps=1 results in a single set of parameters while n_time_steps=12 is approximately every month.
  • window_size (int) – Total time window period in days to be considered for each time QDM is calculated. For instance, window_size=30 with n_time_steps=12 would result in approximately monthly estimates.
  • pre_load (bool) – Flag to preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.
See also

sup3r.bias.bias_transforms.local_qdm_bc
    Bias correction using QDM.

sup3r.preprocessing.data_handlers.DataHandler
    Bias correction using QDM directly from a derived handler.

rex.utilities.bc_utils.QuantileDeltaMapping
    Quantile Delta Mapping method and support functions. Since rex.utilities.bc_utils is used here, the arguments dist, n_quantiles, sampling, and log_base must be consistent with that package/module.

Notes

One way of using this class is by saving the distribution definitions obtained here with the method write_outputs() and then using that file with local_qdm_bc() or through a derived DataHandler. ATTENTION: be careful handling that file of parameters. There is no checking process, and one could misuse the correction estimated for the wrong dataset.

References

[Cannon2015]
    Cannon, A. J., Sobie, S. R., & Murdock, T. Q. (2015). Bias correction of GCM precipitation by quantile mapping: how well do methods preserve changes in quantiles and extremes? Journal of Climate, 28(17), 6938-6959.
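A hedged end-to-end sketch of the workflow the Notes describe; the file paths, feature names, and raster extent below are placeholders, not real datasets:

from sup3r.bias.qdm import QuantileDeltaMappingCorrection

# Placeholder inputs; real runs would point at actual WTK/NSRDB and GCM files
calc = QuantileDeltaMappingCorrection(
    base_fps=['/path/to/nsrdb_2015.h5'],      # hypothetical
    bias_fps=['/path/to/gcm_hist.nc'],        # hypothetical
    bias_fut_fps=['/path/to/gcm_future.nc'],  # hypothetical
    base_dset='ghi',
    bias_feature='rsds',
    target=(39.0, -105.5), shape=(10, 10),
)

# Estimate the three distributions per location and save them for
# later use with local_qdm_bc() or a derived DataHandler
out = calc.run(fp_out='./qdm_params.h5', max_workers=4)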

Methods

compare_dists(base_data, bias_data[, adder, ...])    Compare two distributions using the two-sample Kolmogorov-Smirnov.
fill_and_smooth(out[, fill_extend, ...])    For a given set of parameters, fill and extend missing positions.
get_base_data(base_fps, base_dset, base_gid, ...)    Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.
get_base_gid(bias_gid)    Get one or more base gid(s) corresponding to a bias gid.
get_bias_data(bias_gid[, bias_dh])    Get data from the biased data source for a single gid.
get_bias_gid(coord)    Get the bias gid from a coordinate.
get_data_pair(coord[, daily_reduction])    Get base and bias data observations based on a single bias gid.
get_node_cmd(config)    Get a CLI call that invokes cls.run() on a single node based on an input config.
get_qdm_params(bias_data, bias_fut_data, ...)    Get quantiles' cut points for given datasets.
pre_load()    Preload all data needed for bias correction.
run([fp_out, max_workers, daily_reduction, ...])    Estimate the statistical distributions for each location.
window_mask(doy, d0, window_size)    An index of elements within a given time window.
write_outputs(fp_out[, out])    Write outputs to an .h5 file.

Attributes

distance_upper_bound    Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.
meta    Get a metadata dictionary on how these bias factors were calculated.
pre_load()[source]#

Preload all data needed for bias correction. This is currently recommended to improve performance with the new sup3r data handler access patterns.

static get_qdm_params(bias_data, bias_fut_data, base_data, bias_feature, base_dset, sampling, n_samples, log_base)[source]#

Get quantiles' cut points for the given datasets.

Estimate the quantiles' cut points for each of the three given datasets. Lacking a good analytical approximation, such as one of the parametric distributions, those quantiles can be used to approximate the statistical distribution of those datasets.

Parameters:
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • bias_fut_data (np.ndarray) – 1D array of biased future data observations.
  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_feature (str) – This is the biased feature from bias_fps to retrieve. This should be a single feature name corresponding to base_dset.
  • base_dset (str) – A single dataset from the base_fps to retrieve. In the case of wind components, this can be U_100m or V_100m, which will retrieve windspeed and winddirection and derive the U/V component.
  • sampling (str) – Defines how the quantiles are sampled. For instance, 'linear' will result in linearly spaced quantiles. Other options are 'log' and 'invlog'.
  • n_samples (int) – Number of points to sample between 0 and 1, i.e. the number of quantiles.
  • log_base (int | float) – Log base value.

Returns:
  out (dict) – Dictionary of the quantiles' cut points. Note that to make sense of those cut point values, one needs to know the given arguments, such as log_base. For instance, the sequence [-1, 0, 2] would be, if sampling was linear, the minimum, median, and maximum values, respectively. The expected keys are "bias_{bias_feature}_params", "bias_fut_{bias_feature}_params", and "base_{base_dset}_params".

See also

rex.utilities.bc_utils
    Sampling scales, such as sample_q_linear()
write_outputs(fp_out, out=None)[source]#

Write outputs to an .h5 file.

Parameters:
  • fp_out (str | None) – An HDF5 filename to write the estimated statistical distributions.
  • out (dict, optional) – A dictionary with the three statistical distribution parameters. If not given, it uses out.

run(fp_out=None, max_workers=None, daily_reduction='avg', fill_extend=True, smooth_extend=0, smooth_interior=0)[source]#

Estimate the statistical distributions for each location.

Parameters:
  • fp_out (str | None) – Optional .h5 output file to write scalar and adder arrays.
  • max_workers (int, optional) – Number of workers to run in parallel. 1 is serial and None is all available.
  • daily_reduction (None | str) – Option to reduce the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), or "sum" (daily sum/total).

Returns:
  out (dict) – Dictionary with parameters defining the statistical distributions for each of the three given datasets. Each value has dimensions (lat, lon, n-parameters).
static compare_dists(base_data, bias_data, adder=0, scalar=1)#

Compare two distributions using the two-sample Kolmogorov-Smirnov test. When the output is minimized, the two distributions are similar.

Parameters:
  • base_data (np.ndarray) – 1D array of base data observations.
  • bias_data (np.ndarray) – 1D array of biased data observations.
  • adder (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder.
  • scalar (float) – Factor to adjust the biased data before comparing distributions: bias_data * scalar + adder.

Returns:
  out (float) – KS test statistic.
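A brief sketch of how this comparison could be exercised with synthetic data; the class name Calc stands in for any concrete subclass:

import numpy as np

rng = np.random.default_rng(42)
base = rng.normal(10.0, 2.0, size=5000)
bias = rng.normal(12.0, 2.0, size=5000)  # shifted distribution

# Raw comparison vs. one with a corrective adder applied
ks_raw = Calc.compare_dists(base, bias)
ks_adj = Calc.compare_dists(base, bias, adder=-2.0)
# ks_adj should be smaller, since subtracting 2 re-centers the bias data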
property distance_upper_bound#

Maximum distance (float) to map high-resolution data from exo_source to the low-resolution file_paths input.
fill_and_smooth(out, fill_extend=True, smooth_extend=0, smooth_interior=0)#

For a given set of parameters, fill and extend missing positions.

Fill data extending beyond the base meta data extent by doing a nearest neighbor gap fill. Smooth the interior and extended region with the given smoothing values. Interior smoothing can reduce the effect of extreme values within aggregations over a large number of pixels. The interior is assumed to be defined by the region without NaN values; the extended region is assumed to be the region with NaN values.

Parameters:
  • out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
  • fill_extend (bool) – Whether to fill data extending beyond the base meta data with nearest neighbor values.
  • smooth_extend (float) – Option to smooth the scalar/adder data outside of the spatial domain set by the threshold input. This alleviates the weird seams far from the domain of interest. This value is the standard deviation for the gaussian_filter kernel.
  • smooth_interior (float) – Value to use to smooth the scalar/adder data inside of the spatial domain set by the threshold input. This can reduce the effect of extreme values within aggregations over a large number of pixels. This value is the standard deviation for the gaussian_filter kernel.

Returns:
  out (dict) – Dictionary of values defining the mean/std of the bias + base data and the scalar + adder factors to correct the biased data like: bias_data * scalar + adder. Each value is of shape (lat, lon, time).
classmethod get_base_data(base_fps, base_dset, base_gid, base_handler, base_handler_kwargs=None, daily_reduction='avg', decimals=None, base_dh_inst=None)#

Get data from the baseline data source, possibly for many high-res base gids corresponding to a single coarse low-res bias gid.

Parameters:
  • base_fps (list | str) – One or more baseline .h5 filepaths representing non-biased data to use to correct the biased dataset. This is typically several years of WTK or NSRDB files.
  • base_dset (str) – A single dataset from the base_fps to retrieve.
  • base_gid (int | np.ndarray) – One or more spatial gids to retrieve from base_fps. The data will be spatially averaged across all of these sites.
  • base_handler (rex.Resource) – A rex data handler similar to rex.Resource or sup3r.DataHandler classes (if using the latter, must also input base_dh_inst).
  • base_handler_kwargs (dict | None) – Optional kwargs to send to the initialization of the base_handler class.
  • daily_reduction (None | str) – Option to reduce the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), or "sum" (daily sum/total).
  • decimals (int | None) – Option to round bias and base data to this number of decimals; this gets passed to np.around(). If decimals is negative, it specifies the number of positions to the left of the decimal point.
  • base_dh_inst (sup3r.DataHandler) – Instantiated DataHandler class that has already loaded the base data (required if base files are .nc and are not being opened by a rex Resource handler).

Returns:
  • out_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • out_ti (pd.DatetimeIndex) – DatetimeIndex object of datetimes corresponding to the output data.
get_base_gid(bias_gid)#

Get one or more base gid(s) corresponding to a bias gid.

Parameters:
  bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.

Returns:
  • dist (np.ndarray) – Array of nearest neighbor distances with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.
  • base_gid (np.ndarray) – Array of base gids that are the nearest neighbors of bias_gid, with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.

get_bias_data(bias_gid, bias_dh=None)#

Get data from the biased data source for a single gid.

Parameters:
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • bias_dh (DataHandler, default=self.bias_dh) – Any DataHandler from sup3r.preprocessing. This optional argument allows an alternative handler other than the usual bias_dh. For instance, the derived QuantileDeltaMappingCorrection uses it to access the reference biased dataset as well as the target biased dataset.

Returns:
  bias_data (np.ndarray) – 1D array of temporal data at the requested gid.

get_bias_gid(coord)#

Get the bias gid from a coordinate.

Parameters:
  coord (tuple) – (lat, lon) to get data for.

Returns:
  • bias_gid (int) – gid of the data to retrieve in the bias data source raster data. The gids for this data source are the enumerated indices of the flattened coordinate array.
  • d (float) – Distance in decimal degrees from coord to the bias gid.

get_data_pair(coord, daily_reduction='avg')#

Get base and bias data observations based on a single bias gid.

Parameters:
  • coord (tuple) – (lat, lon) to get data for.
  • daily_reduction (None | str) – Option to reduce the hourly+ source base data to daily data. Can be None (no reduction, keep source time frequency), "avg" (daily average), "max" (daily max), "min" (daily min), or "sum" (daily sum/total).

Returns:
  • base_data (np.ndarray) – 1D array of base data spatially averaged across the base_gid input and possibly daily-averaged or min/max'd as well.
  • bias_data (np.ndarray) – 1D array of temporal data at the requested gid.
  • base_dist (np.ndarray) – Array of nearest neighbor distances from coord to the base data sites with length equal to the number of high-resolution baseline gids that map to the low-resolution bias gid pixel.
  • bias_dist (float) – Nearest neighbor distance from coord to the bias data site.

classmethod get_node_cmd(config)#

Get a CLI call that invokes cls.run() on a single node based on an input config.

Parameters:
  config (dict) – sup3r bias calc config with all necessary args and kwargs to initialize the class and call run() on a single node.

property meta#

Get a metadata dictionary on how these bias factors were calculated.

static window_mask(doy, d0, window_size)[source]#

An index of elements within a given time window.

Create an index of days of the year within the target time window. It only considers the day of the year (doy), hence it is limited to time scales smaller than annual.

Parameters:
  • doy (np.ndarray) – An unordered array of days of the year, i.e. January 1st is 1.
  • d0 (int) – Center point of the target window [day of year].
  • window_size (float) – Total size of the target window, i.e. the window covers half this value on each side of d0. Note that it has the same units as doy, thus it is equal to the number of points only if doy is daily.

Returns:
  np.array – A boolean array with the same shape as the given doy, where True means that position is within the target window.

Notes

Leap years have day 366 included in the output index, but a precise shift is not calculated, resulting in a negligible error in large datasets.
sup3r.bias.qdm#

QDM-related methods to correct bias and trend.

Procedures to apply Quantile Delta Method correction and derived methods such as PresRat.

Classes

QuantileDeltaMappingCorrection(base_fps, ...)    Estimate probability distributions required by Quantile Delta Mapping

sup3r.bias.utilities.bias_correct_feature#

bias_correct_feature(source_feature, input_handler, bc_method, bc_kwargs, time_slice=None)[source]#

Bias correct data using a method defined by the bias_correct_method input to ForwardPassStrategy.

Parameters:
  • source_feature (str) – The source feature name corresponding to the output feature name.
  • input_handler (DataHandler) – DataHandler storing raw input data previously used as input for forward passes. This is assumed to have data with shape (lats, lons, time, features), which can be accessed through the handler with handler[feature, lat_slice, lon_slice, time_slice].
  • bc_method (Callable) – Bias correction method from bias_transforms.py.
  • bc_kwargs (dict) – Dictionary of keyword arguments for bc_method.
  • time_slice (slice | None) – Optional time slice to restrict the bias correction domain.

Returns:
  data (Union[np.ndarray, da.core.Array]) – Data corrected by the bias_correct_method, ready for input to the forward pass through the generative model.
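A hedged sketch of wiring this helper up; the handler, feature name, and bc_kwargs contents below are illustrative placeholders, not a definitive recipe:

from sup3r.bias.bias_transforms import local_linear_bc

# `handler` is assumed to be an already-initialized DataHandler; the
# kwargs dict is hypothetical and would hold whatever local_linear_bc
# actually requires for a given correction file
corrected = bias_correct_feature(
    source_feature='windspeed_100m',            # illustrative feature
    input_handler=handler,
    bc_method=local_linear_bc,
    bc_kwargs={'bias_fp': './bc_factors.h5'},   # hypothetical kwargs
)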

sup3r.bias.utilities.bias_correct_features#

bias_correct_features(features, input_handler, bc_method, bc_kwargs, time_slice=None)[source]#

Bias correct all feature data using a method defined by the bias_correct_method input to ForwardPassStrategy.

sup3r.bias.utilities#

Bias correction methods which can be applied to data handler data.

Functions

bias_correct_feature(source_feature, ...[, ...])    Bias correct data using a method defined by the bias_correct_method input to ForwardPassStrategy
bias_correct_features(features, ...[, ...])    Bias correct all feature data using a method defined by the bias_correct_method input to ForwardPassStrategy
lin_bc(handler, bc_files[, bias_feature, ...])    Bias correct the data in this DataHandler in place using linear bias correction factors from files output by MonthlyLinearCorrection or LinearCorrection from sup3r.bias.bias_calc
qdm_bc(handler, bc_files, bias_feature[, ...])    Bias Correction using Quantile Delta Mapping

sup3r.bias.utilities.lin_bc#

lin_bc(handler, bc_files, bias_feature=None, threshold=0.1)[source]#

Bias correct the data in this DataHandler in place using linear bias correction factors from files output by MonthlyLinearCorrection or LinearCorrection from sup3r.bias.bias_calc.

Parameters:
  • handler (DataHandler) – DataHandler instance with a .data attribute containing the data to bias correct.
  • bc_files (list | tuple | str) – One or more filepaths to .h5 files output by MonthlyLinearCorrection or LinearCorrection. These should contain datasets named "{feature}_scalar" and "{feature}_adder" where {feature} is one of the features contained by this DataHandler and the data is a 3D array of shape (lat, lon, time) where time is length 1 for annual correction or 12 for monthly correction.
  • bias_feature (str | None) – Name of the feature used as a reference. Datasets with names "base_{bias_feature}_scalar" and "base_{bias_feature}_adder" will be retrieved from bc_files.
  • threshold (float) – Nearest neighbor euclidean distance threshold. If the DataHandler coordinates are more than this value away from the bias correction lat/lon, an error is raised.
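A short usage sketch, assuming a handler already holds the feature to correct; the .h5 path is a placeholder:

# `handler` is assumed to be a DataHandler whose .data contains the
# feature named in the bc file; './lin_bc_factors.h5' is hypothetical
lin_bc(handler, bc_files='./lin_bc_factors.h5', threshold=0.1)
# handler.data is now corrected in place via data * scalar + adder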

sup3r.bias.utilities.qdm_bc#

qdm_bc(handler, bc_files, bias_feature, relative=True, threshold=0.1, no_trend=False)[source]#

Bias Correction using Quantile Delta Mapping.

Bias correct this DataHandler's data with Quantile Delta Mapping. The required statistical distributions should be pre-calculated using sup3r.bias.bias_calc.QuantileDeltaMappingCorrection.

Warning: There is no guarantee that the coefficients from bc_files match the resource processed here. Be careful choosing bc_files.

Parameters:
  • handler (DataHandler) – DataHandler instance with a .data attribute containing the data to bias correct.
  • bc_files (list | tuple | str) – One or more filepaths to .h5 files output by bias_calc.QuantileDeltaMappingCorrection. These should contain datasets named "base_{bias_feature}_params", "bias_{feature}_params", and "bias_fut_{feature}_params" where {feature} is one of the features contained by this DataHandler and the data is a 3D array of shape (lat, lon, time).
  • bias_feature (str) – Name of the feature used as the (historical) reference. A dataset with name "base_{bias_feature}_params" will be retrieved from bc_files.
  • relative (bool, default=True) – Switch to apply QDM as a relative (True) or absolute (False) correction value.
  • threshold (float, default=0.1) – Nearest neighbor euclidean distance threshold. If the DataHandler coordinates are more than this value away from the bias correction lat/lon, an error is raised.
  • no_trend (bool, default=False) – An option to ignore the trend component of the correction, thus resulting in an ordinary Quantile Mapping, i.e. corrects the bias by comparing the distributions of the biased dataset with a reference dataset. See params_mf of rex.utilities.bc_utils.QuantileDeltaMapping. Note that this assumes that "bias_{feature}_params" (params_mh) is the data distribution representative of the target data.
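A sketch tying this to the QuantileDeltaMappingCorrection workflow above; the parameter file path is the hypothetical output of an earlier run(fp_out=...) call:

# `handler` is assumed to be a DataHandler loaded with 'rsds';
# './qdm_params.h5' is the hypothetical file written earlier by
# QuantileDeltaMappingCorrection.run(fp_out=...)
qdm_bc(handler, bc_files='./qdm_params.h5', bias_feature='rsds',
       relative=True, no_trend=False)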
sup3r.cli#


Sup3r command line interface (CLI).


sup3r#

Super Resolving Renewable Energy Resource Data (SUP3R)

batch    sup3r batch utilities based on GAPS batch module
bias    Bias calculation and correction modules.
cli    Sup3r command line interface (CLI).
models    Sup3r Model Software
pipeline    Sup3r data pipeline architecture.
postprocessing    Post processing module
preprocessing    Sup3r preprocessing module.
qa    sup3r QA module.
solar    Custom sup3r solar module.
utilities    Sup3r utilities

sup3r.models.abstract.AbstractInterface#

class AbstractInterface[source]#

Bases: ABC

Abstract class to define the required interface for Sup3r model subclasses.

Note that this only sets the required interfaces for a GAN that can be loaded from disk and used to predict synthetic outputs. The interface for models that can be trained will be set in another class.
Methods

generate(low_res[, norm_in, un_norm_out, ...])    Use the generator model to generate high res data from low res input.
get_s_enhance_from_layers()    Compute the factor by which the model will enhance spatial resolution from layer attributes.
get_t_enhance_from_layers()    Compute the factor by which the model will enhance temporal resolution from layer attributes.
load(model_dir[, verbose])    Load the GAN with its sub-networks from a previously saved-to output directory.
save_params(out_dir)
seed([s])    Set the random seed for reproducible results.
set_model_params(**kwargs)    Set parameters used for training the model.

Attributes

hr_exo_features    Get the list of high-resolution exogenous filter names the model uses.
hr_out_features    Get the list of high-resolution output feature names that the generative model outputs.
input_dims    Get the dimension of the model generator input.
input_resolution    Resolution of input data.
is_4d    Check if the model expects spatial-only input.
is_5d    Check if the model expects spatiotemporal input.
lr_features    Get a list of low-resolution features input to the generative model.
meta    Get the meta data dictionary that defines how the model was created.
model_params    Model parameters, used to save the model to disk.
output_resolution    Resolution of output data.
s_enhance    Factor by which the model will enhance spatial resolution.
s_enhancements    List of spatial enhancement factors.
smoothed_features    Get the list of smoothed input feature names that the generative model was trained on.
smoothing    Value of the smoothing parameter used in gaussian filtering of coarsened high res data.
t_enhance    Factor by which the model will enhance temporal resolution.
t_enhancements    List of temporal enhancement factors.
version_record    A record of important versions that this model was built with.
abstract classmethod load(model_dir, verbose=True)[source]#

Load the GAN with its sub-networks from a previously saved-to output directory.

Parameters:
  • model_dir – Directory to load GAN model files from.
  • verbose (bool) – Flag to log information about the loaded model.

Returns:
  out (BaseModel) – Returns a pretrained GAN model that was previously saved to model_dir.

abstract generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)[source]#

Use the generator model to generate high res data from low res input. This is the public generate function.

static seed(s=0)[source]#

Set the random seed for reproducible results.

Parameters:
  s (int) – Random seed

property input_dims#

Get the dimension of the model generator input. This is usually 4D for spatial models and 5D for spatiotemporal models. This gives the input to the first step if the model is multi-step. Returns 5 for linear models.

Returns:
  int
property is_5d#

Check if the model expects spatiotemporal input.

property is_4d#

Check if the model expects spatial-only input.

get_s_enhance_from_layers()[source]#

Compute the factor by which the model will enhance spatial resolution from layer attributes. Used in model training during high res coarsening.

get_t_enhance_from_layers()[source]#

Compute the factor by which the model will enhance temporal resolution from layer attributes. Used in model training during high res coarsening.

property s_enhance#

Factor by which the model will enhance spatial resolution. Used in model training during high res coarsening and also in the forward pass routine to determine the shape of needed exogenous data.

property t_enhance#

Factor by which the model will enhance temporal resolution. Used in model training during high res coarsening and also in the forward pass routine to determine the shape of needed exogenous data.

property s_enhancements#

List of spatial enhancement factors. In the case of a single step model this is just [self.s_enhance]. This is used to determine shapes of needed exogenous data in the forward pass routine.

property t_enhancements#

List of temporal enhancement factors. In the case of a single step model this is just [self.t_enhance]. This is used to determine shapes of needed exogenous data in the forward pass routine.
property input_resolution#

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units are not strict as long as the resolution of the exogenous data, when extracting exogenous data, is specified in the same units.

property output_resolution#

Resolution of output data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. This is computed from the input resolution and the enhancement factors.

abstract property meta#

Get the meta data dictionary that defines how the model was created.

property lr_features#

Get a list of low-resolution features input to the generative model. This includes low-resolution features that might be supplied exogenously at inference time but that were in the low-res batches during training.

property hr_out_features#

Get the list of high-resolution output feature names that the generative model outputs.

property hr_exo_features#

Get the list of high-resolution exogenous filter names the model uses. If the model has N concat or add layers, this list will be the last N features in the training features list. The ordering is assumed to be the same as the order of concat or add layers. If the training features are [..., topo, sza] and the model has 2 concat or add layers, the exo features will be [topo, sza]. Topo will then be used in the first concat layer and sza will be used in the second.

property smoothing#

Value of the smoothing parameter used in gaussian filtering of coarsened high res data.

property smoothed_features#

Get the list of smoothed input feature names that the generative model was trained on.
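A short illustration of the resolution bookkeeping described above, with made-up numbers:

# Hypothetical values for a 4x spatial / 12x temporal enhancement model
input_resolution = {'spatial': '48km', 'temporal': '60min'}
# output_resolution would then follow from the enhancement factors:
output_resolution = {'spatial': '12km', 'temporal': '5min'}  # 48/4, 60/12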
property model_params#

Model parameters, used to save the model to disk.

Returns:
  dict

property version_record#

A record of important versions that this model was built with.

Returns:
  dict

set_model_params(**kwargs)[source]#

Set parameters used for training the model.

Parameters:
  kwargs (dict) – Keyword arguments including 'input_resolution', 'lr_features', 'hr_exo_features', 'hr_out_features', 'smoothed_features', 's_enhance', 't_enhance', 'smoothing'.

save_params(out_dir)[source]#

Parameters:
  out_dir (str) – Directory to save linear model params. This directory will be created if it does not already exist.

sup3r.models.abstract.AbstractSingleModel#

class AbstractSingleModel[source]#

Bases: ABC, TensorboardMixIn

Abstract class to define the required training interface for Sup3r model subclasses.
Methods

dict_to_tensorboard(entry)    Write data to the tensorboard log file.
early_stop(history, column[, threshold, n_epoch])    Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.
finish_epoch(epoch, epochs, t0, ...[, extras])    Perform finishing checks after an epoch is done training.
generate(low_res[, norm_in, un_norm_out, ...])    Use the generator model to generate high res data from low res input.
get_high_res_exo_input(high_res)    Get exogenous feature data from high_res.
get_loss_fun(loss)    Get the initialized loss function class from the sup3r loss library or the tensorflow losses.
get_optimizer_config(optimizer)    Get a config that defines the current model optimizer.
get_optimizer_state(optimizer)    Get a set of state variables for the optimizer.
get_single_grad(low_res, hi_res_true, ...[, ...])    Run gradient descent for one mini-batch of (low_res, hi_res_true); do not update weights, just return gradient details.
init_optimizer(optimizer, learning_rate)    Initialize keras optimizer object.
load_network(model, name)    Load a CustomNetwork object from a hidden layers config, .json file config, or .pkl file saved pre-trained model.
load_saved_params(out_dir[, verbose])    Load saved model_params (you need this and the gen+disc models to load a full model).
log_loss_details(loss_details[, level])    Log the loss details to the module logger.
norm_input(low_res)    Normalize low resolution data being input to the generator.
profile_to_tensorboard(name)    Write profile data to the tensorboard log file.
run_gradient_descent(low_res, hi_res_true, ...)    Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights.
save(out_dir)    Save the model with its sub-networks to a directory.
set_norm_stats(new_means, new_stdevs)    Set the normalization statistics associated with a data batch handler to model attributes.
un_norm_output(output)    Un-normalize synthetically generated output data to physical units.
update_loss_details(loss_details, new_data, ...)    Update a dictionary of loss_details with loss information from a new batch.

Attributes

generator    Get the generative model.
generator_weights    Get a list of layer weights and bias terms for the generator model.
history    Model training history DataFrame (None if not yet trained).
means    Get the data normalization mean values.
optimizer    Get the tensorflow optimizer to perform gradient descent calculations for the generative network.
stdevs    Get the data normalization standard deviation values.
total_batches    Record of the total number of batches for logging.
load_network(model, name)[source]#

Load a CustomNetwork object from a hidden layers config, a .json file config, or a .pkl file saved pre-trained model.

Parameters:
  • model (str | dict) – Model hidden layers config, a .json with "hidden_layers" key, or a .pkl for a saved pre-trained model.
  • name (str) – Name of the model to be loaded.

Returns:
  model (phygnn.CustomNetwork) – CustomNetwork object initialized from the model input.

property means#

Get the data normalization mean values.

Returns:
  np.ndarray

property stdevs#

Get the data normalization standard deviation values.

Returns:
  np.ndarray
set_norm_stats(new_means, new_stdevs)[source]#

Set the normalization statistics associated with a data batch handler to model attributes.

Parameters:
  • new_means (dict | None) – Set of mean values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.
  • new_stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.

norm_input(low_res)[source]#

Normalize low resolution data being input to the generator.

Parameters:
  low_res (np.ndarray) – Un-normalized low-resolution input data in physical units, usually a 4D or 5D array of shape (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features).

Returns:
  low_res (np.ndarray) – Normalized low-resolution input data, usually a 4D or 5D array of shape (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features).

un_norm_output(output)[source]#

Un-normalize synthetically generated output data to physical units.

Parameters:
  output (tf.Tensor | np.ndarray) – Synthetically generated high-resolution data.

Returns:
  output (np.ndarray) – Synthetically generated high-resolution data.
property optimizer#

Get the tensorflow optimizer to perform gradient descent calculations for the generative network. This is functionally identical to optimizer_disc if no special optimizer model or learning rate was specified for the disc.

Returns:
  tf.keras.optimizers.Optimizer

property history#

Model training history DataFrame (None if not yet trained).

Returns:
  pandas.DataFrame | None

property generator#

Get the generative model.

Returns:
  phygnn.base.CustomNetwork

property generator_weights#

Get a list of layer weights and bias terms for the generator model.

Returns:
  list
static init_optimizer(optimizer, learning_rate)[source]#

Initialize a keras optimizer object.

Parameters:
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config from tf.keras.optimizers.get_config(). None defaults to Adam.
  • learning_rate (float, optional) – Optimizer learning rate. Not used if the optimizer input arg is a pre-initialized object or a config dict.

Returns:
  optimizer (tf.keras.optimizers.Optimizer) – Initialized optimizer object.

static load_saved_params(out_dir, verbose=True)[source]#

Load saved model_params (you need this and the gen+disc models to load a full model).

Parameters:
  • out_dir (str) – Directory to load model files from.
  • verbose (bool) – Flag to log information about the loaded model.

Returns:
  params (dict) – Model parameters loaded from the disk json file. This should be the same as self.model_params with an additional 'history' entry. Should be all the kwargs you need to init a model.
get_high_res_exo_input(high_res)[source]#

Get exogenous feature data from high_res.

Parameters:
  high_res (tf.Tensor) – Ground truth high resolution spatiotemporal data.

Returns:
  exo_data (dict) – Dictionary of exogenous feature data used as input to tf_generate, e.g. {'topography': tf.Tensor(...)}.

static get_loss_fun(loss)[source]#

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

Parameters:
  loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics (prioritized) or tensorflow.keras.losses. Defaults to tf.keras.losses.MeanSquaredError. This can be provided as a dict with kwargs for loss functions with extra parameters, e.g. {'SpatialExtremesLoss': {'weight': 0.5}}.

Returns:
  out (tf.keras.losses.Loss) – Initialized loss function class that is callable, e.g. if "MeanSquaredError" is requested, this will return an instance of tf.keras.losses.MeanSquaredError().
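A sketch of the two documented ways of specifying a loss, on a hypothetical AbstractSingleModel subclass named MyModel:

# Plain class name: resolves to tf.keras.losses.MeanAbsoluteError
loss_fn = MyModel.get_loss_fun('MeanAbsoluteError')

# Dict form for a sup3r loss with extra parameters (per the docstring)
loss_fn = MyModel.get_loss_fun({'SpatialExtremesLoss': {'weight': 0.5}})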
static get_optimizer_config(optimizer)[source]#

Get a config that defines the current model optimizer.

Parameters:
  optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

Returns:
  config (dict) – Optimizer config

classmethod get_optimizer_state(optimizer)[source]#

Get a set of state variables for the optimizer.

Parameters:
  optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

Returns:
  state (dict) – Optimizer state variables

static update_loss_details(loss_details, new_data, batch_len, prefix=None)[source]#

Update a dictionary of loss_details with loss information from a new batch.

Parameters:
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a running average at the current state in the epoch.
  • new_data (dict) – Namespace of the breakdown of loss components for a single new batch.
  • batch_len (int) – Length of the incoming batch.
  • prefix (None | str) – Option to prefix the names of the loss data when saving to the loss_details dictionary.

Returns:
  loss_details (dict) – Same as the input loss_details but with running averages updated.
+static log_loss_details(loss_details, level='INFO')[source]#
+

Log the loss details to the module logger.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • level (str) – Log level (e.g. INFO, DEBUG)

  • +
+
+
+
+ +
+
+static early_stop(history, column, threshold=0.005, n_epoch=5)[source]#
+

Determine whether to stop training early based on nearly no change +to validation loss for a certain number of consecutive epochs.

+
+
Parameters:
+
    +
  • history (pd.DataFrame | None) – Model training history

  • +
  • column (str) – Column from the model training history to evaluate for early +termination.

  • +
  • threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002 which would be +less than the default thresold of 0.01 and would satisfy the +condition for early termination.

  • +
  • n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
+
+
Returns:
+

stop (bool) – Flag to stop training (True) or keep going (False).

+
+
+
+ +
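
A hedged usage sketch against a toy history (the column name matches the early_stop_on example in finish_epoch below; the loss values are invented):

import pandas as pd
from sup3r.models.base import Sup3rGan

history = pd.DataFrame(
    {'validation_loss_gen': [0.100, 0.0999, 0.0999, 0.0998, 0.0998, 0.0998]}
)
stop = Sup3rGan.early_stop(history, 'validation_loss_gen',
                           threshold=0.005, n_epoch=5)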

abstract save(out_dir)[source]#

Save the model with its sub-networks to a directory.

Parameters:
  out_dir (str) – Directory to save model files. This directory will be created if it does not already exist.

finish_epoch(epoch, epochs, t0, loss_details, checkpoint_int, out_dir, early_stop_on, early_stop_threshold, early_stop_n_epoch, extras=None)[source]#

Perform finishing checks after an epoch is done training

Parameters:
  • epoch (int) – Epoch number that is finishing
  • epochs (list) – List of epochs being iterated through
  • t0 (float) – Starting time of training.
  • loss_details (dict) – Namespace of the breakdown of loss components
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.
  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in the directory name. This directory will be created if it does not already exist.
  • early_stop_on (str | None) – If not None, this should be a column in the training history to evaluate for early stopping (e.g. validation_loss_gen, validation_loss_disc). If this value in this history decreases by an absolute fractional relative difference of less than 0.01 for more than 5 epochs in a row, the training will stop early.
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that warrants an early stop.
  • extras (dict | None) – Extra kwargs/parameters to save in the epoch history.

Returns:
  stop (bool) – Flag to early stop training.

run_gradient_descent(low_res, hi_res_true, training_weights, optimizer=None, multi_gpu=False, **calc_loss_kwargs)[source]#

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

Parameters:
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)
  • training_weights (list) – A list of layer weights that are to-be-trained based on the current loss weight values.
  • optimizer (tf.keras.optimizers.Optimizer) – Optimizer class to use to update weights. This can be different if you’re training just the generator or one of the discriminator models. Defaults to the generator optimizer.
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with.
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

Returns:
  loss_details (dict) – Namespace of the breakdown of loss components

generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)[source]#

Use the generator model to generate high res data from low res input. This is the public generate function.

Parameters:
  • low_res (np.ndarray) – Low-resolution input data, usually a 4D or 5D array of shape: (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features)
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, self._stdevs attributes are available. The generator should always receive normalized data with mean=0 stdev=1. This also normalizes hi_res_topo.
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical units
  • exogenous_data (dict | ExoData | None) – Special dictionary (class:ExoData) of exogenous feature data with entries describing whether features should be combined at input, a mid network layer, or with output. This doesn’t have to include the ‘model’ key since this data is for a single step model.

Returns:
  hi_res (ndarray) – Synthetically generated high-resolution data, usually a 4D or 5D array with shape: (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features)
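
A minimal end-to-end generation sketch (the model directory and array shape are hypothetical; a real model constrains n_features to its lr_features):

import numpy as np
from sup3r.models.base import Sup3rGan

model = Sup3rGan.load('./gan_model_dir')  # hypothetical saved-model directory
low_res = np.random.uniform(size=(1, 10, 10, 2)).astype(np.float32)  # 4D spatial-only input
hi_res = model.generate(low_res, norm_in=True, un_norm_out=True)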

dict_to_tensorboard(entry)#

Write data to tensorboard log file. This is usually a loss_details dictionary.

Parameters:
  entry (dict) – Dictionary of values to write to tensorboard log file

get_single_grad(low_res, hi_res_true, training_weights, device_name=None, **calc_loss_kwargs)[source]#

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

Parameters:
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)
  • training_weights (list) – A list of layer weights that are to-be-trained based on the current loss weight values.
  • device_name (None | str) – Optional tensorflow device name for GPU placement. Note that if a GPU is available, variables will be placed on that GPU even if device_name=None.
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

Returns:
  • grad (list) – a list or nested structure of Tensors (or IndexedSlices, or None, or CompositeTensor) representing the gradients for the training_weights
  • loss_details (dict) – Namespace of the breakdown of loss components

profile_to_tensorboard(name)#

Write profile data to tensorboard log file.

Parameters:
  name (str) – Tag name to use for profile info

property total_batches#

Record of total number of batches for logging.

diff --git a/_autosummary/sup3r.models.abstract.TensorboardMixIn.html b/_autosummary/sup3r.models.abstract.TensorboardMixIn.html
new file mode 100644
index 000000000..6521695c0

sup3r.models.abstract.TensorboardMixIn#

class TensorboardMixIn[source]#

Bases: object

MixIn class for tensorboard logging and profiling.

Methods

  dict_to_tensorboard(entry)      Write data to tensorboard log file.
  profile_to_tensorboard(name)    Write profile data to tensorboard log file.

Attributes

  total_batches    Record of total number of batches for logging.

property total_batches#

Record of total number of batches for logging.

dict_to_tensorboard(entry)[source]#

Write data to tensorboard log file. This is usually a loss_details dictionary.

Parameters:
  entry (dict) – Dictionary of values to write to tensorboard log file

profile_to_tensorboard(name)[source]#

Write profile data to tensorboard log file.

Parameters:
  name (str) – Tag name to use for profile info
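
A hedged usage sketch (the mixin is inherited by model classes such as Sup3rGan, so these methods are normally called on a model instance during training; the directory and loss entry are hypothetical):

from sup3r.models.base import Sup3rGan

model = Sup3rGan.load('./gan_model_dir')             # hypothetical saved-model directory
model.dict_to_tensorboard({'train_loss_gen': 0.42})  # hypothetical loss_details entry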

diff --git a/_autosummary/sup3r.models.abstract.html b/_autosummary/sup3r.models.abstract.html
new file mode 100644
index 000000000..d64e70502

sup3r.models.abstract#

Abstract class defining the required interface for Sup3r model subclasses

Classes

  AbstractInterface()      Abstract class to define the required interface for Sup3r model subclasses
  AbstractSingleModel()    Abstract class to define the required training interface for Sup3r model subclasses
  TensorboardMixIn()       MixIn class for tensorboard logging and profiling.

diff --git a/_autosummary/sup3r.models.base.Sup3rGan.html b/_autosummary/sup3r.models.base.Sup3rGan.html
new file mode 100644
index 000000000..24dbd169b
sup3r.models.base.Sup3rGan#

class Sup3rGan(gen_layers, disc_layers, loss='MeanSquaredError', optimizer=None, learning_rate=0.0001, optimizer_disc=None, learning_rate_disc=None, history=None, meta=None, means=None, stdevs=None, default_device=None, name=None)[source]#

Bases: AbstractSingleModel, AbstractInterface

Basic sup3r GAN model.

Parameters:
  • gen_layers (list | str) – Hidden layers input argument to phygnn.base.CustomNetwork for the generative super resolving model. Can also be a str filepath to a .json config file containing the input layers argument or a .pkl for a saved pre-trained model.
  • disc_layers (list | str) – Hidden layers input argument to phygnn.base.CustomNetwork for the discriminative model (spatial or spatiotemporal discriminator). Can also be a str filepath to a .json config file containing the input layers argument or a .pkl for a saved pre-trained model.
  • loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics (prioritized) or tensorflow.keras.losses. Defaults to tf.keras.losses.MeanSquaredError. This can be provided as a dict with kwargs for loss functions with extra parameters, e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config from tf.keras.optimizers.get_config(). None defaults to Adam.
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a pre-initialized object or if optimizer input arg is a config dict.
  • optimizer_disc (tf.keras.optimizers.Optimizer | dict | None) – Same as optimizer input, but if specified this makes a different optimizer just for the discriminator network (spatial or spatiotemporal disc).
  • learning_rate_disc (float, optional) – Same as learning_rate input, but if specified this makes a different learning_rate just for the discriminator network (spatial or spatiotemporal disc).
  • history (pd.DataFrame | str | None) – Model training history with “epoch” index, str pointing to a saved history csv file with “epoch” as first column, or None for clean history
  • meta (dict | None) – Model meta data that describes how the model was created.
  • means (dict | None) – Set of mean values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.
  • stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.
  • default_device (str | None) – Option for default device placement of model weights. If None and a single GPU exists, that GPU will be the default device. If None and multiple GPUs exist, the CPU will be the default device (this was tested as most efficient given the custom multi-gpu strategy developed in self.run_gradient_descent()). Examples: “/gpu:0” or “/cpu:0”
  • name (str | None) – Optional name for the GAN.
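
A hedged construction sketch (the json layer-config paths are hypothetical placeholders):

from sup3r.models.base import Sup3rGan

model = Sup3rGan(
    gen_layers='./gen_config.json',    # hypothetical generator layer config
    disc_layers='./disc_config.json',  # hypothetical discriminator layer config
    loss='MeanSquaredError',
    learning_rate=1e-4,
)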

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

calc_loss(hi_res_true, hi_res_gen[, ...])

Calculate the GAN loss function using generated and true high resolution data.

calc_loss_disc(disc_out_true, disc_out_gen)

Calculate the loss term for the discriminator model (either the spatial or temporal discriminator).

calc_loss_gen_advers(disc_out_gen)

Calculate the adversarial component of the loss term for the generator model.

calc_loss_gen_content(hi_res_true, hi_res_gen)

Calculate the content loss term for the generator model.

calc_val_loss(batch_handler, ...)

Calculate the validation loss at the current state of model training

check_batch_handler_attrs(batch_handler)

Not all batch handlers have the following attributes.

dict_to_tensorboard(entry)

Write data to tensorboard log file.

discriminate(hi_res[, norm_in])

Run the discriminator model on a hi resolution input field.

early_stop(history, column[, threshold, n_epoch])

Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.

finish_epoch(epoch, epochs, t0, ...[, extras])

Perform finishing checks after an epoch is done training

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_high_res_exo_input(high_res)

Get exogenous feature data from high_res

get_loss_fun(loss)

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

get_optimizer_config(optimizer)

Get a config that defines the current model optimizer

get_optimizer_state(optimizer)

Get a set of state variables for the optimizer

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_single_grad(low_res, hi_res_true, ...[, ...])

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

get_weight_update_fraction(history, ...[, ...])

Get the factor by which to multiply previous adversarial loss weight

init_optimizer(optimizer, learning_rate)

Initialize keras optimizer object.

init_weights(lr_shape, hr_shape[, device])

Initialize the generator and discriminator weights with device placement.

load(model_dir[, verbose])

Load the GAN with its sub-networks from a previously saved-to output directory.

load_network(model, name)

Load a CustomNetwork object from hidden layers config, .json file config, or .pkl file saved pre-trained model.

load_saved_params(out_dir[, verbose])

Load saved model_params (you need this and the gen+disc models to load a full model).

log_loss_details(loss_details[, level])

Log the loss details to the module logger.

norm_input(low_res)

Normalize low resolution data being input to the generator.

profile_to_tensorboard(name)

Write profile data to tensorboard log file.

run_gradient_descent(low_res, hi_res_true, ...)

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

save(out_dir)

Save the GAN with its sub-networks to a directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

set_norm_stats(new_means, new_stdevs)

Set the normalization statistics associated with a data batch handler to model attributes.

train(batch_handler, input_resolution, n_epoch)

Train the GAN model on real low res data and real high res data

train_epoch(batch_handler, ...[, multi_gpu])

Train the GAN for one epoch.

un_norm_output(output)

Un-normalize synthetically generated output data to physical units

update_adversarial_weights(history, ...)

Update spatial / temporal adversarial loss weights based on training fraction history.

update_loss_details(loss_details, new_data, ...)

Update a dictionary of loss_details with loss information from a new batch.

update_optimizer([option])

Update optimizer by changing current configuration

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

discriminator

Get the discriminator model.

discriminator_weights

Get a list of layer weights and bias terms for the discriminator model.

generator

Get the generative model.

generator_weights

Get a list of layer weights and bias terms for the generator model.

history

Model training history DataFrame (None if not yet trained)

hr_exo_features

Get list of high-resolution exogenous filter names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get meta data dictionary that defines how the model was created

model_params

Model parameters, used to save model to disk

optimizer

Get the tensorflow optimizer to perform gradient descent calculations for the generative network.

optimizer_disc

Get the tensorflow optimizer to perform gradient descent calculations for the discriminator network.

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

total_batches

Record of total number of batches for logging.

version_record

A record of important versions that this model was built with.

weights

Get a list of all the layer weights and bias terms for the generator and discriminator networks

+
+
+
+save(out_dir)[source]#
+

Save the GAN with its sub-networks to a directory.

+
+
Parameters:
+

out_dir (str) – Directory to save GAN model files. This directory will be created +if it does not already exist.

+
+
+
+ +
+
+classmethod load(model_dir, verbose=True)[source]#
+

Load the GAN with its sub-networks from a previously saved-to output +directory.

+
+
Parameters:
+
    +
  • model_dir (str) – Directory to load GAN model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (BaseModel) – Returns a pretrained gan model that was previously saved to model_dir

+
+
+
+ +
+
+property discriminator#
+

Get the discriminator model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property discriminator_weights#
+

Get a list of layer weights and bias terms for the discriminator +model.

+
+
Returns:
+

list

+
+
+
+ +
+
+discriminate(hi_res, norm_in=False)[source]#
+

Run the discriminator model on a hi resolution input field.

+
+
Parameters:
+
    +
  • hi_res (np.ndarray) – Real or fake high res data in a 4D or 5D tensor: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, self._stdevs attributes are available. The disc should always receive normalized data with mean=0 stdev=1.

  • +
+
+
Returns:
+

out (np.ndarray) – Discriminator output logits

+
+
+
+ +
+
+property optimizer_disc#
+

Get the tensorflow optimizer to perform gradient descent +calculations for the discriminator network.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+

update_optimizer(option='generator', **kwargs)[source]#

Update optimizer by changing current configuration

Parameters:
  • option (str) – Which optimizer to update. Can be “generator”, “discriminator”, or “all”
  • kwargs (dict) – kwargs to use for optimizer configuration update
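
A hedged call sketch, continuing from the construction example above (learning_rate is a standard key in the keras Adam optimizer config, used here illustratively):

model.update_optimizer(option='generator', learning_rate=5e-5)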
+
+property meta#
+

Get meta data dictionary that defines how the model was created

+
+ +
+
+property model_params#
+

Model parameters, used to save model to disk

+
+
Returns:
+

dict

+
+
+
+ +
+
+property weights#
+

Get a list of all the layer weights and bias terms for the +generator and discriminator networks

+
+ +
+
+init_weights(lr_shape, hr_shape, device=None)[source]#
+

Initialize the generator and discriminator weights with device +placement.

+
+
Parameters:
+
    +
  • lr_shape (tuple) – Shape of one batch of low res input data for sup3r resolution. Note that the batch size (axis=0) must be included, but the actual batch size doesn't really matter.

  • hr_shape (tuple) – Shape of one batch of high res input data for sup3r resolution. Note that the batch size (axis=0) must be included, but the actual batch size doesn't really matter.

  • +
  • device (str | None) – Option to place model weights on a device. If None, +self.default_device will be used.

  • +
+
+
+
+ +
+

static get_weight_update_fraction(history, comparison_key, update_bounds=(0.5, 0.95), update_frac=0.0)[source]#

Get the factor by which to multiply previous adversarial loss weight

Parameters:
  • history (dict) – Dictionary with information on how often discriminators were trained during previous epoch.
  • comparison_key (str) – history key to use for update check
  • update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If history[comparison_key] < update_bounds[0] then the weight will be increased by (1 + update_frac). If history[comparison_key] > update_bounds[1] then the weight will be decreased by 1 / (1 + update_frac).
  • update_frac (float) – Fraction by which to increase/decrease adversarial loss weight

Returns:
  float – Factor by which to multiply old weight to get updated weight
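
To make the rule concrete, a standalone restatement for intuition (values invented; not the internal implementation):

def weight_update_factor(value, update_bounds=(0.5, 0.95), update_frac=0.1):
    """Documented update rule: raise the weight when the discriminator
    trained rarely, lower it when the discriminator trained often."""
    if value < update_bounds[0]:
        return 1 + update_frac
    if value > update_bounds[1]:
        return 1 / (1 + update_frac)
    return 1.0

assert weight_update_factor(0.3) == 1.1       # below bounds -> increase weight
assert weight_update_factor(0.99) == 1 / 1.1  # above bounds -> decrease weight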
+
+calc_loss_gen_content(hi_res_true, hi_res_gen)[source]#
+

Calculate the content loss term for the generator model.

+
+
Parameters:
+
    +
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • +
  • hi_res_gen (tf.Tensor) – Superresolved high resolution spatiotemporal data generated by the +generative model.

  • +
+
+
Returns:
+

loss_gen_s (tf.Tensor) – 0D tensor generator model loss for the content loss comparing the +hi res ground truth to the hi res synthetically generated output.

+
+
+
+ +
+
+static calc_loss_gen_advers(disc_out_gen)[source]#
+

Calculate the adversarial component of the loss term for the +generator model.

+
+
Parameters:
+

disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model +predicting only on hi_res_gen (not on hi_res_true).

+
+
Returns:
+

loss_gen_advers (tf.Tensor) – 0D tensor generator model loss for the adversarial component of the +generator loss term.

+
+
+
+ +
+
+static calc_loss_disc(disc_out_true, disc_out_gen)[source]#
+

Calculate the loss term for the discriminator model (either the +spatial or temporal discriminator).

+
+
Parameters:
+
    +
  • disc_out_true (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting +only on ground truth data hi_res_true (not on hi_res_gen).

  • +
  • disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting +only on synthetic data hi_res_gen (not on hi_res_true).

  • +
+
+
Returns:
+

loss_disc (tf.Tensor) – 0D tensor discriminator model loss for either the spatial or +temporal component of the super resolution generated output.

+
+
+
+ +
+
+calc_loss(hi_res_true, hi_res_gen, weight_gen_advers=0.001, train_gen=True, train_disc=False)[source]#
+

Calculate the GAN loss function using generated and true high +resolution data.

+
+
Parameters:
+
    +
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • +
  • hi_res_gen (tf.Tensor) – Superresolved high resolution spatiotemporal data generated by the +generative model.

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – True if generator is being trained, then loss=loss_gen

  • +
  • train_disc (bool) – True if disc is being trained, then loss=loss_disc

  • +
+
+
Returns:
+

    +
  • loss (tf.Tensor) – 0D tensor representing the loss value for the network being trained +(either generator or one of the discriminators)

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+calc_val_loss(batch_handler, weight_gen_advers, loss_details)[source]#
+

Calculate the validation loss at the current state of model training

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+
+
Returns:
+

loss_details (dict) – Same as input but now includes val_* loss info

+
+
+
+ +
+
+train_epoch(batch_handler, weight_gen_advers, train_gen, train_disc, disc_loss_bounds, multi_gpu=False)[source]#
+

Train the GAN for one epoch.

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • +
  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • +
  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which the discriminators will not train unless train_disc=True and train_gen=False.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with. If true and multiple gpus +are found, default_device device should be set to /cpu:0

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+update_adversarial_weights(history, adaptive_update_fraction, adaptive_update_bounds, weight_gen_advers, train_disc)[source]#
+

Update spatial / temporal adversarial loss weights based on training +fraction history.

+
+
Parameters:
+
    +
  • history (dict) – Dictionary with information on how often discriminators +were trained during current and previous epochs.

  • +
  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial loss weights +for adaptive updates

  • +
  • adaptive_update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If +history[comparison_key] < update_bounds[0] then the weight will be +increased by (1 + update_frac). If history[comparison_key] > +update_bounds[1] then the weight will be decreased by 1 / (1 + +update_frac).

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_disc (bool) – Whether the discriminator was set to be trained during the +previous epoch

  • +
+
+
Returns:
+

weight_gen_advers (float) – Updated weight factor for the adversarial loss component of the +generator vs. the discriminator.

+
+
+
+ +
+
+static check_batch_handler_attrs(batch_handler)[source]#
+

Not all batch handlers have the following attributes, so we perform some sanitization before sending to set_model_params

+
+ +
+
+dict_to_tensorboard(entry)#
+

Write data to tensorboard log file. This is usually a loss_details +dictionary.

+
+
Parameters:
+

entry (dict) – Dictionary of values to write to tensorboard log file

+
+
+
+ +
+
+static early_stop(history, column, threshold=0.005, n_epoch=5)#
+

Determine whether to stop training early based on nearly no change +to validation loss for a certain number of consecutive epochs.

+
+
Parameters:
+
    +
  • history (pd.DataFrame | None) – Model training history

  • +
  • column (str) – Column from the model training history to evaluate for early +termination.

  • +
  • threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • +
  • n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
+
+
Returns:
+

stop (bool) – Flag to stop training (True) or keep going (False).

+
+
+
+ +
+
+finish_epoch(epoch, epochs, t0, loss_details, checkpoint_int, out_dir, early_stop_on, early_stop_threshold, early_stop_n_epoch, extras=None)#
+

Perform finishing checks after an epoch is done training

+
+
Parameters:
+
    +
  • epoch (int) – Epoch number that is finishing

  • +
  • epochs (list) – List of epochs being iterated through

  • +
  • t0 (float) – Starting time of training.

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to +evaluate for early stopping (e.g. validation_loss_gen, +validation_loss_disc). If this value in this history decreases by +an absolute fractional relative difference of less than 0.01 for +more than 5 epochs in a row, the training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • extras (dict | None) – Extra kwargs/parameters to save in the epoch history.

  • +
+
+
Returns:
+

stop (bool) – Flag to early stop training.

+
+
+
+ +
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution input data, usually a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, self._stdevs attributes are available. The generator should always receive normalized data with mean=0 stdev=1. This also normalizes hi_res_topo.

  • +
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical +units

  • +
  • exogenous_data (dict | ExoData | None) – Special dictionary (class:ExoData) of exogenous feature data with +entries describing whether features should be combined at input, a +mid network layer, or with output. This doesn’t have to include +the ‘model’ key since this data is for a single step model.

  • +
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data, usually a 4D or 5D +array with shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
+
+property generator#
+

Get the generative model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property generator_weights#
+

Get a list of layer weights and bias terms for the generator model.

+
+
Returns:
+

list

+
+
+
+ +
+
+get_high_res_exo_input(high_res)#
+

Get exogenous feature data from high_res

+
+
Parameters:
+

high_res (tf.Tensor) – Ground truth high resolution spatiotemporal data.

+
+
Returns:
+

exo_data (dict) – Dictionary of exogenous feature data used as input to tf_generate. +e.g. {'topography': tf.Tensor(...)}

+
+
+
+ +
+
+static get_loss_fun(loss)#
+

Get the initialized loss function class from the sup3r loss library +or the tensorflow losses.

+
+
Parameters:
+

loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics +(prioritized) or tensorflow.keras.losses. Defaults to +tf.keras.losses.MeanSquaredError. This can be provided as a dict +with kwargs for loss functions with extra parameters. +e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}

+
+
Returns:
+

out (tf.keras.losses.Loss) – Initialized loss function class that is callable, e.g. if +“MeanSquaredError” is requested, this will return +an instance of tf.keras.losses.MeanSquaredError()

+
+
+
+ +
+
+static get_optimizer_config(optimizer)#
+

Get a config that defines the current model optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

config (dict) – Optimizer config

+
+
+
+ +
+
+classmethod get_optimizer_state(optimizer)#
+

Get a set of state variables for the optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

state (dict) – Optimizer state variables

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_single_grad(low_res, hi_res_true, training_weights, device_name=None, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true), +do not update weights, just return gradient details.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • device_name (None | str) – Optional tensorflow device name for GPU placement. Note that if a +GPU is available, variables will be placed on that GPU even if +device_name=None.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

    +
  • grad (list) – a list or nested structure of Tensors (or IndexedSlices, or None, +or CompositeTensor) representing the gradients for the +training_weights

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+property history#
+

Model training history DataFrame (None if not yet trained)

+
+
Returns:
+

pandas.DataFrame | None

+
+
+
+ +
+
+property hr_exo_features#
+

Get the list of high-resolution exogenous feature names the model uses. If the model has N concat or add layers this list will be the last N features in the training features list. The ordering is assumed to be the same as the order of concat or add layers. If the training features are […, topo, sza] and the model has 2 concat or add layers, the exo features will be [topo, sza]. Topo will then be used in the first concat layer and sza will be used in the second

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the +generative model outputs.

+
+ +
+
+static init_optimizer(optimizer, learning_rate)#
+

Initialize keras optimizer object.

+
+
Parameters:
+
    +
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config +from tf.keras.optimizers.get_config(). None defaults to Adam.

  • +
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a +pre-initialized object or if optimizer input arg is a config dict.

  • +
+
+
Returns:
+

optimizer (tf.keras.optimizers.Optimizer) – Initialized optimizer object.

+
+
+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary +{'spatial': '...km', 'temporal': '...min'}. The numbers are +required to be integers in the units specified. The units are not +strict as long as the resolution of the exogenous data, when extracting +exogenous data, is specified in the same units.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+load_network(model, name)#
+

Load a CustomNetwork object from hidden layers config, .json file +config, or .pkl file saved pre-trained model.

+
+
Parameters:
+
    +
  • model (str | dict) – Model hidden layers config, a .json with “hidden_layers” key, or a +.pkl for a saved pre-trained model.

  • +
  • name (str) – Name of the model to be loaded

  • +
+
+
Returns:
+

model (phygnn.CustomNetwork) – CustomNetwork object initialized from the model input.

+
+
+
+ +
+
+static load_saved_params(out_dir, verbose=True)#
+

Load saved model_params (you need this and the gen+disc models +to load a full model).

+
+
Parameters:
+
    +
  • out_dir (str) – Directory to load model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

params (dict) – Model parameters loaded from the disk json file. This should be the same as self.model_params with an additional ‘history’ entry. Should be all the kwargs you need to init a model.

+
+
+
+ +
+
+static log_loss_details(loss_details, level='INFO')#
+

Log the loss details to the module logger.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • level (str) – Log level (e.g. INFO, DEBUG)

  • +
+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property means#
+

Get the data normalization mean values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+norm_input(low_res)#
+

Normalize low resolution data being input to the generator.

+
+
Parameters:
+

low_res (np.ndarray) – Un-normalized low-resolution input data in physical units, usually +a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
Returns:
+

low_res (np.ndarray) – Normalized low-resolution input data, usually a 4D or 5D array of +shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
+
+property optimizer#
+

Get the tensorflow optimizer to perform gradient descent calculations for the generative network. This is functionally identical to optimizer_disc if no special optimizer or learning rate was specified for the disc.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
+
+profile_to_tensorboard(name)#
+

Write profile data to tensorboard log file.

+
+
Parameters:
+

name (str) – Tag name to use for profile info

+
+
+
+ +
+
+run_gradient_descent(low_res, hi_res_true, training_weights, optimizer=None, multi_gpu=False, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true) +and update weights

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • optimizer (tf.keras.optimizers.Optimizer) – Optimizer class to use to update weights. This can be different if +you’re training just the generator or one of the discriminator +models. Defaults to the generator optimizer.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+set_norm_stats(new_means, new_stdevs)#
+

Set the normalization statistics associated with a data batch +handler to model attributes.

+
+
Parameters:
+
    +
  • new_means (dict | None) – Set of mean values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • new_stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+property total_batches#
+

Record of total number of batches for logging.

+
+ +
+
+un_norm_output(output)#
+

Un-normalize synthetically generated output data to physical units

+
+
Parameters:
+

output (tf.Tensor | np.ndarray) – Synthetically generated high-resolution data

+
+
Returns:
+

output (np.ndarray) – Synthetically generated high-resolution data

+
+
+
+ +
+
+static update_loss_details(loss_details, new_data, batch_len, prefix=None)#
+

Update a dictionary of loss_details with loss information from a new +batch.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • new_data (dict) – Namespace of the breakdown of loss components for a single new +batch.

  • +
  • batch_len (int) – Length of the incoming batch.

  • +
  • prefix (None | str) – Option to prefix the names of the loss data when saving to the +loss_details dictionary.

  • +
+
+
Returns:
+

loss_details (dict) – Same as input loss_details but with running averages updated.

+
+
+
+ +
+
+property version_record#
+

A record of important versions that this model was built with.

+
+
Returns:
+

dict

+
+
+
+ +
+
+train(batch_handler, input_resolution, n_epoch, weight_gen_advers=0.001, train_gen=True, train_disc=True, disc_loss_bounds=(0.45, 0.6), checkpoint_int=None, out_dir='./gan_{epoch}', early_stop_on=None, early_stop_threshold=0.005, early_stop_n_epoch=5, adaptive_update_bounds=(0.9, 0.99), adaptive_update_fraction=0.0, multi_gpu=False, tensorboard_log=True, tensorboard_profile=False)[source]#
+

Train the GAN model on real low res data and real high res data

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • input_resolution (dict) – Dictionary specifying spatiotemporal input resolution. e.g. +{‘temporal’: ‘60min’, ‘spatial’: ‘30km’}

  • +
  • n_epoch (int) – Number of epochs to train on

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • +
  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • +
  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which +the discriminator will not train unless train_disc=True and +train_gen=False.

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint GAN models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to +evaluate for early stopping (e.g. validation_loss_gen, +validation_loss_disc). If this value in this history decreases by +an absolute fractional relative difference of less than 0.01 for +more than 5 epochs in a row, the training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • adaptive_update_bounds (tuple) – Tuple specifying allowed range for loss_details[comparison_key]. If +history[comparison_key] < threshold_range[0] then the weight will +be increased by (1 + update_frac). If history[comparison_key] > +threshold_range[1] then the weight will be decreased by 1 / (1 + +update_frac).

  • +
  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial weights for +adaptive updates

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with. If true and multiple gpus +are found, default_device device should be set to /cpu:0

  • +
  • tensorboard_log (bool) – Whether to write log file for use with tensorboard. Log data can +be viewed with tensorboard --logdir <logdir> where <logdir> +is the parent directory of out_dir, and pointing the browser to +the printed address.

  • +
  • tensorboard_profile (bool) – Whether to export profiling information to tensorboard. This can +then be viewed in the tensorboard dashboard under the profile tab


TODO: (1) args here are getting excessive. Might be time for some refactoring. (2) calc_val_loss should be done in a separate thread from train_epoch so they can be done concurrently. This would be especially important for batch handlers which require val data, like dc handlers. (3) Would like an automatic way to exit the batch handler thread instead of manually calling .stop() here.
+
+
+
+ +
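
A hedged end-to-end training sketch (the layer configs and batch handler are assumed to be built elsewhere; see sup3r.preprocessing.BatchHandler):

from sup3r.models.base import Sup3rGan

model = Sup3rGan('./gen_config.json', './disc_config.json', learning_rate=1e-4)  # hypothetical configs
model.train(
    batch_handler,  # a sup3r.preprocessing.BatchHandler, assumed built elsewhere
    input_resolution={'spatial': '30km', 'temporal': '60min'},
    n_epoch=10,
    checkpoint_int=5,
    out_dir='./gan_{epoch}',
    early_stop_on='validation_loss_gen',
)
model.save('./gan_final')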

diff --git a/_autosummary/sup3r.models.base.html b/_autosummary/sup3r.models.base.html
new file mode 100644
index 000000000..8be00cc3f
sup3r.models.base#

Sup3r model software

Classes

  Sup3rGan(gen_layers, disc_layers[, loss, ...])    Basic sup3r GAN model.

diff --git a/_autosummary/sup3r.models.conditional.Sup3rCondMom.html b/_autosummary/sup3r.models.conditional.Sup3rCondMom.html
new file mode 100644
index 000000000..02bfdd98f
sup3r.models.conditional.Sup3rCondMom#

+
+
+class Sup3rCondMom(gen_layers, optimizer=None, learning_rate=0.0001, num_par=None, history=None, meta=None, means=None, stdevs=None, default_device=None, name=None)[source]#
+

Bases: AbstractSingleModel, AbstractInterface

+

Basic Sup3r conditional moments model.

+
+
Parameters:
+
    +
  • gen_layers (list | str) – Hidden layers input argument to phygnn.base.CustomNetwork for the +generative super resolving model. Can also be a str filepath to a +.json config file containing the input layers argument or a .pkl +for a saved pre-trained model.

  • +
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config +from tf.keras.optimizers.get_config(). None defaults to Adam.

  • +
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a +pre-initialized object or if optimizer input arg is a config dict.

  • +
  • num_par (int | None) – Number of trainable parameters in the model

  • +
  • history (pd.DataFrame | str | None) – Model training history with “epoch” index, str pointing to a saved +history csv file with “epoch” as first column, or None for clean +history

  • +
  • meta (dict | None) – Model meta data that describes how the model was created.

  • +
  • means (dict | None) – Set of mean values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • default_device (str | None) – Option for default device placement of model weights. If None and a +single GPU exists, that GPU will be the default device. If None and +multiple GPUs exist, the CPU will be the default device (this was +tested as most efficient given the custom multi-gpu strategy +developed in self.run_gradient_descent())

  • +
  • name (str | None) – Optional name for the model.

  • +
+
+
+
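As a quick orientation, a minimal construction sketch (the config path and model name below are hypothetical, not from the sup3r source):

from sup3r.models.conditional import Sup3rCondMom

# './gen_config.json' is a hypothetical path to a hidden-layers config for
# phygnn.base.CustomNetwork; learning_rate is ignored if optimizer is a
# pre-initialized object or a config dict.
model = Sup3rCondMom(
    gen_layers='./gen_config.json',
    learning_rate=1e-4,
    name='cond_mom_demo',
)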

Methods

+

calc_loss(output_true, output_gen, mask)

Calculate the total moment predictor loss

calc_loss_cond_mom(output_true, output_gen, mask)

Calculate the loss of the moment predictor

calc_val_loss(batch_handler, loss_details)

Calculate the validation loss at the current state of model training

dict_to_tensorboard(entry)

Write data to tensorboard log file.

early_stop(history, column[, threshold, n_epoch])

Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.

finish_epoch(epoch, epochs, t0, ...[, extras])

Perform finishing checks after an epoch is done training

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_high_res_exo_input(high_res)

Get exogenous feature data from high_res

get_loss_fun(loss)

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

get_optimizer_config(optimizer)

Get a config that defines the current model optimizer

get_optimizer_state(optimizer)

Get a set of state variables for the optimizer

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_single_grad(low_res, hi_res_true, ...[, ...])

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

init_optimizer(optimizer, learning_rate)

Initialize keras optimizer object.

load(model_dir[, verbose])

Load the model with its sub-networks from a previously saved-to output directory.

load_network(model, name)

Load a CustomNetwork object from hidden layers config, .json file config, or .pkl file saved pre-trained model.

load_saved_params(out_dir[, verbose])

Load saved model_params (you need this and the gen+disc models to load a full model).

log_loss_details(loss_details[, level])

Log the loss details to the module logger.

norm_input(low_res)

Normalize low resolution data being input to the generator.

profile_to_tensorboard(name)

Write profile data to tensorboard log file.

run_gradient_descent(low_res, hi_res_true, ...)

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

save(out_dir)

Save the model with its sub-networks to a directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

set_norm_stats(new_means, new_stdevs)

Set the normalization statistics associated with a data batch handler to model attributes.

train(batch_handler, input_resolution, n_epoch)

Train the model on real low res data and real high res data

train_epoch(batch_handler[, multi_gpu])

Train the model for one epoch.

un_norm_output(output)

Un-normalize synthetically generated output data to physical units

update_loss_details(loss_details, new_data, ...)

Update a dictionary of loss_details with loss information from a new batch.

update_optimizer(**kwargs)

Update optimizer by changing current configuration

+
+

Attributes

+

generator

Get the generative model.

generator_weights

Get a list of layer weights and bias terms for the generator model.

history

Model training history DataFrame (None if not yet trained)

hr_exo_features

Get list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get meta data dictionary that defines how the model was created

model_params

Model parameters, used to save model to disc

optimizer

Get the tensorflow optimizer to perform gradient descent calculations for the generative network.

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

total_batches

Record of total number of batches for logging.

version_record

A record of important versions that this model was built with.

weights

Get a list of all the layer weights and bias terms for the generator network

+
+
+
+save(out_dir)[source]#
+

Save the model with its sub-networks to a directory.

+
+
Parameters:
+

out_dir (str) – Directory to save model files. This directory will be created +if it does not already exist.

+
+
+
+ +
+
+classmethod load(model_dir, verbose=True)[source]#
+

Load the model with its sub-networks from a previously saved-to +output directory.

+
+
Parameters:
+
    +
  • model_dir (str) – Directory to load model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (BaseModel) – Returns a pretrained gan model that was previously saved to out_dir

+
+
+
+ +
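A hedged sketch of the save/load round trip (the output directory is illustrative):

out_dir = './cond_mom_model'        # illustrative directory
model.save(out_dir)                 # writes the model and its params to disk
model = Sup3rCondMom.load(out_dir, verbose=True)  # restores the saved model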
+
+update_optimizer(**kwargs)[source]#
+

Update optimizer by changing current configuration

+
+
Parameters:
+

kwargs (dict) – kwargs to use for optimizer configuration update

+
+
+
+ +
+
+property meta#
+

Get meta data dictionary that defines how the model was created

+
+ +
+
+property model_params#
+

Model parameters, used to save model to disc

+
+
Returns:
+

dict

+
+
+
+ +
+
+property weights#
+

Get a list of all the layer weights and bias terms for the +generator network

+
+ +
+
+calc_loss_cond_mom(output_true, output_gen, mask)[source]#
+

Calculate the loss of the moment predictor

+
+
Parameters:
+
    +
  • output_true (tf.Tensor) – True realization output

  • +
  • output_gen (tf.Tensor) – Predicted realization output

  • +
  • mask (tf.Tensor) – Mask to apply

  • +
+
+
Returns:
+

loss (tf.Tensor) – 0D tensor generator model loss for the MSE loss of the +moment predictor

+
+
+
+ +
+
+calc_loss(output_true, output_gen, mask)[source]#
+

Calculate the total moment predictor loss

+
+
Parameters:
+
    +
  • output_true (tf.Tensor) – True realization output

  • +
  • output_gen (tf.Tensor) – Predicted realization output

  • +
  • mask (tf.Tensor) – Mask to apply

  • +
+
+
Returns:
+

    +
  • loss (tf.Tensor) – 0D tensor representing the loss value for the +moment predictor

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+calc_val_loss(batch_handler, loss_details)[source]#
+

Calculate the validation loss at the current state of model training

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+
+
Returns:
+

loss_details (dict) – Same as input but now includes val_* loss info

+
+
+
+ +
+
+dict_to_tensorboard(entry)#
+

Write data to tensorboard log file. This is usually a loss_details +dictionary.

+
+
Parameters:
+

entry (dict) – Dictionary of values to write to tensorboard log file

+
+
+
+ +
+
+static early_stop(history, column, threshold=0.005, n_epoch=5)#
+

Determine whether to stop training early based on nearly no change +to validation loss for a certain number of consecutive epochs.

+
+
Parameters:
+
    +
  • history (pd.DataFrame | None) – Model training history

  • +
  • column (str) – Column from the model training history to evaluate for early +termination.

  • +
  • threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002, which would be +less than the default threshold of 0.005 and would satisfy the +condition for early termination.

  • +
  • n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
+
+
Returns:
+

stop (bool) – Flag to stop training (True) or keep going (False).

+
+
+
+ +
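To make the threshold arithmetic concrete, a small sketch using a toy history (the column name 'val_loss' is an assumption for illustration):

import pandas as pd
from sup3r.models.conditional import Sup3rCondMom

# Relative diffs here are |0.0998 - 0.1| / 0.1 = 0.002 and smaller, all below
# the 0.005 threshold, so 5 consecutive qualifying epochs trigger an early stop.
history = pd.DataFrame(
    {'val_loss': [0.1000, 0.0998, 0.0997, 0.0996, 0.0995, 0.0995]}
)
stop = Sup3rCondMom.early_stop(history, 'val_loss', threshold=0.005, n_epoch=5)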
+
+finish_epoch(epoch, epochs, t0, loss_details, checkpoint_int, out_dir, early_stop_on, early_stop_threshold, early_stop_n_epoch, extras=None)#
+

Perform finishing checks after an epoch is done training

+
+
Parameters:
+
    +
  • epoch (int) – Epoch number that is finishing

  • +
  • epochs (list) – List of epochs being iterated through

  • +
  • t0 (float) – Starting time of training.

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to +evaluate for early stopping (e.g. validation_loss_gen, +validation_loss_disc). If this value in the training history decreases by +an absolute fractional relative difference of less than 0.005 for +more than 5 epochs in a row, the training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002, which would be +less than the default threshold of 0.005 and would satisfy the +condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • extras (dict | None) – Extra kwargs/parameters to save in the epoch history.

  • +
+
+
Returns:
+

stop (bool) – Flag to early stop training.

+
+
+
+ +
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution input data, usually a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, +self._stdevs attributes are available. The generator should always +receive normalized data with mean=0, stdev=1. This also normalizes +hi_res_topo.

  • +
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical +units

  • +
  • exogenous_data (dict | ExoData | None) – Special dictionary (class:ExoData) of exogenous feature data with +entries describing whether features should be combined at input, a +mid network layer, or with output. This doesn’t have to include +the ‘model’ key since this data is for a single step model.

  • +
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data, usually a 4D or 5D +array with shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
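A usage sketch with random 5D input (the shapes and random values are illustrative only, and model is assumed to be a trained Sup3rCondMom):

import numpy as np

# (n_obs, spatial_1, spatial_2, n_temporal, n_features); random stand-ins
# for real low-res data.
low_res = np.random.uniform(size=(1, 10, 10, 6, 2)).astype(np.float32)
hi_res = model.generate(low_res, norm_in=True, un_norm_out=True)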
+
+property generator#
+

Get the generative model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property generator_weights#
+

Get a list of layer weights and bias terms for the generator model.

+
+
Returns:
+

list

+
+
+
+ +
+
+get_high_res_exo_input(high_res)#
+

Get exogenous feature data from high_res

+
+
Parameters:
+

high_res (tf.Tensor) – Ground truth high resolution spatiotemporal data.

+
+
Returns:
+

exo_data (dict) – Dictionary of exogenous feature data used as input to tf_generate. +e.g. {'topography': tf.Tensor(...)}

+
+
+
+ +
+
+static get_loss_fun(loss)#
+

Get the initialized loss function class from the sup3r loss library +or the tensorflow losses.

+
+
Parameters:
+

loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics +(prioritized) or tensorflow.keras.losses. Defaults to +tf.keras.losses.MeanSquaredError. This can be provided as a dict +with kwargs for loss functions with extra parameters. +e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}

+
+
Returns:
+

out (tf.keras.losses.Loss) – Initialized loss function class that is callable, e.g. if +“MeanSquaredError” is requested, this will return +an instance of tf.keras.losses.MeanSquaredError()

+
+
+
+ +
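For example (the dict form mirrors the docstring above; both calls are sketches):

from sup3r.models.conditional import Sup3rCondMom

mse = Sup3rCondMom.get_loss_fun('MeanSquaredError')   # tf.keras.losses fallback
ext = Sup3rCondMom.get_loss_fun({'SpatialExtremesLoss': {'weight': 0.5}})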
+
+static get_optimizer_config(optimizer)#
+

Get a config that defines the current model optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

config (dict) – Optimizer config

+
+
+
+ +
+
+classmethod get_optimizer_state(optimizer)#
+

Get a set of state variables for the optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

state (dict) – Optimizer state variables

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_single_grad(low_res, hi_res_true, training_weights, device_name=None, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true), +do not update weights, just return gradient details.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • device_name (None | str) – Optional tensorflow device name for GPU placement. Note that if a +GPU is available, variables will be placed on that GPU even if +device_name=None.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

    +
  • grad (list) – a list or nested structure of Tensors (or IndexedSlices, or None, +or CompositeTensor) representing the gradients for the +training_weights

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+property history#
+

Model training history DataFrame (None if not yet trained)

+
+
Returns:
+

pandas.DataFrame | None

+
+
+
+ +
+
+property hr_exo_features#
+

Get list of high-resolution exogenous feature names the model uses. +If the model has N concat or add layers this list will be the last N +features in the training features list. The ordering is assumed to be +the same as the order of concat or add layers. If training features is +[…, topo, sza], and the model has 2 concat or add layers, exo +features will be [topo, sza]. Topo will then be used in the first +concat layer and sza will be used in the second.

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the +generative model outputs.

+
+ +
+
+static init_optimizer(optimizer, learning_rate)#
+

Initialize keras optimizer object.

+
+
Parameters:
+
    +
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config +from tf.keras.optimizers.get_config(). None defaults to Adam.

  • +
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a +pre-initialized object or if optimizer input arg is a config dict.

  • +
+
+
Returns:
+

optimizer (tf.keras.optimizers.Optimizer) – Initialized optimizer object.

+
+
+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary +{'spatial': '...km', 'temporal': '...min'}. The numbers are +required to be integers in the units specified. The units are not +strict as long as the resolution of the exogenous data, when +extracted, is specified in the same units.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+load_network(model, name)#
+

Load a CustomNetwork object from hidden layers config, .json file +config, or .pkl file saved pre-trained model.

+
+
Parameters:
+
    +
  • model (str | dict) – Model hidden layers config, a .json with “hidden_layers” key, or a +.pkl for a saved pre-trained model.

  • +
  • name (str) – Name of the model to be loaded

  • +
+
+
Returns:
+

model (phygnn.CustomNetwork) – CustomNetwork object initialized from the model input.

+
+
+
+ +
+
+static load_saved_params(out_dir, verbose=True)#
+

Load saved model_params (you need this and the gen+disc models +to load a full model).

+
+
Parameters:
+
    +
  • out_dir (str) – Directory to load model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

params (dict) – Model parameters loaded from disk json file. This should be the +same as self.model_params with an additional ‘history’ entry. +Should be all the kwargs you need to init a model.

+
+
+
+ +
+
+static log_loss_details(loss_details, level='INFO')#
+

Log the loss details to the module logger.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • level (str) – Log level (e.g. INFO, DEBUG)

  • +
+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property means#
+

Get the data normalization mean values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+norm_input(low_res)#
+

Normalize low resolution data being input to the generator.

+
+
Parameters:
+

low_res (np.ndarray) – Un-normalized low-resolution input data in physical units, usually +a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
Returns:
+

low_res (np.ndarray) – Normalized low-resolution input data, usually a 4D or 5D array of +shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
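Conceptually the normalization is a per-feature standard score; a minimal stand-in under that assumption (not the source implementation):

import numpy as np

def norm_sketch(low_res, means, stdevs):
    # Standard-score each feature channel to mean 0, stdev 1, matching
    # what the generator expects as input.
    return (low_res - means) / stdevs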
+
+property optimizer#
+

Get the tensorflow optimizer to perform gradient descent +calculations for the generative network. This is functionally identical +to optimizer_disc if no special optimizer model or learning rate was +specified for the disc.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
+
+profile_to_tensorboard(name)#
+

Write profile data to tensorboard log file.

+
+
Parameters:
+

name (str) – Tag name to use for profile info

+
+
+
+ +
+
+run_gradient_descent(low_res, hi_res_true, training_weights, optimizer=None, multi_gpu=False, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true) +and update weights

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • optimizer (tf.keras.optimizers.Optimizer) – Optimizer class to use to update weights. This can be different if +you’re training just the generator or one of the discriminator +models. Defaults to the generator optimizer.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+set_norm_stats(new_means, new_stdevs)#
+

Set the normalization statistics associated with a data batch +handler to model attributes.

+
+
Parameters:
+
    +
  • new_means (dict | None) – Set of mean values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • new_stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+property total_batches#
+

Record of total number of batches for logging.

+
+ +
+
+un_norm_output(output)#
+

Un-normalize synthetically generated output data to physical units

+
+
Parameters:
+

output (tf.Tensor | np.ndarray) – Synthetically generated high-resolution data

+
+
Returns:
+

output (np.ndarray) – Synthetically generated high-resolution data

+
+
+
+ +
+
+static update_loss_details(loss_details, new_data, batch_len, prefix=None)#
+

Update a dictionary of loss_details with loss information from a new +batch.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • new_data (dict) – Namespace of the breakdown of loss components for a single new +batch.

  • +
  • batch_len (int) – Length of the incoming batch.

  • +
  • prefix (None | str) – Option to prefix the names of the loss data when saving to the +loss_details dictionary.

  • +
+
+
Returns:
+

loss_details (dict) – Same as input loss_details but with running averages updated.

+
+
+
+ +
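A conceptual sketch of a batch-length-weighted running mean, the kind of update this method performs (assuming a prior sample count is tracked; not the source implementation):

def running_mean(prev_mean, new_mean, n_prev, batch_len):
    # Combine the running average with a new batch's mean, weighted by
    # how many samples each side represents.
    return (prev_mean * n_prev + new_mean * batch_len) / (n_prev + batch_len)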
+
+property version_record#
+

A record of important versions that this model was built with.

+
+
Returns:
+

dict

+
+
+
+ +
+
+train_epoch(batch_handler, multi_gpu=False)[source]#
+

Train the model for one epoch.

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with.

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+train(batch_handler, input_resolution, n_epoch, checkpoint_int=None, out_dir='./condMom_{epoch}', early_stop_on=None, early_stop_threshold=0.005, early_stop_n_epoch=5, multi_gpu=False, tensorboard_log=True)[source]#
+

Train the model on real low res data and real high res data

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • input_resolution (dict) – Dictionary specifying spatiotemporal input resolution. e.g. +{‘temporal’: ‘60min’, ‘spatial’: ‘30km’}

  • +
  • n_epoch (int) – Number of epochs to train on

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to +evaluate for early stopping (e.g. validation_loss_gen). +If this value in the training history decreases by +an absolute fractional relative difference of less than 0.005 for +more than 5 epochs in a row, the training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002, which would be +less than the default threshold of 0.005 and would satisfy the +condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with.

  • +
  • tensorboard_log (bool) – Whether to write log file for use with tensorboard. Log data can +be viewed with tensorboard --logdir <logdir> where <logdir> +is the parent directory of out_dir, and pointing the browser to +the printed address.

  • +
+
+
+
+ +
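Putting the arguments together, a hedged training sketch (batch_handler construction is omitted, and the early_stop_on column name follows the docstring example above):

model.train(
    batch_handler,                    # a sup3r.preprocessing.BatchHandler
    input_resolution={'temporal': '60min', 'spatial': '30km'},
    n_epoch=10,
    checkpoint_int=5,
    out_dir='./condMom_{epoch}',      # {epoch} is filled in per checkpoint
    early_stop_on='validation_loss_gen',
    multi_gpu=False,
)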
\ No newline at end of file
diff --git a/_autosummary/sup3r.models.conditional.html b/_autosummary/sup3r.models.conditional.html
new file mode 100644
index 000000000..1e3e59920
--- /dev/null
+++ b/_autosummary/sup3r.models.conditional.html
@@ -0,0 +1,739 @@
sup3r.models.conditional — sup3r 0.2.1.dev62 documentation

sup3r.models.conditional#

+

Sup3r conditional moment model software

+

Classes

+

Sup3rCondMom(gen_layers[, optimizer, ...])

Basic Sup3r conditional moments model.

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.models.dc.Sup3rGanDC.html b/_autosummary/sup3r.models.dc.Sup3rGanDC.html
new file mode 100644
index 000000000..9fe173f7f
--- /dev/null
+++ b/_autosummary/sup3r.models.dc.Sup3rGanDC.html
@@ -0,0 +1,2279 @@
sup3r.models.dc.Sup3rGanDC — sup3r 0.2.1.dev62 documentation

sup3r.models.dc.Sup3rGanDC#

+
+
+class Sup3rGanDC(gen_layers, disc_layers, loss='MeanSquaredError', optimizer=None, learning_rate=0.0001, optimizer_disc=None, learning_rate_disc=None, history=None, meta=None, means=None, stdevs=None, default_device=None, name=None)[source]#
+

Bases: Sup3rGan

+

Data-centric model using loss across time bins to select training +observations

+
+
Parameters:
+
    +
  • gen_layers (list | str) – Hidden layers input argument to phygnn.base.CustomNetwork for the +generative super resolving model. Can also be a str filepath to a +.json config file containing the input layers argument or a .pkl +for a saved pre-trained model.

  • +
  • disc_layers (list | str) – Hidden layers input argument to phygnn.base.CustomNetwork for the +discriminative model (spatial or spatiotemporal discriminator). Can +also be a str filepath to a .json config file containing the input +layers argument or a .pkl for a saved pre-trained model.

  • +
  • loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics +(prioritized) or tensorflow.keras.losses. Defaults to +tf.keras.losses.MeanSquaredError. This can be provided as a dict +with kwargs for loss functions with extra parameters. +e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}

  • +
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config +from tf.keras.optimizers.get_config(). None defaults to Adam.

  • +
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a +pre-initialized object or if optimizer input arg is a config dict.

  • +
  • optimizer_disc (tf.keras.optimizers.Optimizer | dict | None) – Same as optimizer input, but if specified this makes a different +optimizer just for the discriminator network (spatial or +spatiotemporal disc).

  • +
  • learning_rate_disc (float, optional) – Same as learning_rate input, but if specified this makes a +different learning_rate just for the discriminator network (spatial +or spatiotemporal disc).

  • +
  • history (pd.DataFrame | str | None) – Model training history with “epoch” index, str pointing to a saved +history csv file with “epoch” as first column, or None for clean +history

  • +
  • meta (dict | None) – Model meta data that describes how the model was created.

  • +
  • means (dict | None) – Set of mean values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • default_device (str | None) – Option for default device placement of model weights. If None and a +single GPU exists, that GPU will be the default device. If None and +multiple GPUs exist, the CPU will be the default device (this was +tested as most efficient given the custom multi-gpu strategy +developed in self.run_gradient_descent()). Examples: “/gpu:0” or +“/cpu:0”

  • +
  • name (str | None) – Optional name for the GAN.

  • +
+
+
+
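A minimal construction sketch (both config paths are hypothetical):

from sup3r.models.dc import Sup3rGanDC

model = Sup3rGanDC(
    gen_layers='./gen_config.json',    # hypothetical generator layers config
    disc_layers='./disc_config.json',  # hypothetical discriminator config
    loss='MeanSquaredError',
    learning_rate=1e-4,
)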

Methods

+

calc_loss(hi_res_true, hi_res_gen[, ...])

Calculate the GAN loss function using generated and true high resolution data.

calc_loss_disc(disc_out_true, disc_out_gen)

Calculate the loss term for the discriminator model (either the spatial or temporal discriminator).

calc_loss_gen_advers(disc_out_gen)

Calculate the adversarial component of the loss term for the generator model.

calc_loss_gen_content(hi_res_true, hi_res_gen)

Calculate the content loss term for the generator model.

calc_val_loss(batch_handler, ...)

Overloading the base calc_val_loss method.

calc_val_loss_gen(batch_handler, ...)

Calculate the validation total loss across the validation samples.

calc_val_loss_gen_content(batch_handler)

Calculate the validation content loss across the validation samples.

check_batch_handler_attrs(batch_handler)

Not all batch handlers have the following attributes.

dict_to_tensorboard(entry)

Write data to tensorboard log file.

discriminate(hi_res[, norm_in])

Run the discriminator model on a hi resolution input field.

early_stop(history, column[, threshold, n_epoch])

Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.

finish_epoch(epoch, epochs, t0, ...[, extras])

Perform finishing checks after an epoch is done training

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_high_res_exo_input(high_res)

Get exogenous feature data from high_res

get_loss_fun(loss)

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

get_optimizer_config(optimizer)

Get a config that defines the current model optimizer

get_optimizer_state(optimizer)

Get a set of state variables for the optimizer

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_single_grad(low_res, hi_res_true, ...[, ...])

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

get_weight_update_fraction(history, ...[, ...])

Get the factor by which to multiply previous adversarial loss weight

init_optimizer(optimizer, learning_rate)

Initialize keras optimizer object.

init_weights(lr_shape, hr_shape[, device])

Initialize the generator and discriminator weights with device placement.

load(model_dir[, verbose])

Load the GAN with its sub-networks from a previously saved-to output directory.

load_network(model, name)

Load a CustomNetwork object from hidden layers config, .json file config, or .pkl file saved pre-trained model.

load_saved_params(out_dir[, verbose])

Load saved model_params (you need this and the gen+disc models to load a full model).

log_loss_details(loss_details[, level])

Log the loss details to the module logger.

norm_input(low_res)

Normalize low resolution data being input to the generator.

profile_to_tensorboard(name)

Write profile data to tensorboard log file.

run_gradient_descent(low_res, hi_res_true, ...)

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

save(out_dir)

Save the GAN with its sub-networks to a directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

set_norm_stats(new_means, new_stdevs)

Set the normalization statistics associated with a data batch handler to model attributes.

train(batch_handler, input_resolution, n_epoch)

Train the GAN model on real low res data and real high res data

train_epoch(batch_handler, ...[, multi_gpu])

Train the GAN for one epoch.

un_norm_output(output)

Un-normalize synthetically generated output data to physical units

update_adversarial_weights(history, ...)

Update spatial / temporal adversarial loss weights based on training fraction history.

update_loss_details(loss_details, new_data, ...)

Update a dictionary of loss_details with loss information from a new batch.

update_optimizer([option])

Update optimizer by changing current configuration

+
+

Attributes

+

discriminator

Get the discriminator model.

discriminator_weights

Get a list of layer weights and bias terms for the discriminator model.

generator

Get the generative model.

generator_weights

Get a list of layer weights and bias terms for the generator model.

history

Model training history DataFrame (None if not yet trained)

hr_exo_features

Get list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get meta data dictionary that defines how the model was created

model_params

Model parameters, used to save model to disc

optimizer

Get the tensorflow optimizer to perform gradient descent calculations for the generative network.

optimizer_disc

Get the tensorflow optimizer to perform gradient descent calculations for the discriminator network.

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

total_batches

Record of total number of batches for logging.

version_record

A record of important versions that this model was built with.

weights

Get a list of all the layer weights and bias terms for the generator and discriminator networks

+
+
+
+calc_val_loss_gen(batch_handler, weight_gen_advers)[source]#
+

Calculate the total validation loss across the validation +samples. For example, if the sample domain has 100 steps and the +validation set has 10 bins, this will get a list of losses across +steps 0 to 10, 10 to 20, etc. Use this to determine performance +within bins and to update how observations are selected from these +bins.

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandlerDC) – BatchHandler object to iterate through

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
+
+
Returns:
+

array – Array of total losses for all sample bins, with shape +(n_space_bins, n_time_bins)

+
+
+
+ +
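For orientation, a sketch of how the returned bin losses might be inspected (the concrete bin counts are assumptions for illustration):

losses = model.calc_val_loss_gen(batch_handler, weight_gen_advers=0.001)
# One total loss per (space bin, time bin); e.g. with 4 spatial and 10
# temporal bins the array would have shape (4, 10).
print(losses.shape)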
+
+calc_val_loss_gen_content(batch_handler)[source]#
+

Calculate the validation content loss across the validation +samples. For example, if the sample domain has 100 steps and the +validation set has 10 bins, this will get a list of losses across +steps 0 to 10, 10 to 20, etc. Use this to determine performance +within bins and to update how observations are selected from these +bins.

+
+
Parameters:
+

batch_handler (sup3r.preprocessing.BatchHandlerDC) – BatchHandler object to iterate through

+
+
Returns:
+

list – List of content losses for all sample bins

+
+
+
+ +
+
+calc_val_loss(batch_handler, weight_gen_advers, loss_details)[source]#
+

Overloading the base calc_val_loss method. Method updates the +temporal weights for the batch handler based on the losses across the +time bins

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
+
+
Returns:
+

dict – Updated loss_details with mean validation loss calculated using +the validation samples across the time bins

+
+
+
+ +
+
+calc_loss(hi_res_true, hi_res_gen, weight_gen_advers=0.001, train_gen=True, train_disc=False)#
+

Calculate the GAN loss function using generated and true high +resolution data.

+
+
Parameters:
+
    +
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • +
  • hi_res_gen (tf.Tensor) – Superresolved high resolution spatiotemporal data generated by the +generative model.

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – True if generator is being trained, then loss=loss_gen

  • +
  • train_disc (bool) – True if disc is being trained, then loss=loss_disc

  • +
+
+
Returns:
+

    +
  • loss (tf.Tensor) – 0D tensor representing the loss value for the network being trained +(either generator or one of the discriminators)

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+static calc_loss_disc(disc_out_true, disc_out_gen)#
+

Calculate the loss term for the discriminator model (either the +spatial or temporal discriminator).

+
+
Parameters:
+
    +
  • disc_out_true (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting +only on ground truth data hi_res_true (not on hi_res_gen).

  • +
  • disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting +only on synthetic data hi_res_gen (not on hi_res_true).

  • +
+
+
Returns:
+

loss_disc (tf.Tensor) – 0D tensor discriminator model loss for either the spatial or +temporal component of the super resolution generated output.

+
+
+
+ +
+
+static calc_loss_gen_advers(disc_out_gen)#
+

Calculate the adversarial component of the loss term for the +generator model.

+
+
Parameters:
+

disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model +predicting only on hi_res_gen (not on hi_res_true).

+
+
Returns:
+

loss_gen_advers (tf.Tensor) – 0D tensor generator model loss for the adversarial component of the +generator loss term.

+
+
+
+ +
+
+calc_loss_gen_content(hi_res_true, hi_res_gen)#
+

Calculate the content loss term for the generator model.

+
+
Parameters:
+
    +
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • +
  • hi_res_gen (tf.Tensor) – Superresolved high resolution spatiotemporal data generated by the +generative model.

  • +
+
+
Returns:
+

loss_gen_s (tf.Tensor) – 0D tensor generator model loss for the content loss comparing the +hi res ground truth to the hi res synthetically generated output.

+
+
+
+ +
+
+static check_batch_handler_attrs(batch_handler)#
+

Not all batch handlers have the following attributes, so we perform +some sanitization before sending them to set_model_params

+
+ +
+
+dict_to_tensorboard(entry)#
+

Write data to tensorboard log file. This is usually a loss_details +dictionary.

+
+
Parameters:
+

entry (dict) – Dictionary of values to write to tensorboard log file

+
+
+
+ +
+
+discriminate(hi_res, norm_in=False)#
+

Run the discriminator model on a hi resolution input field.

+
+
Parameters:
+
    +
  • hi_res (np.ndarray) – Real or fake high res data in a 4D or 5D tensor: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, +self._stdevs attributes are available. The disc should always +receive normalized data with mean=0, stdev=1.

  • +
+
+
Returns:
+

out (np.ndarray) – Discriminator output logits

+
+
+
+ +
+
+property discriminator#
+

Get the discriminator model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property discriminator_weights#
+

Get a list of layer weights and bias terms for the discriminator +model.

+
+
Returns:
+

list

+
+
+
+ +
+
+static early_stop(history, column, threshold=0.005, n_epoch=5)#
+

Determine whether to stop training early based on nearly no change +to validation loss for a certain number of consecutive epochs.

+
+
Parameters:
+
    +
  • history (pd.DataFrame | None) – Model training history

  • +
  • column (str) – Column from the model training history to evaluate for early +termination.

  • +
  • threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002, which would be +less than the default threshold of 0.005 and would satisfy the +condition for early termination.

  • +
  • n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
+
+
Returns:
+

stop (bool) – Flag to stop training (True) or keep going (False).

+
+
+
+ +
+
+finish_epoch(epoch, epochs, t0, loss_details, checkpoint_int, out_dir, early_stop_on, early_stop_threshold, early_stop_n_epoch, extras=None)#
+

Perform finishing checks after an epoch is done training

+
+
Parameters:
+
    +
  • epoch (int) – Epoch number that is finishing

  • +
  • epochs (list) – List of epochs being iterated through

  • +
  • t0 (float) – Starting time of training.

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to +evaluate for early stopping (e.g. validation_loss_gen, +validation_loss_disc). If this value in the training history decreases by +an absolute fractional relative difference of less than 0.005 for +more than 5 epochs in a row, the training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss +between subsequent epochs below which an early termination is +warranted. E.g. if val losses were 0.1 and 0.0998 the relative +diff would be calculated as 0.0002 / 0.1 = 0.002, which would be +less than the default threshold of 0.005 and would satisfy the +condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • extras (dict | None) – Extra kwargs/parameters to save in the epoch history.

  • +
+
+
Returns:
+

stop (bool) – Flag to early stop training.

+
+
+
+ +
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution input data, usually a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self._means, +self._stdevs attributes are available. The generator should always +receive normalized data with mean=0, stdev=1. This also normalizes +hi_res_topo.

  • +
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical +units

  • +
  • exogenous_data (dict | ExoData | None) – Special dictionary (class:ExoData) of exogenous feature data with +entries describing whether features should be combined at input, a +mid network layer, or with output. This doesn’t have to include +the ‘model’ key since this data is for a single step model.

  • +
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data, usually a 4D or 5D +array with shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
+
+property generator#
+

Get the generative model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property generator_weights#
+

Get a list of layer weights and bias terms for the generator model.

+
+
Returns:
+

list

+
+
+
+ +
+
+get_high_res_exo_input(high_res)#
+

Get exogenous feature data from high_res

+
+
Parameters:
+

high_res (tf.Tensor) – Ground truth high resolution spatiotemporal data.

+
+
Returns:
+

exo_data (dict) – Dictionary of exogenous feature data used as input to tf_generate. +e.g. {'topography': tf.Tensor(...)}

+
+
+
+ +
+
+static get_loss_fun(loss)#
+

Get the initialized loss function class from the sup3r loss library +or the tensorflow losses.

+
+
Parameters:
+

loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics +(prioritized) or tensorflow.keras.losses. Defaults to +tf.keras.losses.MeanSquaredError. This can be provided as a dict +with kwargs for loss functions with extra parameters. +e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}

+
+
Returns:
+

out (tf.keras.losses.Loss) – Initialized loss function class that is callable, e.g. if +“MeanSquaredError” is requested, this will return +an instance of tf.keras.losses.MeanSquaredError()

+
+
+
+ +
+
+static get_optimizer_config(optimizer)#
+

Get a config that defines the current model optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

config (dict) – Optimizer config

+
+
+
+ +
+
+classmethod get_optimizer_state(optimizer)#
+

Get a set of state variables for the optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

state (dict) – Optimizer state variables

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_single_grad(low_res, hi_res_true, training_weights, device_name=None, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true), +do not update weights, just return gradient details.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • device_name (None | str) – Optional tensorflow device name for GPU placement. Note that if a +GPU is available, variables will be placed on that GPU even if +device_name=None.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

    +
  • grad (list) – a list or nested structure of Tensors (or IndexedSlices, or None, +or CompositeTensor) representing the gradients for the +training_weights

  • +
  • loss_details (dict) – Namespace of the breakdown of loss components

  • +
+

+
+
+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+static get_weight_update_fraction(history, comparison_key, update_bounds=(0.5, 0.95), update_frac=0.0)#
+

Get the factor by which to multiply previous adversarial loss +weight

+
+
Parameters:
+
    +
  • history (dict) – Dictionary with information on how often discriminators +were trained during previous epoch.

  • +
  • comparison_key (str) – history key to use for update check

  • +
  • update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If +history[comparison_key] < update_bounds[0] then the weight will be +increased by (1 + update_frac). If history[comparison_key] > +update_bounds[1] then the weight will be decreased by 1 / (1 + +update_frac).

  • +
  • update_frac (float) – Fraction by which to increase/decrease adversarial loss weight

  • +
+
+
Returns:
+

float – Factor by which to multiply old weight to get updated weight

+
+
+
+ +
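The documented update rule, written out as a standalone sketch (a re-statement of the docstring above, not the source implementation):

def weight_update_fraction(value, update_bounds=(0.5, 0.95), update_frac=0.0):
    if value < update_bounds[0]:
        return 1 + update_frac        # train fraction too low: raise weight
    if value > update_bounds[1]:
        return 1 / (1 + update_frac)  # train fraction too high: lower weight
    return 1.0                        # within bounds: weight unchanged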
+
+property history#
+

Model training history DataFrame (None if not yet trained)

+
+
Returns:
+

pandas.DataFrame | None

+
+
+
+ +
+
+property hr_exo_features#
+

Get list of high-resolution exogenous feature names the model uses. +If the model has N concat or add layers this list will be the last N +features in the training features list. The ordering is assumed to be +the same as the order of concat or add layers. If training features is +[…, topo, sza], and the model has 2 concat or add layers, exo +features will be [topo, sza]. Topo will then be used in the first +concat layer and sza will be used in the second.

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the +generative model outputs.

+
+ +
+
+static init_optimizer(optimizer, learning_rate)#
+

Initialize keras optimizer object.

+
+
Parameters:
+
    +
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config +from tf.keras.optimizers.get_config(). None defaults to Adam.

  • +
  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a +pre-initialized object or if optimizer input arg is a config dict.

  • +
+
+
Returns:
+

optimizer (tf.keras.optimizers.Optimizer) – Initialized optimizer object.

+
+
+
+ +
+
+init_weights(lr_shape, hr_shape, device=None)#
+

Initialize the generator and discriminator weights with device +placement.

+
+
Parameters:
+
    +
  • lr_shape (tuple) – Shape of one batch of low res input data for sup3r resolution. Note +that the batch size (axis=0) must be included, but the actual batch +size doesn't really matter.

  • +
  • hr_shape (tuple) – Shape of one batch of high res input data for sup3r resolution. +Note that the batch size (axis=0) must be included, but the actual +batch size doesn't really matter.

  • +
  • device (str | None) – Option to place model weights on a device. If None, +self.default_device will be used.

  • +
+
+
+
+ +
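A usage sketch (the shapes are illustrative, assuming 2x spatial and 4x temporal enhancement, and the device string follows the examples above):

model.init_weights(
    lr_shape=(4, 10, 10, 6, 2),    # (batch, s1, s2, t, features); batch size arbitrary
    hr_shape=(4, 20, 20, 24, 2),
    device='/gpu:0',
)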
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units themselves are not strict, as long as the resolution of the exogenous data is specified in the same units when that data is extracted.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+classmethod load(model_dir, verbose=True)#
+

Load the GAN with its sub-networks from a previously saved-to output +directory.

+
+
Parameters:
+
    +
  • model_dir (str) – Directory to load GAN model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (BaseModel) – Returns a pretrained gan model that was previously saved to out_dir

+
+
+
+ +
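For example (hedged sketch; './gan_model' is a hypothetical directory previously written with Sup3rGan.save()):

.. code-block:: python

    from sup3r.models import Sup3rGan

    model = Sup3rGan.load('./gan_model', verbose=True)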
+
+load_network(model, name)#
+

Load a CustomNetwork object from hidden layers config, .json file +config, or .pkl file saved pre-trained model.

+
+
Parameters:
+
    +
  • model (str | dict) – Model hidden layers config, a .json with “hidden_layers” key, or a +.pkl for a saved pre-trained model.

  • +
  • name (str) – Name of the model to be loaded

  • +
+
+
Returns:
+

model (phygnn.CustomNetwork) – CustomNetwork object initialized from the model input.

+
+
+
+ +
+
+static load_saved_params(out_dir, verbose=True)#
+

Load saved model_params (you need this and the gen+disc models +to load a full model).

+
+
Parameters:
+
    +
  • out_dir (str) – Directory to load model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

params (dict) – Model parameters loaded from the disk json file. This should be the same as self.model_params with an additional 'history' entry. Should be all the kwargs you need to init a model.

+
+
+
+ +
+
+static log_loss_details(loss_details, level='INFO')#
+

Log the loss details to the module logger.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • level (str) – Log level (e.g. INFO, DEBUG)

  • +
+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property means#
+

Get the data normalization mean values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property meta#
+

Get meta data dictionary that defines how the model was created

+
+ +
+
+property model_params#
+

Model parameters, used to save the model to disk

+
+
Returns:
+

dict

+
+
+
+ +
+
+norm_input(low_res)#
+

Normalize low resolution data being input to the generator.

+
+
Parameters:
+

low_res (np.ndarray) – Un-normalized low-resolution input data in physical units, usually +a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
Returns:
+

low_res (np.ndarray) – Normalized low-resolution input data, usually a 4D or 5D array of +shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
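A minimal sketch, assuming model is a loaded Sup3rGan whose normalization statistics have been set (e.g. via set_norm_stats() below):

.. code-block:: python

    import numpy as np

    # Hypothetical 4D spatial batch: (n_obs, spatial_1, spatial_2, n_features)
    low_res = np.random.uniform(0, 10, (4, 20, 20, 2)).astype(np.float32)

    # Applies (x - mean) / stdev per feature using the stored statistics
    normed = model.norm_input(low_res)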
+
+property optimizer#
+

Get the tensorflow optimizer to perform gradient descent calculations for the generative network. This is functionally identical to optimizer_disc if no special optimizer or learning rate was specified for the discriminator.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property optimizer_disc#
+

Get the tensorflow optimizer to perform gradient descent +calculations for the discriminator network.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
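A quick worked example with hypothetical numbers:

.. code-block:: python

    # A model with s_enhance=10 and t_enhance=4 trained on 30 km, 60 min
    # input reports 3 km, 15 min output:
    model.input_resolution   # {'spatial': '30km', 'temporal': '60min'}
    model.output_resolution  # {'spatial': '3km', 'temporal': '15min'}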
+
+profile_to_tensorboard(name)#
+

Write profile data to tensorboard log file.

+
+
Parameters:
+

name (str) – Tag name to use for profile info

+
+
+
+ +
+
+run_gradient_descent(low_res, hi_res_true, training_weights, optimizer=None, multi_gpu=False, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true) +and update weights

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: +(n_observations, spatial_1, spatial_2, features) +(n_observations, spatial_1, spatial_2, temporal, features)

  • +
  • training_weights (list) – A list of layer weights that are to-be-trained based on the +current loss weight values.

  • +
  • optimizer (tf.keras.optimizers.Optimizer) – Optimizer class to use to update weights. This can be different if +you’re training just the generator or one of the discriminator +models. Defaults to the generator optimizer.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent +calculations on multiple gpus. If True and multiple GPUs are +present, each batch from the batch_handler will be divided up +between the GPUs and the resulting gradient from each GPU will +constitute a single gradient descent step with the nominal learning +rate that the model was initialized with.

  • +
  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
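A hedged sketch of a single mini-batch update (low_res and hi_res_true are hypothetical numpy batches; in practice you would pass only the subset of weights being trained, e.g. just the generator's):

.. code-block:: python

    loss_details = model.run_gradient_descent(
        low_res, hi_res_true,
        training_weights=model.weights,  # all gen + disc layer weights
        optimizer=None,      # None defaults to the generator optimizer
        multi_gpu=False)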
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save(out_dir)#
+

Save the GAN with its sub-networks to a directory.

+
+
Parameters:
+

out_dir (str) – Directory to save GAN model files. This directory will be created +if it does not already exist.

+
+
+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+set_norm_stats(new_means, new_stdevs)#
+

Set the normalization statistics associated with a data batch +handler to model attributes.

+
+
Parameters:
+
    +
  • new_means (dict | None) – Set of mean values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
  • new_stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. +Can be used to maintain a consistent normalization scheme between +transfer learning domains.

  • +
+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+property total_batches#
+

Record of total number of batches for logging.

+
+ +
+
+train(batch_handler, input_resolution, n_epoch, weight_gen_advers=0.001, train_gen=True, train_disc=True, disc_loss_bounds=(0.45, 0.6), checkpoint_int=None, out_dir='./gan_{epoch}', early_stop_on=None, early_stop_threshold=0.005, early_stop_n_epoch=5, adaptive_update_bounds=(0.9, 0.99), adaptive_update_fraction=0.0, multi_gpu=False, tensorboard_log=True, tensorboard_profile=False)#
+

Train the GAN model on real low res data and real high res data

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • input_resolution (dict) – Dictionary specifying spatiotemporal input resolution. e.g. +{‘temporal’: ‘60min’, ‘spatial’: ‘30km’}

  • +
  • n_epoch (int) – Number of epochs to train on

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • +
  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • +
  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which +the discriminator will not train unless train_disc=True and +train_gen=False.

  • +
  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • +
  • out_dir (str) – Directory to save checkpoint GAN models. Should have {epoch} in +the directory name. This directory will be created if it does not +already exist.

  • +
  • early_stop_on (str | None) – If not None, this should be a column in the training history to evaluate for early stopping (e.g. validation_loss_gen, validation_loss_disc). If the value in this history column decreases by an absolute fractional relative difference of less than early_stop_threshold for more than early_stop_n_epoch epochs in a row, training will stop early.

  • +
  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which is less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • +
  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that +warrants an early stop.

  • +
  • adaptive_update_bounds (tuple) – Tuple specifying the allowed range for history[comparison_key]. If history[comparison_key] < adaptive_update_bounds[0] then the weight will be increased by (1 + adaptive_update_fraction). If history[comparison_key] > adaptive_update_bounds[1] then the weight will be decreased by 1 / (1 + adaptive_update_fraction).

  • +
  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial weights for +adaptive updates

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with. If True and multiple GPUs are found, default_device should be set to '/cpu:0'.

  • +
  • tensorboard_log (bool) – Whether to write log file for use with tensorboard. Log data can +be viewed with tensorboard --logdir <logdir> where <logdir> +is the parent directory of out_dir, and pointing the browser to +the printed address.

  • +
  • tensorboard_profile (bool) – Whether to export profiling information to tensorboard. This can +then be viewed in the tensorboard dashboard under the profile tab

  • +
  • TODO – (1) args here are getting excessive; might be time for some refactoring. (2) cal_val_loss should be done in a separate thread from train_epoch so they can be done concurrently. This would be especially important for batch handlers which require val data, like dc handlers. (3) Would like an automatic way to exit the batch handler thread instead of manually calling .stop() here.
+
+
+
+ +
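Putting it together, a hedged training sketch (batch_handler construction is omitted; see sup3r.preprocessing.BatchHandler):

.. code-block:: python

    model.train(
        batch_handler,
        input_resolution={'spatial': '30km', 'temporal': '60min'},
        n_epoch=50,
        weight_gen_advers=0.001,
        train_gen=True,
        train_disc=True,
        checkpoint_int=10,
        out_dir='./gan_{epoch}',
        early_stop_on='validation_loss_gen',
        multi_gpu=False)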
+
+train_epoch(batch_handler, weight_gen_advers, train_gen, train_disc, disc_loss_bounds, multi_gpu=False)#
+

Train the GAN for one epoch.

+
+
Parameters:
+
    +
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • +
  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • +
  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which the discriminator will not train unless train_disc=True and train_gen=False.

  • +
  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with. If True and multiple GPUs are found, default_device should be set to '/cpu:0'.

  • +
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+un_norm_output(output)#
+

Un-normalize synthetically generated output data to physical units

+
+
Parameters:
+

output (tf.Tensor | np.ndarray) – Synthetically generated high-resolution data

+
+
Returns:
+

output (np.ndarray) – Synthetically generated high-resolution data

+
+
+
+ +
+
+update_adversarial_weights(history, adaptive_update_fraction, adaptive_update_bounds, weight_gen_advers, train_disc)#
+

Update spatial / temporal adversarial loss weights based on training +fraction history.

+
+
Parameters:
+
    +
  • history (dict) – Dictionary with information on how often discriminators +were trained during current and previous epochs.

  • +
  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial loss weights +for adaptive updates

  • +
  • adaptive_update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If +history[comparison_key] < update_bounds[0] then the weight will be +increased by (1 + update_frac). If history[comparison_key] > +update_bounds[1] then the weight will be decreased by 1 / (1 + +update_frac).

  • +
  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator +vs. the discriminator.

  • +
  • train_disc (bool) – Whether the discriminator was set to be trained during the +previous epoch

  • +
+
+
Returns:
+

weight_gen_advers (float) – Updated weight factor for the adversarial loss component of the +generator vs. the discriminator.

+
+
+
+ +
+
+static update_loss_details(loss_details, new_data, batch_len, prefix=None)#
+

Update a dictionary of loss_details with loss information from a new +batch.

+
+
Parameters:
+
    +
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a +running average at the current state in the epoch.

  • +
  • new_data (dict) – Namespace of the breakdown of loss components for a single new +batch.

  • +
  • batch_len (int) – Length of the incoming batch.

  • +
  • prefix (None | str) – Option to prefix the names of the loss data when saving to the +loss_details dictionary.

  • +
+
+
Returns:
+

loss_details (dict) – Same as input loss_details but with running averages updated.

+
+
+
+ +
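For intuition, a hedged re-implementation of the running-average bookkeeping described above (not the sup3r source; the 'n_obs' entry used to track the observation count is hypothetical):

.. code-block:: python

    def update_running_losses(loss_details, new_data, batch_len, prefix=None):
        n_obs = loss_details.get('n_obs', 0)
        for key, val in new_data.items():
            key = (prefix or '') + key
            prior = loss_details.get(key, 0.0)
            # weighted running average over all observations seen so far
            loss_details[key] = (prior * n_obs + val * batch_len) / (
                n_obs + batch_len)
        loss_details['n_obs'] = n_obs + batch_len
        return loss_details

    details = update_running_losses({}, {'loss_gen': 0.4}, batch_len=16,
                                    prefix='train_')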
+
+update_optimizer(option='generator', **kwargs)#
+

Update optimizer by changing current configuration

+
+
Parameters:
+
    +
  • option (str) – Which optimizer to update. Can be “generator”, “discriminator”, or +“all”

  • +
  • kwargs (dict) – kwargs to use for optimizer configuration update

  • +
+
+
+
+ +
+
+property version_record#
+

A record of important versions that this model was built with.

+
+
Returns:
+

dict

+
+
+
+ +
+
+property weights#
+

Get a list of all the layer weights and bias terms for the +generator and discriminator networks

+
+ +
+ +
diff --git a/_autosummary/sup3r.models.dc.html b/_autosummary/sup3r.models.dc.html
new file mode 100644
index 000000000..4ecd400d7
--- /dev/null
+++ b/_autosummary/sup3r.models.dc.html
@@ -0,0 +1,739 @@
[HTML page scaffolding omitted — page title: sup3r.models.dc — sup3r 0.2.1.dev62 documentation]

sup3r.models.dc#

+

Sup3r data-centric model software

+

Classes

+
+ + + + + +

Sup3rGanDC(gen_layers, disc_layers[, loss, ...])

Data-centric model using loss across time bins to select training observations

+
+
diff --git a/_autosummary/sup3r.models.html b/_autosummary/sup3r.models.html
new file mode 100644
index 000000000..cfe93351a
--- /dev/null
+++ b/_autosummary/sup3r.models.html
@@ -0,0 +1,762 @@
[HTML page scaffolding omitted — page title: sup3r.models — sup3r 0.2.1.dev62 documentation]

sup3r.models#

+

Sup3r Model Software

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

abstract

Abstract class defining the required interface for Sup3r model subclasses

base

Sup3r model software

conditional

Sup3r conditional moment model software

dc

Sup3r data-centric model software

linear

Simple models for super resolution such as linear interp models.

multi_step

Sup3r multi step model frameworks

solar_cc

Sup3r model software

surface

Special models for surface meteorological data.

utilities

Utilities shared across the sup3r.models module

+
+
diff --git a/_autosummary/sup3r.models.linear.LinearInterp.html b/_autosummary/sup3r.models.linear.LinearInterp.html
new file mode 100644
index 000000000..d6a9b0b3a
--- /dev/null
+++ b/_autosummary/sup3r.models.linear.LinearInterp.html
@@ -0,0 +1,1163 @@
[HTML page scaffolding omitted — page title: sup3r.models.linear.LinearInterp — sup3r 0.2.1.dev62 documentation]

sup3r.models.linear.LinearInterp#

+
+
+class LinearInterp(lr_features, s_enhance, t_enhance, t_centered=False, input_resolution=None)[source]#
+

Bases: AbstractInterface

+

Simple model to do linear interpolation on the spatial and temporal axes

+
+
Parameters:
+
    +
  • lr_features (list) – List of feature names that this model will operate on for both +input and output. This must match the feature axis ordering in the +array input to generate().

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • t_centered (bool) – Flag to switch time axis from time-beginning (Default, e.g. +interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to +time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15)

  • +
  • input_resolution (dict | None) – Resolution of the input data. e.g. {‘spatial’: ‘30km’, ‘temporal’: +‘60min’}. This is used to determine how to aggregate +high-resolution topography data.

  • +
+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

load(model_dir[, verbose])

Load the LinearInterp model with its params saved to the model_dir created with LinearInterp.save(model_dir)

save(out_dir)

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

hr_exo_features

Returns an empty list for LinearInterp model

hr_out_features

Get the list of output feature names that the generative model outputs

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get the list of input feature names that the generative model was trained on.

meta

Get meta data dictionary that defines the model params

model_params

Model parameters, used to save the model to disk

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

version_record

A record of important versions that this model was built with.

+
+
+
+classmethod load(model_dir, verbose=False)[source]#
+

Load the LinearInterp model with its params saved to the model_dir +created with LinearInterp.save(model_dir)

+
+
Parameters:
+
    +
  • model_dir (str) – Directory to load LinearInterp model files from. Must +have a model_params.json file containing “meta” key with all of the +class init args.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (LinearInterp) – Returns an initialized LinearInterp model

+
+
+
+ +
+
+property meta#
+

Get meta data dictionary that defines the model params

+
+ +
+
+property lr_features#
+

Get the list of input feature names that the generative model was +trained on.

+
+ +
+
+property hr_out_features#
+

Get the list of output feature names that the generative model +outputs

+
+ +
+
+property hr_exo_features#
+

Returns an empty list for LinearInterp model

+
+ +
+
+save(out_dir)[source]#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+generate(low_res, norm_in=False, un_norm_out=False, exogenous_data=None)[source]#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution spatiotemporal input data, a 5D array of shape: +(n_obs, spatial_1, spatial_2, temporal, n_features)

  • +
  • norm_in (bool) – This doesn't do anything for LinearInterp, but is kept to maintain the same interface as Sup3rGan

  • +
  • un_norm_out (bool) – This doesn't do anything for LinearInterp, but is kept to maintain the same interface as Sup3rGan

  • +
  • exogenous_data (list) – This doesn't do anything for LinearInterp, but is kept to maintain the same interface as Sup3rGan

  • +
+
+
Returns:
+

hi_res (ndarray) – high-resolution spatial output data, a 5D array of shape: +(n_obs, spatial_1, spatial_2, temporal, n_features)

+
+
+
+ +
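A hedged end-to-end sketch (the feature names are hypothetical; the import path assumes LinearInterp is exported from sup3r.models):

.. code-block:: python

    import numpy as np
    from sup3r.models import LinearInterp

    model = LinearInterp(lr_features=['u_100m', 'v_100m'],
                         s_enhance=2, t_enhance=4)

    # (n_obs, spatial_1, spatial_2, temporal, n_features)
    low_res = np.random.rand(1, 10, 10, 6, 2)
    hi_res = model.generate(low_res)
    assert hi_res.shape == (1, 20, 20, 24, 2)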
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units themselves are not strict, as long as the resolution of the exogenous data is specified in the same units when that data is extracted.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+property model_params#
+

Model parameters, used to save the model to disk

+
+
Returns:
+

dict

+
+
+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+property version_record#
+

A record of important versions that this model was built with.

+
+
Returns:
+

dict

+
+
+
+ +
+ +
diff --git a/_autosummary/sup3r.models.linear.html b/_autosummary/sup3r.models.linear.html
new file mode 100644
index 000000000..15379386f
--- /dev/null
+++ b/_autosummary/sup3r.models.linear.html
@@ -0,0 +1,739 @@
[HTML page scaffolding omitted — page title: sup3r.models.linear — sup3r 0.2.1.dev62 documentation]

sup3r.models.linear#

+

Simple models for super resolution such as linear interp models.

+

Classes

+
+ + + + + +

LinearInterp(lr_features, s_enhance, t_enhance)

Simple model to do linear interpolation on the spatial and temporal axes

+
+
diff --git a/_autosummary/sup3r.models.multi_step.MultiStepGan.html b/_autosummary/sup3r.models.multi_step.MultiStepGan.html
new file mode 100644
index 000000000..89866ff44
--- /dev/null
+++ b/_autosummary/sup3r.models.multi_step.MultiStepGan.html
@@ -0,0 +1,1204 @@
[HTML page scaffolding omitted — page title: sup3r.models.multi_step.MultiStepGan — sup3r 0.2.1.dev62 documentation]

sup3r.models.multi_step.MultiStepGan#

+
+
+class MultiStepGan(models)[source]#
+

Bases: AbstractInterface

+

Multi-Step GAN, which is really just an abstraction layer on top of one +or more Sup3rGan models that will perform their forward passes in +serial.

+
+
Parameters:
+

models (list | tuple) – An ordered list/tuple of one or more trained Sup3rGan models

+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + +

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

load(model_dirs[, model_kwargs, verbose])

Load the GANs with its sub-networks from a previously saved-to output directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

hr_exo_features

Get the list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get a tuple of meta data dictionaries for all models

model_params

Get a tuple of model parameters for all models

models

Get an ordered tuple of the Sup3rGan models that are part of this MultiStepGan

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

version_record

Get a tuple of version records from all models

+
+
+
+classmethod load(model_dirs, model_kwargs=None, verbose=True)[source]#
+

Load the GANs with its sub-networks from a previously saved-to +output directory.

+
+
Parameters:
+
    +
  • model_dirs (list | tuple) – An ordered list/tuple of one or more directories containing trained ++ saved Sup3rGan models created using the Sup3rGan.save() method.

  • +
  • model_kwargs (list | tuple) – An ordered list/tuple of one or more dictionaries containing kwargs +for the corresponding model in model_dirs

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (MultiStepGan) – Returns a pretrained gan model that was previously saved to +model_dirs

+
+
+
+ +
+
+property models#
+

Get an ordered tuple of the Sup3rGan models that are part of this +MultiStepGan

+
+ +
+
+property means#
+

Get the data normalization mean values. This is a tuple of means +from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values. This is a +tuple of stdevs from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+static seed(s=0)[source]#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)[source]#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution input data, usually a 4D or 5D array of shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self.means, self.stdevs attributes are available. The generator should always receive normalized data with mean=0 stdev=1.

  • +
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical +units

  • +
  • exogenous_data (ExoData) – ExoData object, which is a special dictionary containing exogenous data for each model step and info about how to use the data at each step.
  • +
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data, usually a 4D or 5D +array with shape: +(n_obs, spatial_1, spatial_2, n_features) +(n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
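A hedged usage sketch (the model directories are hypothetical outputs of Sup3rGan.save()):

.. code-block:: python

    from sup3r.models import MultiStepGan

    model = MultiStepGan.load(['./spatial_gan', './temporal_gan'])

    # low_res is a hypothetical 4D batch for the first (spatial) step;
    # each step's output is fed to the next step in series
    hi_res = model.generate(low_res, norm_in=True, un_norm_out=True)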
+
+property version_record#
+

Get a tuple of version records from all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property meta#
+

Get a tuple of meta data dictionaries for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the +generative model outputs.

+
+ +
+
+property hr_exo_features#
+

Get the list of high-resolution exogenous feature names the model uses. For the multi-step model, each entry in this list corresponds to one of the single-step models and is itself a list of hr_exo_features for that model.

+
+ +
+
+property model_params#
+

Get a tuple of model parameters for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units themselves are not strict, as long as the resolution of the exogenous data is specified in the same units when that data is extracted.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+ +
diff --git a/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.html b/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.html
new file mode 100644
index 000000000..1fbb90ba4
--- /dev/null
+++ b/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.html
@@ -0,0 +1,1244 @@
[HTML page scaffolding omitted — page title: sup3r.models.multi_step.MultiStepSurfaceMetGan — sup3r 0.2.1.dev62 documentation]

sup3r.models.multi_step.MultiStepSurfaceMetGan#

+
+
+class MultiStepSurfaceMetGan(models)[source]#
+

Bases: MultiStepGan

+

A two-step GAN where the first step is a spatial-only enhancement on a +4D tensor of near-surface temperature and relative humidity data, and the +second step is a (spatio)temporal enhancement on a 5D tensor.

+
+

Note

+

No inputs are needed for the first spatial-only surface meteorology +model. The spatial enhancement is determined by the low and high res +topography inputs in the exogenous_data kwargs in the +MultiStepSurfaceMetGan.generate() method.

+

The low res input to the spatial enhancement should be a 4D tensor of the shape (temporal, spatial_1, spatial_2, features) where temporal (usually the observation index) is a series of sequential timesteps that will be transposed to a 5D tensor of shape (1, spatial_1, spatial_2, temporal, features) and then fed to the 2nd-step (spatio)temporal GAN.

+
+
+
Parameters:
+

models (list | tuple) – An ordered list/tuple of one or more trained Sup3rGan models

+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + +

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

load([surface_model_class, ...])

Load the GANs with its sub-networks from a previously saved-to output directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

hr_exo_features

Get the list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get a tuple of meta data dictionaries for all models

model_params

Get a tuple of model parameters for all models

models

Get an ordered tuple of the Sup3rGan models that are part of this MultiStepGan

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

version_record

Get a tuple of version records from all models

+
+
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)[source]#
+

Use the generator model to generate high res data from low res +input. This is the public generate function.

+
+
Parameters:
+
    +
  • low_res (np.ndarray) – Low-resolution spatial input data, a 4D array of shape: +(n_obs, spatial_1, spatial_2, n_features), Where the feature +channel can include temperature_*m, relativehumidity_*m, and/or +pressure_*m

  • +
  • norm_in (bool) – Flag to normalize low_res input data if the self.means, self.stdevs attributes are available. The generator should always receive normalized data with mean=0 stdev=1.

  • +
  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical +units

  • +
  • exogenous_data (dict) – For the MultiStepSurfaceMetGan, this must be a nested dictionary with a main 'topography' key and two entries for exogenous_data['topography']['steps']. The first entry includes a 2D (lat, lon) array of low-resolution surface elevation data in meters (must match spatial_1, spatial_2 from low_res), and the second entry includes a 2D (lat, lon) array of high-resolution surface elevation data in meters, e.g.

    .. code-block:: JSON

        {'topography': {
            'steps': [
                {'model': 0, 'combine_type': 'input', 'data': lr_topo},
                {'model': 0, 'combine_type': 'output', 'data': hr_topo}
            ]
        }}
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data output from the 2nd +step (spatio)temporal GAN with a 5D array shape: +(1, spatial_1, spatial_2, n_temporal, n_features), Where the +feature channel can include temperature_*m, relativehumidity_*m, +and/or pressure_*m

+
+
+
+ +
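A hedged call sketch (lr_topo and hr_topo are hypothetical 2D (lat, lon) elevation arrays matching the low- and high-resolution grids):

.. code-block:: python

    exo = {'topography': {'steps': [
        {'model': 0, 'combine_type': 'input', 'data': lr_topo},
        {'model': 0, 'combine_type': 'output', 'data': hr_topo}]}}

    hi_res = model.generate(low_res, exogenous_data=exo)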
+
+classmethod load(surface_model_class='SurfaceSpatialMetModel', temporal_model_class='MultiStepGan', surface_model_kwargs=None, temporal_model_kwargs=None, verbose=True)[source]#
+

Load the GANs with its sub-networks from a previously saved-to +output directory.

+
+
Parameters:
+
    +
  • surface_model_class (str) – Name of surface model class to be retrieved from sup3r.models, this +is typically “SurfaceSpatialMetModel”

  • +
  • temporal_model_class (str) – Name of temporal model class to be retrieved from sup3r.models, +this is typically “Sup3rGan”

  • +
  • surface_model_kwargs (None | dict) – Optional additional kwargs to surface_model_class.load()

  • +
  • temporal_model_kwargs (None | dict) – Optional additional kwargs to temporal_model_class.load()

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

out (MultiStepGan) – Returns a pretrained gan model that was previously saved to +model_dirs

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from +layer attributes. Used in model training during high res coarsening

+
+ +
+
+property hr_exo_features#
+

Get the list of high-resolution exogenous feature names the model uses. For the multi-step model, each entry in this list corresponds to one of the single-step models and is itself a list of hr_exo_features for that model.

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the +generative model outputs.

+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for +spatial models and 5D for spatiotemporal models. This gives the input +to the first step if the model is multi-step. Returns 5 for linear +models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units themselves are not strict, as long as the resolution of the exogenous data is specified in the same units when that data is extracted.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property means#
+

Get the data normalization mean values. This is a tuple of means +from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+property meta#
+

Get a tuple of meta data dictionaries for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property model_params#
+

Get a tuple of model parameters for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property models#
+

Get an ordered tuple of the Sup3rGan models that are part of this +MultiStepGan

+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary +{‘spatial’: ‘…km’, ‘temporal’: ‘…min’}. This is computed from the +input resolution and the enhancement factors.

+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step +model this is just [self.s_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save linear model params. This directory will be +created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, +‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, +‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative +model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened +high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values. This is a +tuple of stdevs from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in +model training during high res coarsening and also in forward pass +routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step +model this is just [self.t_enhance]. This is used to determine +shapes of needed exogenous data in forward pass routine

+
+ +
+
+property version_record#
+

Get a tuple of version records from all models

+
+
Returns:
+

tuple

+
+
+
+ +
+ +
diff --git a/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.html b/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.html
new file mode 100644
index 000000000..0413a78b6
--- /dev/null
+++ b/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.html
@@ -0,0 +1,1374 @@
[HTML page scaffolding omitted — page title: sup3r.models.multi_step.SolarMultiStepGan — sup3r 0.2.1.dev62 documentation]

sup3r.models.multi_step.SolarMultiStepGan#

+
+
+class SolarMultiStepGan(spatial_solar_models, spatial_wind_models, temporal_solar_models, t_enhance=None)[source]#
+

Bases: MultiStepGan

+

Special multi step model for solar clearsky ratio super resolution.

+

This model takes in two parallel models for wind-only and solar-only +spatial super resolutions, then combines them into a 3-channel +high-spatial-resolution input (clearsky_ratio, u_200m, v_200m) for a solar +temporal super resolution model.

+
+
Parameters:
+
    +
  • spatial_solar_models (MultiStepGan) – A loaded MultiStepGan object representing the one or more spatial +super resolution steps in this composite MultiStepGan +model that inputs and outputs clearsky_ratio

  • +
  • spatial_wind_models (MultiStepGan) – A loaded MultiStepGan object representing the one or more spatial +super resolution steps in this composite MultiStepGan +model that inputs and outputs wind u/v features and must include +u_200m + v_200m as output features.

  • +
  • temporal_solar_models (MultiStepGan) – A loaded MultiStepGan object representing the one or more +(spatio)temporal super resolution steps in this composite +SolarMultiStepGan model. This is the final step in the custom solar +downscaling methodology.

  • +
  • t_enhance (int | None) – Optional argument to fix or update the temporal enhancement of the +model. This can be used to manipulate the output shape to match +whatever padded shape the sup3r forward pass module expects. If +this differs from the t_enhance value based on model layers the +output will be padded so that the output shape matches low_res * +t_enhance for the time dimension.

  • +
+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

load(spatial_solar_model_dirs, ...[, ...])

Load the GANs with its sub-networks from a previously saved-to output directory.

preflight()

Run some preflight checks to make sure the loaded models can work together.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

temporal_pad(low_res, hi_res[, mode])

Optionally add temporal padding to the 5D generated output array

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

hr_exo_features

Get the list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of output feature names that the last solar spatiotemporal generative model in this SolarMultiStepGan outputs.

idf_solar

Get an array of feature indices for the subset of features required for the spatial_solar_models.

idf_wind

Get an array of feature indices for the subset of features required for the spatial_wind_models.

idf_wind_out

Get an array of spatial_wind_models output feature indices that are required for input to the temporal_solar_models.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get a tuple of meta data dictionaries for all models

model_params

Get a tuple of model parameters for all models

models

Get an ordered tuple of the Sup3rGan models that are part of this MultiStepGan

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

spatial_solar_models

Get the MultiStepGan object for the spatial-only solar model(s)

spatial_wind_models

Get the MultiStepGan object for the spatial-only wind model(s)

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

temporal_solar_models

Get the MultiStepGan object for the (spatio)temporal model(s)

version_record

Get a tuple of version records from all models

+
+
+
+preflight()[source]#
+

Run some preflight checks to make sure the loaded models can work +together.

+
+ +
+
+property spatial_solar_models#
+

Get the MultiStepGan object for the spatial-only solar model(s)

+
+
Returns:
+

MultiStepGan

+
+
+
+ +
+
+property spatial_wind_models#
+

Get the MultiStepGan object for the spatial-only wind model(s)

+
+
Returns:
+

MultiStepGan

+
+
+
+ +
+
+property temporal_solar_models#
+

Get the MultiStepGan object for the (spatio)temporal model(s)

+
+
Returns:
+

MultiStepGan

+
+
+
+ +
+
+property meta#
+

Get a tuple of meta data dictionaries for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. +This includes low-resolution features that might be supplied +exogenously at inference time but that were in the low-res batches +during training

+
+ +
+
+property hr_out_features#
+

Get the list of output feature names that the last solar +spatiotemporal generative model in this SolarMultiStepGan outputs.

+
+ +
+
+property idf_wind#
+

Get an array of feature indices for the subset of features required +for the spatial_wind_models. This excludes topography which is assumed +to be provided as exogenous_data.

+
+ +
+
+property idf_wind_out#
+

Get an array of spatial_wind_models output feature indices that are +required for input to the temporal_solar_models. Typically this is the +indices of u_200m + v_200m from the output features of +spatial_wind_models

+
+ +
+
+property idf_solar#
+

Get an array of feature indices for the subset of features required for the spatial_solar_models. This excludes topography, which is assumed to be provided as exogenous_data.

+
+ +
+
+generate(low_res, norm_in=True, un_norm_out=True, exogenous_data=None)[source]#
+

Use the generator model to generate high res data from low res input. This is the public generate function.

+
+
Parameters:
+
  • low_res (np.ndarray) – Low-resolution input data to the 1st step spatial GAN, which is a 4D array of shape: (temporal, spatial_1, spatial_2, n_features). This should include all of the self.lr_features, which is a concatenation of both the solar and wind spatial model features. The topography feature might be removed from this input and present in the exogenous_data input.

  • norm_in (bool) – Flag to normalize low_res input data if the self.means, self.stdevs attributes are available. The generator should always receive normalized data with mean=0 stdev=1.

  • un_norm_out (bool) – Flag to un-normalize synthetically generated output data to physical units

  • exogenous_data (ExoData | dict) – ExoData object (or dict that will be cast to ExoData) with data arrays for each exogenous data step. Each array has 3D or 4D shape: (spatial_1, spatial_2, n_features) or (temporal, spatial_1, spatial_2, n_features). It’s assumed that the spatial_solar_models do not require exogenous_data and only use clearsky_ratio.
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data output from the 2nd step (spatio)temporal GAN with a 5D array shape: (1, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
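A minimal end-to-end sketch of the load + generate flow described above; the model directories, grid sizes, and random input below are hypothetical placeholders, not part of the documented API:

```python
import numpy as np

from sup3r.models import SolarMultiStepGan

# Hypothetical directories previously created with Sup3rGan.save()
model = SolarMultiStepGan.load(
    spatial_solar_model_dirs=['./solar_spatial_gan'],
    spatial_wind_model_dirs=['./wind_spatial_gan'],
    temporal_solar_model_dirs=['./solar_temporal_gan'],
)

# 4D low-res input: (temporal, spatial_1, spatial_2, n_features), with the
# feature axis ordered to match model.lr_features (solar + wind features)
low_res = np.random.uniform(0, 1, (24, 10, 10, len(model.lr_features)))
hi_res = model.generate(low_res.astype(np.float32))
# hi_res is 5D: (1, spatial_1, spatial_2, n_temporal, n_features)
```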
+
+temporal_pad(low_res, hi_res, mode='reflect')[source]#
+

Optionally add temporal padding to the 5D generated output array

+
+
Parameters:
+
  • low_res (np.ndarray) – Low-resolution input data to the 1st step spatial GAN, which is a 4D array of shape: (temporal, spatial_1, spatial_2, n_features).

  • hi_res (ndarray) – Synthetically generated high-resolution data output from the 2nd step (spatio)temporal GAN with a 5D array shape: (1, spatial_1, spatial_2, n_temporal, n_features)

  • mode (str) – Padding mode for np.pad()
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data output from the 2nd step (spatio)temporal GAN with a 5D array shape: (1, spatial_1, spatial_2, n_temporal, n_features), with the temporal axis padded with self._temporal_pad on either side.

+
+
+
+ +
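Conceptually this mirrors np.pad on the temporal axis of the 5D output. A standalone sketch of the effect (not the library implementation; the pad width here is an illustrative stand-in for self._temporal_pad):

```python
import numpy as np

hi_res = np.zeros((1, 20, 20, 44, 2))  # 5D generated output
t_pad = 2                              # stand-in for self._temporal_pad

padded = np.pad(
    hi_res,
    pad_width=((0, 0), (0, 0), (0, 0), (t_pad, t_pad), (0, 0)),
    mode='reflect',  # same default mode documented above
)
print(padded.shape)  # (1, 20, 20, 48, 2): 44 + 2 * t_pad time steps
```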
+
+classmethod load(spatial_solar_model_dirs, spatial_wind_model_dirs, temporal_solar_model_dirs, t_enhance=None, verbose=True)[source]#
+

Load the GANs with their sub-networks from a previously saved-to output directory.

+
+
Parameters:
+
  • spatial_solar_model_dirs (str | list | tuple) – An ordered list/tuple of one or more directories containing trained + saved Sup3rGan models created using the Sup3rGan.save() method. This must contain only spatial solar models that input/output 4D tensors.

  • spatial_wind_model_dirs (str | list | tuple) – An ordered list/tuple of one or more directories containing trained + saved Sup3rGan models created using the Sup3rGan.save() method. This must contain only spatial wind models that input/output 4D tensors.

  • temporal_solar_model_dirs (str | list | tuple) – An ordered list/tuple of one or more directories containing trained + saved Sup3rGan models created using the Sup3rGan.save() method. This must contain only (spatio)temporal solar models that input/output 5D tensors that are the concatenated output of the spatial_solar_models and the spatial_wind_models.

  • t_enhance (int | None) – Optional argument to fix or update the temporal enhancement of the model. This can be used to manipulate the output shape to match whatever padded shape the sup3r forward pass module expects. If this differs from the t_enhance value based on model layers, the output will be padded so that the output shape matches low_res * t_enhance for the time dimension.

  • verbose (bool) – Flag to log information about the loaded model.
+
+
Returns:
+

out (SolarMultiStepGan) – Returns a pretrained GAN model that was previously saved to model_dirs

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from layer attributes. Used in model training during high res coarsening

+
+ +
+
+property hr_exo_features#
+

Get list of high-resolution exogenous feature names the model uses. For the multi-step model, each entry in this list corresponds to one of the single-step models and is itself a list of hr_exo_features for that model.

+
+ +
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for spatial models and 5D for spatiotemporal models. This gives the input to the first step if the model is multi-step. Returns 5 for linear models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units are not strict as long as the resolution of the exogenous data, when extracting exogenous data, is specified in the same units.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+property means#
+

Get the data normalization mean values. This is a tuple of means from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+property model_params#
+

Get a tuple of model parameters for all models

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property models#
+

Get an ordered tuple of the Sup3rGan models that are part of this MultiStepGan

+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. This is computed from the input resolution and the enhancement factors.

+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step model this is just [self.s_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save model params. This directory will be created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, ‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, ‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values. This is a tuple of stdevs from all models.

+
+
Returns:
+

tuple | np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step model this is just [self.t_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

+
+ +
+
+property version_record#
+

Get a tuple of version records from all models

+
+
Returns:
+

tuple

+
+
+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.models.multi_step.html b/_autosummary/sup3r.models.multi_step.html
new file mode 100644
index 000000000..678b32824
--- /dev/null
+++ b/_autosummary/sup3r.models.multi_step.html
@@ -0,0 +1,747 @@

sup3r.models.multi_step — sup3r 0.2.1.dev62 documentation

sup3r.models.multi_step#

+

Sup3r multi step model frameworks

+

TODO: SolarMultiStepGan can be cleaned up a little with the output padding and t_enhance argument moved to SolarCC.

+

Classes

+

MultiStepGan(models)

Multi-Step GAN, which is really just an abstraction layer on top of one or more Sup3rGan models that will perform their forward passes in serial.

MultiStepSurfaceMetGan(models)

A two-step GAN where the first step is a spatial-only enhancement on a 4D tensor of near-surface temperature and relative humidity data, and the second step is a (spatio)temporal enhancement on a 5D tensor.

SolarMultiStepGan(spatial_solar_models, ...)

Special multi step model for solar clearsky ratio super resolution.

+
\ No newline at end of file
diff --git a/_autosummary/sup3r.models.solar_cc.SolarCC.html b/_autosummary/sup3r.models.solar_cc.SolarCC.html
new file mode 100644
index 000000000..34820eb11
--- /dev/null
+++ b/_autosummary/sup3r.models.solar_cc.SolarCC.html
@@ -0,0 +1,2226 @@

sup3r.models.solar_cc.SolarCC — sup3r 0.2.1.dev62 documentation

sup3r.models.solar_cc.SolarCC#

+
+
+class SolarCC(*args, t_enhance=None, **kwargs)[source]#
+

Bases: Sup3rGan

+

Solar climate change model.

+
+

Note

+
+
Modifications to standard Sup3rGan
  • Content loss is only on the n_days of the center 8 daylight hours of the daily true+synthetic high res samples

  • Discriminator only sees n_days of the center 8 daylight hours of the daily true high res sample.

  • Discriminator sees random n_days of 8-hour samples of the daily synthetic high res sample.

  • Includes padding on high resolution output of generate() so that forward pass always outputs a multiple of 24 hours.
+
+
+
+

Add optional t_enhance adjustment.

+
+
Parameters:
+
  • *args (list) – List of arguments to parent class

  • t_enhance (int | None) – Optional argument to fix or update the temporal enhancement of the model. This can be used to manipulate the output shape to match whatever padded shape the sup3r forward pass module expects. If this differs from the t_enhance value based on model layers, the output will be padded so that the output shape matches low_res * t_enhance for the time dimension.

  • **kwargs (Mappable) – Keyword arguments for parent class
+
+
+
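A brief usage sketch; the model directory below is a hypothetical placeholder:

```python
from sup3r.models import SolarCC

# Load a trained model and pin the temporal enhancement so that generate()
# output is always a multiple of 24 hours, per the note above
model = SolarCC.load('./solar_cc_gan', t_enhance=24)
print(model.t_enhance)  # 24
```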

Methods

+

calc_loss(hi_res_true, hi_res_gen[, ...])

Calculate the GAN loss function using generated and true high resolution data.

calc_loss_disc(disc_out_true, disc_out_gen)

Calculate the loss term for the discriminator model (either the spatial or temporal discriminator).

calc_loss_gen_advers(disc_out_gen)

Calculate the adversarial component of the loss term for the generator model.

calc_loss_gen_content(hi_res_true, hi_res_gen)

Calculate the content loss term for the generator model.

calc_val_loss(batch_handler, ...)

Calculate the validation loss at the current state of model training

check_batch_handler_attrs(batch_handler)

Not all batch handlers have the following attributes.

dict_to_tensorboard(entry)

Write data to tensorboard log file.

discriminate(hi_res[, norm_in])

Run the discriminator model on a hi resolution input field.

early_stop(history, column[, threshold, n_epoch])

Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.

finish_epoch(epoch, epochs, t0, ...[, extras])

Perform finishing checks after an epoch is done training

generate(low_res, **kwargs)

Override parent method to apply padding on high res output.

get_high_res_exo_input(high_res)

Get exogenous feature data from high_res

get_loss_fun(loss)

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

get_optimizer_config(optimizer)

Get a config that defines the current model optimizer

get_optimizer_state(optimizer)

Get a set of state variables for the optimizer

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_single_grad(low_res, hi_res_true, ...[, ...])

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

get_weight_update_fraction(history, ...[, ...])

Get the factor by which to multiply previous adversarial loss weight

init_optimizer(optimizer, learning_rate)

Initialize keras optimizer object.

init_weights(lr_shape, hr_shape[, device])

Initialize the generator and discriminator weights with device placement.

load(model_dir[, t_enhance, verbose])

Load the GAN with its sub-networks from a previously saved-to output directory.

load_network(model, name)

Load a CustomNetwork object from hidden layers config, .json file config, or .pkl file saved pre-trained model.

load_saved_params(out_dir[, verbose])

Load saved model_params (you need this and the gen+disc models to load a full model).

log_loss_details(loss_details[, level])

Log the loss details to the module logger.

norm_input(low_res)

Normalize low resolution data being input to the generator.

profile_to_tensorboard(name)

Write profile data to tensorboard log file.

run_gradient_descent(low_res, hi_res_true, ...)

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

save(out_dir)

Save the GAN with its sub-networks to a directory.

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

set_norm_stats(new_means, new_stdevs)

Set the normalization statistics associated with a data batch handler to model attributes.

temporal_pad(low_res, hi_res[, mode])

Optionally add temporal padding to the 5D generated output array

train(batch_handler, input_resolution, n_epoch)

Train the GAN model on real low res data and real high res data

train_epoch(batch_handler, ...[, multi_gpu])

Train the GAN for one epoch.

un_norm_output(output)

Un-normalize synthetically generated output data to physical units

update_adversarial_weights(history, ...)

Update spatial / temporal adversarial loss weights based on training fraction history.

update_loss_details(loss_details, new_data, ...)

Update a dictionary of loss_details with loss information from a new batch.

update_optimizer([option])

Update optimizer by changing current configuration

+
+

Attributes

+

DAYLIGHT_HOURS

STARTING_HOUR

STRIDE_LEN

discriminator

Get the discriminator model.

discriminator_weights

Get a list of layer weights and bias terms for the discriminator model.

generator

Get the generative model.

generator_weights

Get a list of layer weights and bias terms for the generator model.

history

Model training history DataFrame (None if not yet trained)

hr_exo_features

Get list of high-resolution exogenous feature names the model uses.

hr_out_features

Get the list of high-resolution output feature names that the generative model outputs.

input_dims

Get dimension of model generator input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get a list of low-resolution features input to the generative model.

means

Get the data normalization mean values.

meta

Get meta data dictionary that defines how the model was created

model_params

Model parameters, used to save model to disk

optimizer

Get the tensorflow optimizer to perform gradient descent calculations for the generative network.

optimizer_disc

Get the tensorflow optimizer to perform gradient descent calculations for the discriminator network.

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

stdevs

Get the data normalization standard deviation values.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

total_batches

Record of total number of batches for logging.

version_record

A record of important versions that this model was built with.

weights

Get a list of all the layer weights and bias terms for the generator and discriminator networks

+
+
+
+init_weights(lr_shape, hr_shape, device=None)[source]#
+

Initialize the generator and discriminator weights with device placement.

+
+
Parameters:
+
  • lr_shape (tuple) – Shape of one batch of low res input data for sup3r resolution. Note that the batch size (axis=0) must be included, but the actual batch size doesn’t really matter.

  • hr_shape (tuple) – Shape of one batch of high res input data for sup3r resolution. Note that the batch size (axis=0) must be included, but the actual batch size doesn’t really matter.

  • device (str | None) – Option to place model weights on a device. If None, self.default_device will be used.
+
+
+
+ +
+
+calc_loss(hi_res_true, hi_res_gen, weight_gen_advers=0.001, train_gen=True, train_disc=False)[source]#
+

Calculate the GAN loss function using generated and true high resolution data.

+
+
Parameters:
+
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • hi_res_gen (tf.Tensor) – Super-resolved high resolution spatiotemporal data generated by the generative model.

  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator vs. the discriminator.

  • train_gen (bool) – True if generator is being trained, then loss=loss_gen

  • train_disc (bool) – True if disc is being trained, then loss=loss_disc
+
+
Returns:
+

  • loss (tf.Tensor) – 0D tensor representing the loss value for the network being trained (either generator or one of the discriminators)

  • loss_details (dict) – Namespace of the breakdown of loss components
+

+
+
+
+ +
+
+temporal_pad(low_res, hi_res, mode='reflect')[source]#
+

Optionally add temporal padding to the 5D generated output array

+
+
Parameters:
+
  • low_res (np.ndarray) – Low-resolution input data to the spatio(temporal) GAN, which is a 5D array of shape: (1, spatial_1, spatial_2, n_temporal, n_features).

  • hi_res (ndarray) – Synthetically generated high-resolution data output from the (spatio)temporal GAN with a 5D array shape: (1, spatial_1, spatial_2, n_temporal, n_features)

  • mode (str) – Padding mode for np.pad()
+
+
Returns:
+

hi_res (ndarray) – Synthetically generated high-resolution data output from the (spatio)temporal GAN with a 5D array shape: (1, spatial_1, spatial_2, n_temporal, n_features), with the temporal axis padded with self._temporal_pad on either side.

+
+
+
+ +
+
+generate(low_res, **kwargs)[source]#
+

Override parent method to apply padding on high res output.

+
+ +
+
+classmethod load(model_dir, t_enhance=None, verbose=True)[source]#
+

Load the GAN with its sub-networks from a previously saved-to output directory.

+
+
Parameters:
+
  • model_dir (str) – Directory to load GAN model files from.

  • t_enhance (int | None) – Optional argument to fix or update the temporal enhancement of the model. This can be used to manipulate the output shape to match whatever padded shape the sup3r forward pass module expects. If this differs from the t_enhance value based on model layers, the output will be padded so that the output shape matches low_res * t_enhance for the time dimension.

  • verbose (bool) – Flag to log information about the loaded model.
+
+
Returns:
+

out (BaseModel) – Returns a pretrained gan model that was previously saved to out_dir

+
+
+
+ +
+
+static calc_loss_disc(disc_out_true, disc_out_gen)#
+

Calculate the loss term for the discriminator model (either the spatial or temporal discriminator).

+
+
Parameters:
+
  • disc_out_true (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting only on ground truth data hi_res_true (not on hi_res_gen).

  • disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting only on synthetic data hi_res_gen (not on hi_res_true).
+
+
Returns:
+

loss_disc (tf.Tensor) – 0D tensor discriminator model loss for either the spatial or temporal component of the super resolution generated output.

+
+
+
+ +
+
+static calc_loss_gen_advers(disc_out_gen)#
+

Calculate the adversarial component of the loss term for the generator model.

+
+
Parameters:
+

disc_out_gen (tf.Tensor) – Raw discriminator outputs from the discriminator model predicting only on hi_res_gen (not on hi_res_true).

+
+
Returns:
+

loss_gen_advers (tf.Tensor) – 0D tensor generator model loss for the adversarial component of the generator loss term.

+
+
+
+ +
+
+calc_loss_gen_content(hi_res_true, hi_res_gen)#
+

Calculate the content loss term for the generator model.

+
+
Parameters:
+
  • hi_res_true (tf.Tensor) – Ground truth high resolution spatiotemporal data.

  • hi_res_gen (tf.Tensor) – Super-resolved high resolution spatiotemporal data generated by the generative model.
+
+
Returns:
+

loss_gen_s (tf.Tensor) – 0D tensor generator model loss for the content loss comparing the hi res ground truth to the hi res synthetically generated output.

+
+
+
+ +
+
+calc_val_loss(batch_handler, weight_gen_advers, loss_details)#
+

Calculate the validation loss at the current state of model training

+
+
Parameters:
+
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator vs. the discriminator.

  • loss_details (dict) – Namespace of the breakdown of loss components
+
+
Returns:
+

loss_details (dict) – Same as input but now includes val_* loss info

+
+
+
+ +
+
+static check_batch_handler_attrs(batch_handler)#
+

Not all batch handlers have the following attributes. So we perform some sanitation before sending to set_model_params

+
+ +
+
+dict_to_tensorboard(entry)#
+

Write data to tensorboard log file. This is usually a loss_details dictionary.

+
+
Parameters:
+

entry (dict) – Dictionary of values to write to tensorboard log file

+
+
+
+ +
+
+discriminate(hi_res, norm_in=False)#
+

Run the discriminator model on a hi resolution input field.

+
+
Parameters:
+
  • hi_res (np.ndarray) – Real or fake high res data in a 4D or 5D tensor: (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features)

  • norm_in (bool) – Flag to normalize low_res input data if the self._means, self._stdevs attributes are available. The disc should always receive normalized data with mean=0 stdev=1.
+
+
Returns:
+

out (np.ndarray) – Discriminator output logits

+
+
+
+ +
+
+property discriminator#
+

Get the discriminator model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property discriminator_weights#
+

Get a list of layer weights and bias terms for the discriminator model.

+
+
Returns:
+

list

+
+
+
+ +
+
+static early_stop(history, column, threshold=0.005, n_epoch=5)#
+

Determine whether to stop training early based on nearly no change to validation loss for a certain number of consecutive epochs.

+
+
Parameters:
+
  • history (pd.DataFrame | None) – Model training history

  • column (str) – Column from the model training history to evaluate for early termination.

  • threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • n_epoch (int) – The number of consecutive epochs that satisfy the threshold that warrants an early stop.
+
+
Returns:
+

stop (bool) – Flag to stop training (True) or keep going (False).

+
+
+
+ +
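A minimal standalone sketch of this stopping criterion, assuming a plain list of validation losses rather than the real history DataFrame (so not the library implementation):

```python
def should_stop(val_losses, threshold=0.005, n_epoch=5):
    """True if the last n_epoch relative changes are all below threshold."""
    if len(val_losses) < n_epoch + 1:
        return False
    recent = val_losses[-(n_epoch + 1):]
    rel_diffs = [abs(b - a) / abs(a) for a, b in zip(recent, recent[1:])]
    return all(d < threshold for d in rel_diffs)

# 0.1 -> 0.0998 is a relative diff of 0.002 < 0.005, so this plateau stops:
print(should_stop([0.5, 0.1, 0.1, 0.0999, 0.0999, 0.0998, 0.0998]))  # True
```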
+
+finish_epoch(epoch, epochs, t0, loss_details, checkpoint_int, out_dir, early_stop_on, early_stop_threshold, early_stop_n_epoch, extras=None)#
+

Perform finishing checks after an epoch is done training

+
+
Parameters:
+
  • epoch (int) – Epoch number that is finishing

  • epochs (list) – List of epochs being iterated through

  • t0 (float) – Starting time of training.

  • loss_details (dict) – Namespace of the breakdown of loss components

  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • out_dir (str) – Directory to save checkpoint models. Should have {epoch} in the directory name. This directory will be created if it does not already exist.

  • early_stop_on (str | None) – If not None, this should be a column in the training history to evaluate for early stopping (e.g. validation_loss_gen, validation_loss_disc). If this value in this history decreases by an absolute fractional relative difference of less than 0.01 for more than 5 epochs in a row, the training will stop early.

  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that warrants an early stop.

  • extras (dict | None) – Extra kwargs/parameters to save in the epoch history.
+
+
Returns:
+

stop (bool) – Flag to early stop training.

+
+
+
+ +
+
+property generator#
+

Get the generative model.

+
+
Returns:
+

phygnn.base.CustomNetwork

+
+
+
+ +
+
+property generator_weights#
+

Get a list of layer weights and bias terms for the generator model.

+
+
Returns:
+

list

+
+
+
+ +
+
+get_high_res_exo_input(high_res)#
+

Get exogenous feature data from high_res

+
+
Parameters:
+

high_res (tf.Tensor) – Ground truth high resolution spatiotemporal data.

+
+
Returns:
+

exo_data (dict) – Dictionary of exogenous feature data used as input to tf_generate. e.g. {'topography': tf.Tensor(...)}

+
+
+
+ +
+
+static get_loss_fun(loss)#
+

Get the initialized loss function class from the sup3r loss library or the tensorflow losses.

+
+
Parameters:
+

loss (str | dict) – Loss function class name from sup3r.utilities.loss_metrics (prioritized) or tensorflow.keras.losses. Defaults to tf.keras.losses.MeanSquaredError. This can be provided as a dict with kwargs for loss functions with extra parameters. e.g. {‘SpatialExtremesLoss’: {‘weight’: 0.5}}

+
+
Returns:
+

out (tf.keras.losses.Loss) – Initialized loss function class that is callable, e.g. if “MeanSquaredError” is requested, this will return an instance of tf.keras.losses.MeanSquaredError()

+
+
+
+ +
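For example, a sketch assuming Sup3rGan is importable from sup3r.models and using the loss names the docstring cites:

```python
from sup3r.models import Sup3rGan

# Request a tensorflow loss by class name:
mse = Sup3rGan.get_loss_fun('MeanSquaredError')

# Request a sup3r loss with extra kwargs via the dict form shown above:
extremes = Sup3rGan.get_loss_fun({'SpatialExtremesLoss': {'weight': 0.5}})
```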
+
+static get_optimizer_config(optimizer)#
+

Get a config that defines the current model optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

config (dict) – Optimizer config

+
+
+
+ +
+
+classmethod get_optimizer_state(optimizer)#
+

Get a set of state variables for the optimizer

+
+
Parameters:
+

optimizer (tf.keras.optimizers.Optimizer) – TF-Keras optimizer object (e.g., Adam)

+
+
Returns:
+

state (dict) – Optimizer state variables

+
+
+
+ +
+
+get_s_enhance_from_layers()#
+

Compute factor by which model will enhance spatial resolution from layer attributes. Used in model training during high res coarsening

+
+ +
+
+get_single_grad(low_res, hi_res_true, training_weights, device_name=None, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true), do not update weights, just return gradient details.

+
+
Parameters:
+
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)

  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)

  • training_weights (list) – A list of layer weights that are to-be-trained based on the current loss weight values.

  • device_name (None | str) – Optional tensorflow device name for GPU placement. Note that if a GPU is available, variables will be placed on that GPU even if device_name=None.

  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method
+
+
Returns:
+

  • grad (list) – a list or nested structure of Tensors (or IndexedSlices, or None, or CompositeTensor) representing the gradients for the training_weights

  • loss_details (dict) – Namespace of the breakdown of loss components
+

+
+
+
+ +
+
+get_t_enhance_from_layers()#
+

Compute factor by which model will enhance temporal resolution from layer attributes. Used in model training during high res coarsening

+
+ +
+
+static get_weight_update_fraction(history, comparison_key, update_bounds=(0.5, 0.95), update_frac=0.0)#
+

Get the factor by which to multiply previous adversarial loss weight

+
+
Parameters:
+
  • history (dict) – Dictionary with information on how often discriminators were trained during previous epoch.

  • comparison_key (str) – history key to use for update check

  • update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If history[comparison_key] < update_bounds[0] then the weight will be increased by (1 + update_frac). If history[comparison_key] > update_bounds[1] then the weight will be decreased by 1 / (1 + update_frac).

  • update_frac (float) – Fraction by which to increase/decrease adversarial loss weight
+
+
Returns:
+

float – Factor by which to multiply old weight to get updated weight

+
+
+
+ +
+
+property history#
+

Model training history DataFrame (None if not yet trained)

+
+
Returns:
+

pandas.DataFrame | None

+
+
+
+ +
+
+property hr_exo_features#
+

Get list of high-resolution exogenous feature names the model uses. If the model has N concat or add layers this list will be the last N features in the training features list. The ordering is assumed to be the same as the order of concat or add layers. If the training features are […, topo, sza] and the model has 2 concat or add layers, exo features will be [topo, sza]. Topo will then be used in the first concat layer and sza will be used in the second

+
+ +
+
+property hr_out_features#
+

Get the list of high-resolution output feature names that the generative model outputs.

+
+ +
+
+static init_optimizer(optimizer, learning_rate)#
+

Initialize keras optimizer object.

+
+
Parameters:
+
  • optimizer (tf.keras.optimizers.Optimizer | dict | None | str) – Instantiated tf.keras.optimizers object or a dict optimizer config from tf.keras.optimizers.get_config(). None defaults to Adam.

  • learning_rate (float, optional) – Optimizer learning rate. Not used if optimizer input arg is a pre-initialized object or if optimizer input arg is a config dict.
+
+
Returns:
+

optimizer (tf.keras.optimizers.Optimizer) – Initialized optimizer object.

+
+
+
+ +
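A short sketch of the documented behavior (optimizer objects and configs are standard tf.keras objects):

```python
from sup3r.models import Sup3rGan

# None defaults to Adam at the given learning rate
opt = Sup3rGan.init_optimizer(optimizer=None, learning_rate=1e-4)

# A config dict round-trips through get_optimizer_config(); in that case
# the learning_rate argument is ignored, per the docstring above
cfg = Sup3rGan.get_optimizer_config(opt)
opt2 = Sup3rGan.init_optimizer(optimizer=cfg, learning_rate=0.5)
```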
+
+property input_dims#
+

Get dimension of model generator input. This is usually 4D for spatial models and 5D for spatiotemporal models. This gives the input to the first step if the model is multi-step. Returns 5 for linear models.

+
+
Returns:
+

int

+
+
+
+ +
+
+property input_resolution#
+

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units are not strict as long as the resolution of the exogenous data, when extracting exogenous data, is specified in the same units.

+
+ +
+
+property is_4d#
+

Check if model expects spatial only input

+
+ +
+
+property is_5d#
+

Check if model expects spatiotemporal input

+
+ +
+
+load_network(model, name)#
+

Load a CustomNetwork object from hidden layers config, .json file config, or .pkl file saved pre-trained model.

+
+
Parameters:
+
  • model (str | dict) – Model hidden layers config, a .json with “hidden_layers” key, or a .pkl for a saved pre-trained model.

  • name (str) – Name of the model to be loaded
+
+
Returns:
+

model (phygnn.CustomNetwork) – CustomNetwork object initialized from the model input.

+
+
+
+ +
+
+static load_saved_params(out_dir, verbose=True)#
+

Load saved model_params (you need this and the gen+disc models to load a full model).

+
+
Parameters:
+
    +
  • out_dir (str) – Directory to load model files from.

  • +
  • verbose (bool) – Flag to log information about the loaded model.

  • +
+
+
Returns:
+

params (dict) – Model parameters loaded from disk json file. This should be the same as self.model_params with an additional ‘history’ entry. Should be all the kwargs you need to init a model.

+
+
+
+ +
+
+static log_loss_details(loss_details, level='INFO')#
+

Log the loss details to the module logger.

+
+
Parameters:
+
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a running average at the current state in the epoch.

  • level (str) – Log level (e.g. INFO, DEBUG)
+
+
+
+ +
+
+property lr_features#
+

Get a list of low-resolution features input to the generative model. This includes low-resolution features that might be supplied exogenously at inference time but that were in the low-res batches during training

+
+ +
+
+property means#
+

Get the data normalization mean values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property meta#
+

Get meta data dictionary that defines how the model was created

+
+ +
+
+property model_params#
+

Model parameters, used to save model to disk

+
+
Returns:
+

dict

+
+
+
+ +
+
+norm_input(low_res)#
+

Normalize low resolution data being input to the generator.

+
+
Parameters:
+

low_res (np.ndarray) – Un-normalized low-resolution input data in physical units, usually a 4D or 5D array of shape: (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
Returns:
+

low_res (np.ndarray) – Normalized low-resolution input data, usually a 4D or 5D array of shape: (n_obs, spatial_1, spatial_2, n_features) or (n_obs, spatial_1, spatial_2, n_temporal, n_features)

+
+
+
+ +
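The transform itself is a standard per-feature z-score. A standalone numeric sketch with hypothetical means/stdevs (not the library code):

```python
import numpy as np

means = np.array([280.0, 5.0])   # per-feature means (hypothetical)
stdevs = np.array([10.0, 2.0])   # per-feature stdevs (hypothetical)

low_res = np.random.uniform(270.0, 290.0, (8, 10, 10, 2))
normed = (low_res - means) / stdevs      # what norm_input computes
restored = normed * stdevs + means       # what un_norm_output inverts
assert np.allclose(restored, low_res)
```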
+
+property optimizer#
+

Get the tensorflow optimizer to perform gradient descent calculations for the generative network. This is functionally identical to optimizer_disc if no special optimizer model or learning rate was specified for the disc.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property optimizer_disc#
+

Get the tensorflow optimizer to perform gradient descent calculations for the discriminator network.

+
+
Returns:
+

tf.keras.optimizers.Optimizer

+
+
+
+ +
+
+property output_resolution#
+

Resolution of output data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. This is computed from the input resolution and the enhancement factors.

+
+ +
+
+profile_to_tensorboard(name)#
+

Write profile data to tensorboard log file.

+
+
Parameters:
+

name (str) – Tag name to use for profile info

+
+
+
+ +
+
+run_gradient_descent(low_res, hi_res_true, training_weights, optimizer=None, multi_gpu=False, **calc_loss_kwargs)#
+

Run gradient descent for one mini-batch of (low_res, hi_res_true) and update weights

+
+
Parameters:
+
  • low_res (np.ndarray) – Real low-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)

  • hi_res_true (np.ndarray) – Real high-resolution data in a 4D or 5D array: (n_observations, spatial_1, spatial_2, features) or (n_observations, spatial_1, spatial_2, temporal, features)

  • training_weights (list) – A list of layer weights that are to-be-trained based on the current loss weight values.

  • optimizer (tf.keras.optimizers.Optimizer) – Optimizer class to use to update weights. This can be different if you’re training just the generator or one of the discriminator models. Defaults to the generator optimizer.

  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with.

  • calc_loss_kwargs (dict) – Kwargs to pass to the self.calc_loss() method
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+property s_enhance#
+

Factor by which model will enhance spatial resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

+
+ +
+
+property s_enhancements#
+

List of spatial enhancement factors. In the case of a single step model this is just [self.s_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

+
+ +
+
+save(out_dir)#
+

Save the GAN with its sub-networks to a directory.

+
+
Parameters:
+

out_dir (str) – Directory to save GAN model files. This directory will be created if it does not already exist.

+
+
+
+ +
+
+save_params(out_dir)#
+
+
Parameters:
+

out_dir (str) – Directory to save model params. This directory will be created if it does not already exist.

+
+
+
+ +
+
+static seed(s=0)#
+

Set the random seed for reproducible results.

+
+
Parameters:
+

s (int) – Random seed

+
+
+
+ +
+
+set_model_params(**kwargs)#
+

Set parameters used for training the model

+
+
Parameters:
+

kwargs (dict) – Keyword arguments including ‘input_resolution’, ‘lr_features’, ‘hr_exo_features’, ‘hr_out_features’, ‘smoothed_features’, ‘s_enhance’, ‘t_enhance’, ‘smoothing’

+
+
+
+ +
+
+set_norm_stats(new_means, new_stdevs)#
+

Set the normalization statistics associated with a data batch handler to model attributes.

+
+
Parameters:
+
  • new_means (dict | None) – Set of mean values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.

  • new_stdevs (dict | None) – Set of stdev values for data normalization keyed by feature name. Can be used to maintain a consistent normalization scheme between transfer learning domains.
+
+
+
+ +
+
+property smoothed_features#
+

Get the list of smoothed input feature names that the generative model was trained on.

+
+ +
+
+property smoothing#
+

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

+
+ +
+
+property stdevs#
+

Get the data normalization standard deviation values.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property t_enhance#
+

Factor by which model will enhance temporal resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

+
+ +
+
+property t_enhancements#
+

List of temporal enhancement factors. In the case of a single step model this is just [self.t_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

+
+ +
+
+property total_batches#
+

Record of total number of batches for logging.

+
+ +
+
+train(batch_handler, input_resolution, n_epoch, weight_gen_advers=0.001, train_gen=True, train_disc=True, disc_loss_bounds=(0.45, 0.6), checkpoint_int=None, out_dir='./gan_{epoch}', early_stop_on=None, early_stop_threshold=0.005, early_stop_n_epoch=5, adaptive_update_bounds=(0.9, 0.99), adaptive_update_fraction=0.0, multi_gpu=False, tensorboard_log=True, tensorboard_profile=False)#
+

Train the GAN model on real low res data and real high res data

+
+
Parameters:
+
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • input_resolution (dict) – Dictionary specifying spatiotemporal input resolution. e.g. {‘temporal’: ‘60min’, ‘spatial’: ‘30km’}

  • n_epoch (int) – Number of epochs to train on

  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator vs. the discriminator.

  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which the discriminator will not train unless train_disc=True and train_gen=False.

  • checkpoint_int (int | None) – Epoch interval at which to save checkpoint models.

  • out_dir (str) – Directory to save checkpoint GAN models. Should have {epoch} in the directory name. This directory will be created if it does not already exist.

  • early_stop_on (str | None) – If not None, this should be a column in the training history to evaluate for early stopping (e.g. validation_loss_gen, validation_loss_disc). If this value in this history decreases by an absolute fractional relative difference of less than 0.01 for more than 5 epochs in a row, the training will stop early.

  • early_stop_threshold (float) – The absolute relative fractional difference in validation loss between subsequent epochs below which an early termination is warranted. E.g. if val losses were 0.1 and 0.0998 the relative diff would be calculated as 0.0002 / 0.1 = 0.002, which would be less than the default threshold of 0.005 and would satisfy the condition for early termination.

  • early_stop_n_epoch (int) – The number of consecutive epochs that satisfy the threshold that warrants an early stop.

  • adaptive_update_bounds (tuple) – Tuple specifying allowed range for loss_details[comparison_key]. If history[comparison_key] < threshold_range[0] then the weight will be increased by (1 + update_frac). If history[comparison_key] > threshold_range[1] then the weight will be decreased by 1 / (1 + update_frac).

  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial weights for adaptive updates

  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with. If true and multiple gpus are found, default_device should be set to /cpu:0

  • tensorboard_log (bool) – Whether to write log file for use with tensorboard. Log data can be viewed with tensorboard --logdir <logdir> where <logdir> is the parent directory of out_dir, and pointing the browser to the printed address.

  • tensorboard_profile (bool) – Whether to export profiling information to tensorboard. This can then be viewed in the tensorboard dashboard under the profile tab

TODO: (1) args here are getting excessive; might be time for some refactoring. (2) calc_val_loss should be done in a separate thread from train_epoch so they can be done concurrently. This would be especially important for batch handlers which require val data, like dc handlers. (3) Would like an automatic way to exit the batch handler thread instead of manually calling .stop() here.
+
+
+
+ +
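A hedged sketch of a typical call, wrapped as a function so the model and batch handler construction (which depend on your data) stay out of scope; all kwargs mirror the documented names:

```python
def run_training(model, batch_handler):
    """Sketch: train a Sup3rGan given a prebuilt
    sup3r.preprocessing.BatchHandler."""
    model.train(
        batch_handler,
        input_resolution={'spatial': '30km', 'temporal': '60min'},
        n_epoch=10,
        weight_gen_advers=0.001,
        train_gen=True,
        train_disc=True,
        checkpoint_int=5,
        out_dir='./gan_{epoch}',
        early_stop_on='validation_loss_gen',
        early_stop_threshold=0.005,
        early_stop_n_epoch=5,
    )
```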
+
+train_epoch(batch_handler, weight_gen_advers, train_gen, train_disc, disc_loss_bounds, multi_gpu=False)#
+

Train the GAN for one epoch.

+
+
Parameters:
+
  • batch_handler (sup3r.preprocessing.BatchHandler) – BatchHandler object to iterate through

  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator vs. the discriminator.

  • train_gen (bool) – Flag whether to train the generator for this set of epochs

  • train_disc (bool) – Flag whether to train the discriminator for this set of epochs

  • disc_loss_bounds (tuple) – Lower and upper bounds for the discriminator loss outside of which the discriminators will not train unless train_disc=True and train_gen=False.

  • multi_gpu (bool) – Flag to break up the batch for parallel gradient descent calculations on multiple gpus. If True and multiple GPUs are present, each batch from the batch_handler will be divided up between the GPUs and the resulting gradient from each GPU will constitute a single gradient descent step with the nominal learning rate that the model was initialized with. If true and multiple gpus are found, default_device should be set to /cpu:0
+
+
Returns:
+

loss_details (dict) – Namespace of the breakdown of loss components

+
+
+
+ +
+
+un_norm_output(output)#
+

Un-normalize synthetically generated output data to physical units

+
+
Parameters:
+

output (tf.Tensor | np.ndarray) – Synthetically generated high-resolution data

+
+
Returns:
+

output (np.ndarray) – Synthetically generated high-resolution data

+
+
+
+ +
+
+update_adversarial_weights(history, adaptive_update_fraction, adaptive_update_bounds, weight_gen_advers, train_disc)#
+

Update spatial / temporal adversarial loss weights based on training fraction history.

+
+
Parameters:
+
  • history (dict) – Dictionary with information on how often discriminators were trained during current and previous epochs.

  • adaptive_update_fraction (float) – Amount by which to increase or decrease adversarial loss weights for adaptive updates

  • adaptive_update_bounds (tuple) – Tuple specifying allowed range for history[comparison_key]. If history[comparison_key] < update_bounds[0] then the weight will be increased by (1 + update_frac). If history[comparison_key] > update_bounds[1] then the weight will be decreased by 1 / (1 + update_frac).

  • weight_gen_advers (float) – Weight factor for the adversarial loss component of the generator vs. the discriminator.

  • train_disc (bool) – Whether the discriminator was set to be trained during the previous epoch
+
+
Returns:
+

weight_gen_advers (float) – Updated weight factor for the adversarial loss component of the generator vs. the discriminator.

+
+
+
+ +
+
+static update_loss_details(loss_details, new_data, batch_len, prefix=None)#
+

Update a dictionary of loss_details with loss information from a new batch.

+
+
Parameters:
+
  • loss_details (dict) – Namespace of the breakdown of loss components where each value is a running average at the current state in the epoch.

  • new_data (dict) – Namespace of the breakdown of loss components for a single new batch.

  • batch_len (int) – Length of the incoming batch.

  • prefix (None | str) – Option to prefix the names of the loss data when saving to the loss_details dictionary.
+
+
Returns:
+

loss_details (dict) – Same as input loss_details but with running averages updated.

+
+
+
+ +
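A minimal sketch of the running-average bookkeeping this implies; the names here are illustrative, not the library's exact keys:

```python
def update_running_avg(avg, n_obs, new_value, batch_len):
    """Fold one batch's loss into a running average weighted by batch size."""
    total = avg * n_obs + new_value * batch_len
    return total / (n_obs + batch_len), n_obs + batch_len

avg, n = 0.0, 0
for batch_loss, batch_len in [(0.9, 16), (0.7, 16), (0.5, 8)]:
    avg, n = update_running_avg(avg, n, batch_loss, batch_len)
print(round(avg, 4))  # 0.74: weighted mean over 40 observations
```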
+
+update_optimizer(option='generator', **kwargs)#
+

Update optimizer by changing current configuration

+
+
Parameters:
+
  • option (str) – Which optimizer to update. Can be “generator”, “discriminator”, or “all”

  • kwargs (dict) – kwargs to use for optimizer configuration update
+
+
+
+ +
+
+property version_record#
+

A record of important versions that this model was built with.

+
+
Returns:
+

dict

+
+
+
+ +
+
+property weights#
+

Get a list of all the layer weights and bias terms for the generator and discriminator networks

+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.models.solar_cc.html b/_autosummary/sup3r.models.solar_cc.html
new file mode 100644
index 000000000..d0c0b119b
--- /dev/null
+++ b/_autosummary/sup3r.models.solar_cc.html
@@ -0,0 +1,739 @@

sup3r.models.solar_cc — sup3r 0.2.1.dev62 documentation

sup3r.models.solar_cc#

+

Sup3r model software

+

Classes

+

SolarCC(*args[, t_enhance])

Solar climate change model.

\ No newline at end of file
diff --git a/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.html b/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.html
new file mode 100644
index 000000000..fb77f3ea8
--- /dev/null
+++ b/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.html
@@ -0,0 +1,1534 @@

sup3r.models.surface.SurfaceSpatialMetModel — sup3r 0.2.1.dev62 documentation

sup3r.models.surface.SurfaceSpatialMetModel#

+
+
+class SurfaceSpatialMetModel(lr_features, s_enhance, noise_adders=None, temp_lapse=None, w_delta_temp=None, w_delta_topo=None, pres_div=None, pres_exp=None, interp_method='LANCZOS', input_resolution=None, fix_bias=True)[source]#
+

Bases: LinearInterp

+

Model to spatially downscale daily-average near-surface temperature, relative humidity, and pressure

+

Note that this model can also operate on temperature_*m, relativehumidity_*m, and pressure_*m datasets that are not strictly at the surface but could be near hub height.

+
+
Parameters:
+
  • lr_features (list) – List of feature names that this model will operate on for both input and output. This must match the feature axis ordering in the array input to generate(). Typically this is a list containing: temperature_*m, relativehumidity_*m, and pressure_*m. The list can contain multiple instances of each variable at different heights. relativehumidity_*m entries must have corresponding temperature_*m entries at the same hub height.

  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • noise_adders (float | list | None) – Option to add gaussian noise to spatial model output. Noise will be normally distributed with mean of 0 and standard deviation = noise_adders. noise_adders can be a single value or a list corresponding to the lr_features list. None is no noise. The addition of noise has been shown to help downstream temporal-only models produce diurnal cycles in regions where there is minimal change in topography. noise_adders values around 0.07C (temperature) and 0.1% (relative humidity) have been shown to be effective. This is unnecessary if daily min/max temperatures are provided as low res training features.

  • temp_lapse (None | float) – Temperature lapse rate: change in degrees C/K per meter. Defaults to the cls.TEMP_LAPSE attribute.

  • w_delta_temp (None | float) – Weight for the delta-temperature feature for the relative humidity linear regression model. Defaults to the cls.W_DELTA_TEMP attribute.

  • w_delta_topo (None | float) – Weight for the delta-topography feature for the relative humidity linear regression model. Defaults to the cls.W_DELTA_TOPO attribute.

  • pres_div (None | float) – Divisor factor in the pressure scale height equation. Defaults to the cls.PRES_DIV attribute.

  • pres_exp (None | float) – Exponential factor in the pressure scale height equation. Defaults to the cls.PRES_EXP attribute.

  • interp_method (str) – Name of the interpolation method to use from PIL.Image.Resampling (NEAREST, BILINEAR, BICUBIC, LANCZOS). LANCZOS is the default and has been tested to work best for SurfaceSpatialMetModel.

  • fix_bias (bool) – Some local bias can be introduced by the bilinear interp + lapse rate; this flag will attempt to correct that bias by using the low-resolution deviation from the input data

  • input_resolution (dict | None) – Resolution of the input data. e.g. {‘spatial’: ‘30km’, ‘temporal’: ‘60min’}. This is used to determine how to aggregate high-resolution topography data.
+
+
+
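A construction sketch; the feature names and resolutions below are illustrative, chosen to follow the temperature_*m / relativehumidity_*m / pressure_*m convention above:

```python
from sup3r.models import SurfaceSpatialMetModel

model = SurfaceSpatialMetModel(
    lr_features=['temperature_2m', 'relativehumidity_2m', 'pressure_0m'],
    s_enhance=5,  # 5x spatial enhancement
    input_resolution={'spatial': '30km', 'temporal': '60min'},
)
```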

Methods

+

downscale_arr(arr, s_enhance[, method, fix_bias])

Downscale a 2D array of data using the Image.resize() method

downscale_pres(single_lr_pres, topo_lr, topo_hr)

Downscale pressure raster data at a single observation.

downscale_rh(single_lr_rh, single_lr_temp, ...)

Downscale relative humidity raster data at a single observation.

downscale_temp(single_lr_temp, topo_lr, topo_hr)

Downscale temperature raster data at a single observation.

fix_downscaled_bias(single_lr, single_hr[, ...])

Fix any bias introduced by the spatial downscaling with lapse rate.

generate(low_res[, norm_in, un_norm_out, ...])

Use the generator model to generate high res data from low res input.

get_s_enhance_from_layers()

Compute factor by which model will enhance spatial resolution from layer attributes.

get_t_enhance_from_layers()

Compute factor by which model will enhance temporal resolution from layer attributes.

load(model_dir[, verbose])

Load the LinearInterp model with its params saved to the model_dir created with LinearInterp.save(model_dir)

save(out_dir)

save_params(out_dir)

seed([s])

Set the random seed for reproducible results.

set_model_params(**kwargs)

Set parameters used for training the model

train(true_hr_temp, true_hr_rh, ...)

Trains the relative humidity linear model.

+
+

Attributes

+

PRES_DIV

Air pressure scaling equation divisor variable: 101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)

PRES_EXP

Air pressure scaling equation exponent variable: 101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)

TEMP_LAPSE

Change in degrees C/K per meter

W_DELTA_TEMP

Weight for the delta-temperature feature for the relative humidity linear regression model.

W_DELTA_TOPO

Weight for the delta-topography feature for the relative humidity linear regression model.

feature_inds_other

Get the feature index values for the features that are not temperature, pressure, or relative humidity.

feature_inds_pres

Get the feature index values for the pressure features.

feature_inds_rh

Get the feature index values for the relative humidity features.

feature_inds_temp

Get the feature index values for the temperature features.

hr_exo_features

Returns an empty list for LinearInterp model

hr_out_features

Get the list of output feature names that the generative model outputs

input_dims

Get dimension of model input.

input_resolution

Resolution of input data.

is_4d

Check if model expects spatial only input

is_5d

Check if model expects spatiotemporal input

lr_features

Get the list of input feature names that the generative model was trained on.

meta

Get meta data dictionary that defines the model params

model_params

Model parameters, used to save model to disc

output_resolution

Resolution of output data.

s_enhance

Factor by which model will enhance spatial resolution.

s_enhancements

List of spatial enhancement factors.

smoothed_features

Get the list of smoothed input feature names that the generative model was trained on.

smoothing

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

t_enhance

Factor by which model will enhance temporal resolution.

t_enhancements

List of temporal enhancement factors.

version_record

A record of important versions that this model was built with.

TEMP_LAPSE = 0.0065#

Change in degrees C/K per meter

Type:

Temperature lapse rate

PRES_DIV = 44307.69231#

Air pressure scaling equation divisor variable: 101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)

PRES_EXP = 5.25328#

Air pressure scaling equation exponent variable: 101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)

W_DELTA_TEMP = -3.9924283#

Weight for the delta-temperature feature for the relative humidity linear regression model.

W_DELTA_TOPO = -0.01736911#

Weight for the delta-topography feature for the relative humidity linear regression model.
property feature_inds_temp#

Get the feature index values for the temperature features.

property feature_inds_pres#

Get the feature index values for the pressure features.

property feature_inds_rh#

Get the feature index values for the relative humidity features.

property feature_inds_other#

Get the feature index values for the features that are not temperature, pressure, or relative humidity.
classmethod fix_downscaled_bias(single_lr, single_hr, method=Resampling.LANCZOS)[source]#

Fix any bias introduced by the spatial downscaling with lapse rate.

Parameters:

  • single_lr (np.ndarray) – Single timestep raster data with shape (lat, lon) matching the low-resolution input data.

  • single_hr (np.ndarray) – Single timestep downscaled raster data with shape (lat, lon) matching the high-resolution input data.

  • method (Image.Resampling.LANCZOS) – An Image.Resampling method (NEAREST, BILINEAR, BICUBIC, LANCZOS). NEAREST enforces zero bias but makes slightly more spatial seams.

Returns:

single_hr (np.ndarray) – Single timestep downscaled raster data with shape (lat, lon) matching the high-resolution input data.
classmethod downscale_arr(arr, s_enhance, method=Resampling.LANCZOS, fix_bias=False)[source]#

Downscale a 2D array of data using the PIL.Image resize() method.

Parameters:

  • arr (np.ndarray) – 2D raster data, typically spatial daily average data with shape (lat, lon)

  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • method (Image.Resampling.LANCZOS) – An Image.Resampling method (NEAREST, BILINEAR, BICUBIC, LANCZOS). LANCZOS is the default and has been tested to work best for SurfaceSpatialMetModel.

  • fix_bias (bool) – Some local bias can be introduced by the bilinear interp + lapse rate. This flag will attempt to correct that bias by using the low-resolution deviation from the input data.
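A minimal usage sketch for downscale_arr (the array contents and enhancement factor are arbitrary examples):

    import numpy as np
    from PIL.Image import Resampling

    arr_lr = np.random.uniform(0, 1, (10, 10)).astype(np.float32)
    arr_hr = SurfaceSpatialMetModel.downscale_arr(
        arr_lr, s_enhance=5, method=Resampling.LANCZOS)
    # expected high-res shape: (50, 50)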
downscale_temp(single_lr_temp, topo_lr, topo_hr)[source]#

Downscale temperature raster data at a single observation.

This model uses a simple lapse rate that adjusts temperature as a function of elevation. The process is as follows:

  • add a scale factor to the low-res temperature field: topo_lr * TEMP_LAPSE

  • perform bilinear interpolation of the scaled temperature field.

  • subtract the scale factor from the high-res temperature field: topo_hr * TEMP_LAPSE

Parameters:

  • single_lr_temp (np.ndarray) – Single timestep temperature (deg C) raster data with shape (lat, lon) matching the low-resolution input data.

  • topo_lr (np.ndarray) – Low-resolution surface elevation data in meters with shape (lat, lon)

  • topo_hr (np.ndarray) – High-resolution surface elevation data in meters with shape (lat, lon)

Returns:

hi_res_temp (np.ndarray) – Single timestep temperature (deg C) raster data with shape (lat, lon) matching the high-resolution output data.
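A minimal numpy/PIL sketch of the three lapse-rate steps above (illustrative only; the real method also routes through downscale_arr with the configured interpolation method and optional bias fix):

    import numpy as np
    from PIL import Image

    def lapse_rate_downscale(temp_lr, topo_lr, topo_hr, lapse=0.0065):
        # 1) add the elevation scale factor to the low-res field
        scaled = temp_lr + topo_lr * lapse
        # 2) bilinear interpolation of the scaled field to the high-res grid
        hr_size = (topo_hr.shape[1], topo_hr.shape[0])  # PIL wants (width, height)
        interp = Image.fromarray(scaled.astype(np.float32)).resize(
            hr_size, Image.Resampling.BILINEAR)
        # 3) subtract the scale factor using high-res topography
        return np.asarray(interp) - topo_hr * lapse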
downscale_rh(single_lr_rh, single_lr_temp, single_hr_temp, topo_lr, topo_hr)[source]#

Downscale relative humidity raster data at a single observation.

Here's a description of the humidity scaling model:

  • Take low-resolution and high-resolution daily average temperature, relative humidity, and topography data.

  • Downscale all low-resolution variables using a bilinear image enhancement

  • The target model output is the difference between the interpolated low-res relative humidity and the true high-res relative humidity. Calculate this difference as a linear function of the same difference in the temperature and topography fields.

  • Use this linear regression model to calculate high-res relative humidity fields when provided low-res input of relative humidity along with low-res/high-res pairs of temperature and topography. A schematic of this regression is sketched after this method's documentation.

Parameters:

  • single_lr_rh (np.ndarray) – Single timestep relative humidity (%) raster data with shape (lat, lon) matching the low-resolution input data.

  • single_lr_temp (np.ndarray) – Single timestep temperature (deg C) raster data with shape (lat, lon) matching the low-resolution input data.

  • single_hr_temp (np.ndarray) – Single timestep temperature (deg C) raster data with shape (lat, lon) matching the high-resolution output data.

  • topo_lr (np.ndarray) – Low-resolution surface elevation data in meters with shape (lat, lon)

  • topo_hr (np.ndarray) – High-resolution surface elevation data in meters with shape (lat, lon)

Returns:

hi_res_rh (np.ndarray) – Single timestep relative humidity (%) raster data with shape (lat, lon) matching the high-resolution output data.
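A schematic of the regression form described above, written as a sketch (the sign convention and exact feature construction are assumptions inferred from this description; the documented W_DELTA_* defaults are used):

    def rh_regression(rh_lr_interp, delta_temp, delta_topo,
                      w_delta_temp=-3.9924283, w_delta_topo=-0.01736911):
        # rh_lr_interp: bilinearly enhanced low-res RH on the high-res grid
        # delta_temp:   high-res temperature minus interpolated low-res temperature
        # delta_topo:   high-res topography minus interpolated low-res topography
        return rh_lr_interp + w_delta_temp * delta_temp + w_delta_topo * delta_topo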
downscale_pres(single_lr_pres, topo_lr, topo_hr)[source]#

Downscale pressure raster data at a single observation.

This model uses a simple exponential scale height that adjusts pressure as a function of elevation. The process is as follows:

  • add a scale factor to the low-res pressure field: 101325 * (1 - (1 - topo_lr / PRES_DIV)**PRES_EXP)

  • perform bilinear interpolation of the scaled pressure field.

  • subtract the scale factor from the high-res pressure field: 101325 * (1 - (1 - topo_hr / PRES_DIV)**PRES_EXP)

Parameters:

  • single_lr_pres (np.ndarray) – Single timestep pressure (Pa) raster data with shape (lat, lon) matching the low-resolution input data.

  • topo_lr (np.ndarray) – Low-resolution surface elevation data in meters with shape (lat, lon)

  • topo_hr (np.ndarray) – High-resolution surface elevation data in meters with shape (lat, lon)

Returns:

hi_res_pres (np.ndarray) – Single timestep pressure (Pa) raster data with shape (lat, lon) matching the high-resolution output data.
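For reference, the scale factor used in each step above follows directly from the PRES_DIV and PRES_EXP attributes (a sketch of the arithmetic only, not the full method):

    def pressure_scale_factor(topo, pres_div=44307.69231, pres_exp=5.25328):
        # 101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP), in Pa
        return 101325 * (1 - (1 - topo / pres_div) ** pres_exp)

    # add at low res, interpolate, then subtract at high res:
    #   scaled = single_lr_pres + pressure_scale_factor(topo_lr)
    #   hi_res_pres = interp(scaled) - pressure_scale_factor(topo_hr)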
get_s_enhance_from_layers()#

Compute factor by which model will enhance spatial resolution from layer attributes. Used in model training during high res coarsening

get_t_enhance_from_layers()#

Compute factor by which model will enhance temporal resolution from layer attributes. Used in model training during high res coarsening

property hr_exo_features#

Returns an empty list for LinearInterp model

property hr_out_features#

Get the list of output feature names that the generative model outputs

property input_resolution#

Resolution of input data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. The numbers are required to be integers in the units specified. The units are not strict as long as the resolution of the exogenous data, when extracting exogenous data, is specified in the same units.

property is_4d#

Check if model expects spatial only input

property is_5d#

Check if model expects spatiotemporal input

classmethod load(model_dir, verbose=False)#

Load the LinearInterp model with its params saved to the model_dir created with LinearInterp.save(model_dir)

Parameters:

  • model_dir (str) – Directory to load LinearInterp model files from. Must have a model_params.json file containing "meta" key with all of the class init args.

  • verbose (bool) – Flag to log information about the loaded model.

Returns:

out (LinearInterp) – Returns an initialized LinearInterp model

property lr_features#

Get the list of input feature names that the generative model was trained on.

property model_params#

Model parameters, used to save model to disc

Returns:

dict

property output_resolution#

Resolution of output data. Given as a dictionary {'spatial': '...km', 'temporal': '...min'}. This is computed from the input resolution and the enhancement factors.

property s_enhance#

Factor by which model will enhance spatial resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

property s_enhancements#

List of spatial enhancement factors. In the case of a single step model this is just [self.s_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

save(out_dir)#

Parameters:

out_dir (str) – Directory to save linear model params. This directory will be created if it does not already exist.

save_params(out_dir)#

Parameters:

out_dir (str) – Directory to save linear model params. This directory will be created if it does not already exist.

static seed(s=0)#

Set the random seed for reproducible results.

Parameters:

s (int) – Random seed

set_model_params(**kwargs)#

Set parameters used for training the model

Parameters:

kwargs (dict) – Keyword arguments including 'input_resolution', 'lr_features', 'hr_exo_features', 'hr_out_features', 'smoothed_features', 's_enhance', 't_enhance', 'smoothing'

property smoothed_features#

Get the list of smoothed input feature names that the generative model was trained on.

property smoothing#

Value of smoothing parameter used in gaussian filtering of coarsened high res data.

property t_enhance#

Factor by which model will enhance temporal resolution. Used in model training during high res coarsening and also in forward pass routine to determine shape of needed exogenous data

property t_enhancements#

List of temporal enhancement factors. In the case of a single step model this is just [self.t_enhance]. This is used to determine shapes of needed exogenous data in forward pass routine

property version_record#

A record of important versions that this model was built with.

Returns:

dict

property input_dims#

Get dimension of model input. This is 4 for linear and surface models (n_obs, spatial_1, spatial_2, temporal)

Returns:

int
generate(low_res, norm_in=False, un_norm_out=False, exogenous_data=None)[source]#

Use the generator model to generate high res data from low res input. This is the public generate function.

Parameters:

  • low_res (np.ndarray) – Low-resolution spatial input data, a 4D array of shape: (n_obs, spatial_1, spatial_2, n_features), where the feature channel can include temperature_*m, relativehumidity_*m, and/or pressure_*m

  • norm_in (bool) – This doesn't do anything for this SurfaceSpatialMetModel, but is kept to keep the same interface as Sup3rGan

  • un_norm_out (bool) – This doesn't do anything for this SurfaceSpatialMetModel, but is kept to keep the same interface as Sup3rGan

  • exogenous_data (dict) – For the SurfaceSpatialMetModel, this must be a nested dictionary with a main 'topography' key and two entries for exogenous_data['topography']['steps']. The first entry includes a 2D (lat, lon) array of low-resolution surface elevation data in meters (must match spatial_1, spatial_2 from low_res), and the second entry includes a 2D (lat, lon) array of high-resolution surface elevation data in meters. e.g.:

    {'topography': {
        'steps': [{'data': lr_topo}, {'data': hr_topo}]}}

Returns:

hi_res (ndarray) – High-resolution spatial output data, a 4D array of shape: (n_obs, spatial_1, spatial_2, n_features), where the feature channel can include temperature_*m, relativehumidity_*m, and/or pressure_*m
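Continuing the initialization sketch above, a hedged example of calling generate() with the required topography steps (array shapes assume s_enhance=5 and three input features):

    import numpy as np

    low_res = np.random.uniform(0, 1, (1, 10, 10, 3)).astype(np.float32)
    lr_topo = np.random.uniform(0, 500, (10, 10))
    hr_topo = np.random.uniform(0, 500, (50, 50))

    exo = {'topography': {'steps': [{'data': lr_topo}, {'data': hr_topo}]}}
    hi_res = model.generate(low_res, exogenous_data=exo)
    # expected hi_res shape: (1, 50, 50, 3)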
property meta#

Get meta data dictionary that defines the model params
train(true_hr_temp, true_hr_rh, true_hr_topo, input_resolution)[source]#

Trains the relative humidity linear model. The temperature and surface lapse rate models are parameterizations taken from the NSRDB and are not trained.

Parameters:

  • true_hr_temp (np.ndarray) – True high-resolution daily average temperature data in a 3D array of shape (lat, lon, n_days)

  • true_hr_rh (np.ndarray) – True high-resolution daily average relative humidity data in a 3D array of shape (lat, lon, n_days)

  • true_hr_topo (np.ndarray) – High-resolution surface elevation data in meters with shape (lat, lon)

  • input_resolution (dict) – Dictionary of spatial and temporal input resolution. e.g. {'spatial': '20km', 'temporal': '60min'}

Returns:

  • w_delta_temp (float) – Weight for the delta-temperature feature for the relative humidity linear regression model.

  • w_delta_topo (float) – Weight for the delta-topography feature for the relative humidity linear regression model.

sup3r.models.surface#

Special models for surface meteorological data.

Classes

SurfaceSpatialMetModel(lr_features, s_enhance)

Model to spatially downscale daily-average near-surface temperature, relative humidity, and pressure

sup3r.models.utilities.TrainingSession#

class TrainingSession(batch_handler, model, **kwargs)[source]#

Bases: object

Wrapper to gracefully exit batch handler thread during training, upon a keyboard interruption.

Parameters:

  • batch_handler (BatchHandler) – Batch iterator

  • model (Sup3rGan) – Gan model to run in new thread

  • **kwargs (dict) – Model keyword args

Methods

run()

Wrap model.train().

run()[source]#

Wrap model.train().
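A usage sketch (batch_handler and model construction are omitted; forwarding n_epoch to model.train() and calling run() explicitly are assumptions about typical use):

    from sup3r.models.utilities import TrainingSession

    # batch_handler: a BatchHandler instance; model: a Sup3rGan instance
    session = TrainingSession(batch_handler, model, n_epoch=100)
    session.run()  # wraps model.train(); Ctrl-C exits the batch thread cleanly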

sup3r.models.utilities.get_optimizer_class#

get_optimizer_class(conf)[source]#

Get optimizer class from keras
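For illustration, a plausible call pattern (the expected layout of conf is an assumption based on common keras optimizer configs):

    conf = {'name': 'Adam', 'learning_rate': 1e-4}  # assumed config layout
    OptimizerClass = get_optimizer_class(conf)
    optimizer = OptimizerClass(learning_rate=conf['learning_rate'])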

sup3r.models.utilities#

Utilities shared across the sup3r.models module

Functions

get_optimizer_class(conf)

Get optimizer class from keras

st_interp(low, s_enhance, t_enhance[, ...])

Spatiotemporal bilinear interpolation for low resolution field on a regular grid.

Classes

TrainingSession(batch_handler, model, **kwargs)

Wrapper to gracefully exit batch handler thread during training, upon a keyboard interruption.

sup3r.models.utilities.st_interp#

st_interp(low, s_enhance, t_enhance, t_centered=False)[source]#

Spatiotemporal bilinear interpolation for low resolution field on a regular grid. Used to provide baseline for comparison with gan output

Parameters:

  • low (ndarray) – Low resolution field to interpolate. (spatial_1, spatial_2, temporal)

  • s_enhance (int) – Factor by which to enhance the spatial domain

  • t_enhance (int) – Factor by which to enhance the temporal domain

  • t_centered (bool) – Flag to switch time axis from time-beginning (default, e.g. interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15)

Returns:

ndarray – Spatiotemporally interpolated low resolution output
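A minimal usage sketch (shapes are arbitrary examples):

    import numpy as np
    from sup3r.models.utilities import st_interp

    low = np.random.uniform(0, 1, (10, 10, 24))  # (spatial_1, spatial_2, temporal)
    baseline = st_interp(low, s_enhance=5, t_enhance=2)
    # expected output shape: (50, 50, 48)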

sup3r.pipeline.forward_pass.ForwardPass#

class ForwardPass(strategy, node_index=0)[source]#

Bases: object

Class to run forward passes on all chunks provided by the given ForwardPassStrategy. The chunks provided by the strategy are all passed through the GAN generator to produce high resolution output.

Initialize ForwardPass with ForwardPassStrategy. The strategy provides the data chunks to run forward passes on.

Parameters:

  • strategy (ForwardPassStrategy) – ForwardPassStrategy instance with information on data chunks to run forward passes on.

  • node_index (int) – Index of node used to run forward pass

Methods


get_input_chunk([chunk_index, mode])

Get ForwardPassChunk instance for the given chunk index.

get_node_cmd(config)

Get a CLI call to initialize ForwardPassStrategy and run ForwardPass on a single node based on an input config.

pad_source_data(input_data, pad_width, exo_data)

Pad the edges of the source data from the data handler.

run(strategy, node_index)

Runs forward passes on all spatiotemporal chunks for the given node index.

run_chunk(chunk, model_kwargs, model_class, ...)

Run a forward pass on single spatiotemporal chunk.

run_generator(data_chunk, hr_crop_slices, model)

Run forward pass of the generator on smallest data chunk.


Attributes


OUTPUT_HANDLER_CLASS

meta

Meta data dictionary for the forward pass run (to write to output files).

get_input_chunk(chunk_index=0, mode='reflect')[source]#

Get ForwardPassChunk instance for the given chunk index.

property meta#

Meta data dictionary for the forward pass run (to write to output files).

pad_source_data(input_data, pad_width, exo_data, mode='reflect')[source]#

Pad the edges of the source data from the data handler.

Parameters:

  • input_data (Union[np.ndarray, da.core.Array]) – Source input data from data handler class, shape is: (spatial_1, spatial_2, temporal, features)

  • pad_width (tuple) – Tuple of tuples with padding width for spatial and temporal dimensions. Each tuple includes the start and end of padding for that dimension. Ordering is spatial_1, spatial_2, temporal.

  • exo_data (dict) – Full exo_handler_kwargs dictionary with all feature entries. See run_generator() for more information.

  • mode (str) – Mode to use for padding. e.g. 'reflect'.

Returns:

  • out (Union[np.ndarray, da.core.Array]) – Padded copy of source input data from data handler class, shape is: (spatial_1, spatial_2, temporal, features)

  • exo_data (dict) – Same as input dictionary with s_enhance, t_enhance added to each step entry for all features
classmethod run_generator(data_chunk, hr_crop_slices, model, s_enhance=None, t_enhance=None, exo_data=None)[source]#

Run forward pass of the generator on smallest data chunk. Each chunk has a maximum shape given by self.strategy.fwp_chunk_shape.

Parameters:

  • data_chunk (ndarray) – Low res data chunk to go through generator

  • hr_crop_slices (list) – List of slices for extracting cropped region of interest from output. Output can include an extra overlapping boundary to reduce chunking error. The cropping cuts off this padded region before stitching chunks.

  • model (Sup3rGan) – A loaded Sup3rGan model (any model imported from sup3r.models).

  • t_enhance (int) – Factor by which to enhance temporal resolution

  • s_enhance (int) – Factor by which to enhance spatial resolution

  • exo_data (dict | None) – Dictionary of exogenous feature data with entries describing whether features should be combined at input, a mid network layer, or with output. e.g.:

    {'topography': {
        'steps': [
            {'combine_type': 'input', 'model': 0, 'data': ...},
            {'combine_type': 'layer', 'model': 0, 'data': ...}]}}

Returns:

ndarray – High resolution data generated by GAN
classmethod get_node_cmd(config)[source]#

Get a CLI call to initialize ForwardPassStrategy and run ForwardPass on a single node based on an input config.

Parameters:

config (dict) – sup3r forward pass config with all necessary args and kwargs to initialize ForwardPassStrategy and run ForwardPass on a single node.
classmethod run(strategy, node_index)[source]#

Runs forward passes on all spatiotemporal chunks for the given node index.

Parameters:

  • strategy (ForwardPassStrategy) – ForwardPassStrategy instance with information on data chunks to run forward passes on.

  • node_index (int) – Index of node on which the forward passes for spatiotemporal chunks will be run.
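A sketch of the typical single-node flow (all paths, shapes, and pad sizes below are placeholder assumptions):

    from sup3r.pipeline.forward_pass import ForwardPass
    from sup3r.pipeline.strategy import ForwardPassStrategy

    strategy = ForwardPassStrategy(
        file_paths='./lr_input_*.nc',            # placeholder source files
        model_kwargs={'model_dir': './model'},   # forwarded to model_class.load()
        fwp_chunk_shape=(20, 20, 100),
        spatial_pad=5,
        temporal_pad=5,
        out_pattern='./out/sup3r_{file_id}.h5',
    )
    ForwardPass.run(strategy, node_index=0)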
classmethod run_chunk(chunk: ForwardPassChunk, model_kwargs, model_class, allowed_const, invert_uv=None, meta=None, output_workers=None)[source]#

Run a forward pass on a single spatiotemporal chunk.

Parameters:

  • chunk (ForwardPassChunk) – Struct with chunk data (including exo data if applicable) and chunk attributes (e.g. chunk specific slices, times, lat/lon, etc)

  • model_kwargs (str | list) – Keyword arguments to send to model_class.load(**model_kwargs) to initialize the GAN. Typically this is just the string path to the model directory, but can be multiple models or arguments for more complex models.

  • model_class (str) – Name of the sup3r model class for the GAN model to load. The default is the basic spatial / spatiotemporal Sup3rGan model. This will be loaded from sup3r.models

  • allowed_const (list | bool) – If your model is allowed to output a constant output, set this to True to allow any constant output or a list of allowed possible constant outputs. See ForwardPassStrategy for more information on this argument.

  • invert_uv (bool) – Whether to convert u and v wind components to windspeed and winddirection for writing output. This defaults to True for H5 output and False for NETCDF output.

  • meta (dict | None) – Meta data to write to forward pass output file.

  • output_workers (int | None) – Max number of workers to use for writing forward pass output.

Returns:

  • failed (bool) – Whether the forward pass failed due to constant output.

  • output_data (ndarray) – Array of high-resolution output from generator

sup3r.pipeline.forward_pass#

Sup3r forward pass handling module.

Classes

ForwardPass(strategy[, node_index])

Class to run forward passes on all chunks provided by the given ForwardPassStrategy.

sup3r.pipeline.forward_pass_cli#

sup3r forward pass CLI entry points.

Functions

kickoff_local_job(ctx, cmd[, pipeline_step])

Run sup3r forward pass locally.

kickoff_slurm_job(ctx, cmd[, pipeline_step, ...])

Run sup3r on HPC via SLURM job submission.

sup3r.pipeline.forward_pass_cli.kickoff_local_job#

kickoff_local_job(ctx, cmd, pipeline_step=None)[source]#

Run sup3r forward pass locally.

Parameters:

  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary

  • cmd (str) – Command to be submitted in shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'

  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

sup3r.pipeline.forward_pass_cli.kickoff_slurm_job#

kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', memory=None, walltime=4, feature=None, stdout_path='./stdout/')[source]#

Run sup3r on HPC via SLURM job submission.

Parameters:

  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary

  • cmd (str) – Command to be submitted in SLURM shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'

  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

  • alloc (str) – HPC project (allocation) handle. Example: 'sup3r'.

  • memory (int) – Node memory request in GB.

  • walltime (float) – Node walltime request in hours.

  • feature (str) – Additional flags for SLURM job. Format is "--qos=high" or "--depend=[state:job_id]". Default is None.

  • stdout_path (str) – Path to print .stdout and .stderr files.

sup3r.pipeline#

Sup3r data pipeline architecture.

forward_pass

Sup3r forward pass handling module.

forward_pass_cli

sup3r forward pass CLI entry points.

pipeline_cli

Pipeline CLI entry points.

slicer

Slicer class for chunking forward pass input

strategy

ForwardPassStrategy class.

utilities

Methods used by ForwardPass and ForwardPassStrategy

sup3r.pipeline.pipeline_cli#

Pipeline CLI entry points.

sup3r.pipeline.slicer.ForwardPassSlicer#

class ForwardPassSlicer(coarse_shape: tuple | list, time_steps: int, s_enhance: int, t_enhance: int, time_slice: slice, temporal_pad: int, spatial_pad: int, chunk_shape: tuple | list)[source]#

Bases: object

Get slices for sending data chunks through generator.

Parameters:

  • coarse_shape (tuple) – Shape of full domain for low res data

  • time_steps (int) – Number of time steps for full temporal domain of low res data. This is used to construct a dummy_time_index from np.arange(time_steps)

  • time_slice (slice | list) – Slice to use to extract range from time_index. Can be a slice(start, stop, step) or list [start, stop, step]

  • chunk_shape (tuple) – Max shape (spatial_1, spatial_2, temporal) of an unpadded coarse chunk to use for a forward pass. The number of nodes that the ForwardPassStrategy is set to distribute to is calculated by dividing up the total time index from all file_paths by the temporal part of this chunk shape. Each node will then be parallelized across parallel processes by the spatial chunk shape. If temporal_pad / spatial_pad are non zero the chunk sent to the generator can be bigger than this shape. If running in serial set this equal to the shape of the full spatiotemporal data volume for best performance.

  • s_enhance (int) – Spatial enhancement factor

  • t_enhance (int) – Temporal enhancement factor

  • spatial_pad (int) – Size of spatial overlap between coarse chunks passed to forward passes for subsequent spatial stitching. This overlap will pad both sides of the fwp_chunk_shape. Note that the first and last chunks in any of the spatial dimensions will not be padded.

  • temporal_pad (int) – Size of temporal overlap between coarse chunks passed to forward passes for subsequent temporal stitching. This overlap will pad both sides of the fwp_chunk_shape. Note that the first and last chunks in the temporal dimension will not be padded.
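For illustration, a sketch of constructing a slicer consistent with the parameters above (all values are arbitrary assumptions):

    from sup3r.pipeline.slicer import ForwardPassSlicer

    slicer = ForwardPassSlicer(
        coarse_shape=(100, 100),    # full low-res spatial domain
        time_steps=8760,            # full low-res time index length
        s_enhance=5,
        t_enhance=24,
        time_slice=slice(None),
        temporal_pad=5,
        spatial_pad=5,
        chunk_shape=(20, 20, 100),  # unpadded coarse chunk shape
    )
    print(slicer.n_chunks)  # total number of spatiotemporal chunks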

Methods


get_cropped_slices(unpadded_slices, ...)

Get cropped slices to cut off padded output

get_hr_slices(slices, enhancement[, step])

Get high resolution slices for temporal or spatial slices

get_padded_slices(slices, shape, ...[, step])

Get padded slices with the specified padding size, max shape, enhancement, and step size

get_spatial_slices()

Get spatial slices for small data chunks that are passed through generator

get_time_slices()

Calculate the number of time chunks across the full time index


Attributes


chunk_lookup

Get a 3D array with shape (n_spatial_1_chunks, n_spatial_2_chunks, n_time_chunks) where each value is the chunk index.

hr_crop_slices

Get high res spatiotemporal cropped slices for cropping generator output

n_chunks

Get total number of spatiotemporal chunks

n_spatial_chunks

Get the number of spatial chunks

n_time_chunks

Get the number of temporal chunks

s1_hr_slices

Get high res spatial slices for first spatial dimension

s1_lr_pad_slices

List of low resolution spatial slices with padding for first spatial dimension

s1_lr_slices

List of low resolution spatial slices for first spatial dimension considering padding on all sides of the spatial raster.

s2_hr_slices

Get high res spatial slices for second spatial dimension

s2_lr_pad_slices

List of low resolution spatial slices with padding for second spatial dimension

s2_lr_slices

List of low resolution spatial slices for second spatial dimension considering padding on all sides of the spatial raster.

s_hr_crop_slices

Get high res cropped slices for cropping generator output

s_hr_slices

Get high res slices for indexing full generator output array

s_lr_crop_slices

Get low res cropped slices for cropping input chunk domain

s_lr_pad_slices

Get low res padded slices for small data chunks that are passed through generator

s_lr_slices

Get low res spatial slices for small data chunks that are passed through generator

spatial_chunk_lookup

Get a 2D array with shape (n_spatial_1_chunks, n_spatial_2_chunks) where each value is the spatial chunk index.

t_hr_crop_slices

Get high res temporal cropped slices for cropping forward pass output before stitching together

t_lr_crop_slices

Get low res temporal cropped slices for cropping time index of padded input data.

t_lr_pad_slices

Get low res temporal padded slices for distributing time chunks across nodes.

t_lr_slices

Low resolution temporal slices

coarse_shape

time_steps

s_enhance

t_enhance

time_slice

temporal_pad

spatial_pad

chunk_shape

get_spatial_slices()[source]#

Get spatial slices for small data chunks that are passed through generator

Returns:

  • s_lr_slices (list) – List of slices for low res data chunks which have not been padded. data_handler.data[s_lr_slice] corresponds to an unpadded low res input to the model.

  • s_lr_pad_slices (list) – List of slices which have been padded so that high res output can be stitched together. data_handler.data[s_lr_pad_slice] corresponds to a padded low res input to the model.

  • s_hr_slices (list) – List of slices for high res data corresponding to the lr_slices regions. output_array[s_hr_slice] corresponds to the cropped generator output.

get_time_slices()[source]#

Calculate the number of time chunks across the full time index

Returns:

  • t_lr_slices (list) – List of low-res non-padded time index slices. e.g. If fwp_chunk_size[2] is 5 then the size of these slices will always be 5.

  • t_lr_pad_slices (list) – List of low-res padded time index slices. e.g. If fwp_chunk_size[2] is 5 the size of these slices will be 15, with exceptions at the start and end of the full time index.

property s_lr_slices#

Get low res spatial slices for small data chunks that are passed through generator

Returns:

_s_lr_slices (list) – List of spatial slices corresponding to the unpadded spatial region going through the generator

property s_lr_pad_slices#

Get low res padded slices for small data chunks that are passed through generator

Returns:

_s_lr_pad_slices (list) – List of slices which have been padded so that high res output can be stitched together. Each entry in this list has a slice for each spatial dimension. data_handler.data[s_lr_pad_slice] gives the padded data volume passed through the generator

property t_lr_pad_slices#

Get low res temporal padded slices for distributing time chunks across nodes. These slices correspond to the time chunks sent to each node and are padded according to temporal_pad.

Returns:

_t_lr_pad_slices (list) – List of low res temporal slices which have been padded so that high res output can be stitched together

property t_lr_crop_slices#

Get low res temporal cropped slices for cropping time index of padded input data.

Returns:

_t_lr_crop_slices (list) – List of low res temporal slices for cropping padded input data

property t_hr_crop_slices#

Get high res temporal cropped slices for cropping forward pass output before stitching together

Returns:

_t_hr_crop_slices (list) – List of high res temporal slices for cropping padded generator output

property s1_hr_slices#

Get high res spatial slices for first spatial dimension

property s2_hr_slices#

Get high res spatial slices for second spatial dimension

property s_hr_slices#

Get high res slices for indexing full generator output array

Returns:

_s_hr_slices (list) – List of high res slices. Each entry in this list has a slice for each spatial dimension. output[hr_slice] gives the superresolved domain corresponding to data_handler.data[lr_slice]

property s_lr_crop_slices#

Get low res cropped slices for cropping input chunk domain

Returns:

_s_lr_crop_slices (list) – List of low res cropped slices. Each entry in this list has a slice for each spatial dimension.

property s_hr_crop_slices#

Get high res cropped slices for cropping generator output

Returns:

_s_hr_crop_slices (list) – List of high res cropped slices. Each entry in this list has a slice for each spatial dimension.

property hr_crop_slices#

Get high res spatiotemporal cropped slices for cropping generator output

Returns:

_hr_crop_slices (list) – List of high res spatiotemporal cropped slices. Each entry in this list has a crop slice for each spatial dimension and temporal dimension and then slice(None) for the feature dimension. model.generate()[hr_crop_slice] gives the cropped generator output corresponding to output[hr_slice]

property s1_lr_pad_slices#

List of low resolution spatial slices with padding for first spatial dimension

property s2_lr_pad_slices#

List of low resolution spatial slices with padding for second spatial dimension

property s1_lr_slices#

List of low resolution spatial slices for first spatial dimension considering padding on all sides of the spatial raster.

property s2_lr_slices#

List of low resolution spatial slices for second spatial dimension considering padding on all sides of the spatial raster.

property t_lr_slices#

Low resolution temporal slices

static get_hr_slices(slices, enhancement, step=None)[source]#

Get high resolution slices for temporal or spatial slices

Parameters:

  • slices (list) – Low resolution slices to be enhanced

  • enhancement (int) – Enhancement factor

  • step (int | None) – Step size for slices

Returns:

hr_slices (list) – High resolution slices

property chunk_lookup#

Get a 3D array with shape (n_spatial_1_chunks, n_spatial_2_chunks, n_time_chunks) where each value is the chunk index.

property spatial_chunk_lookup#

Get a 2D array with shape (n_spatial_1_chunks, n_spatial_2_chunks) where each value is the spatial chunk index.

property n_spatial_chunks#

Get the number of spatial chunks

property n_time_chunks#

Get the number of temporal chunks

property n_chunks#

Get total number of spatiotemporal chunks

static get_padded_slices(slices, shape, enhancement, padding, step=None)[source]#

Get padded slices with the specified padding size, max shape, enhancement, and step size

Parameters:

  • slices (list) – List of low res unpadded slices

  • shape (int) – Max possible index of a padded slice. e.g. if the slices are indexing a dimension with size 10 then a padded slice cannot have an index greater than 10.

  • enhancement (int) – Enhancement factor. e.g. If these slices are indexing a spatial dimension which will be enhanced by 2x then enhancement=2.

  • padding (int) – Padding factor. e.g. If these slices are indexing a spatial dimension and the spatial_pad is 10 this is 10. It will be multiplied by the enhancement factor if the slices are to be used to index an enhanced dimension.

  • step (int | None) – Step size for slices. e.g. If these slices are indexing a temporal dimension and time_slice.step = 3 then step=3.

Returns:

list – Padded slices for temporal or spatial dimensions.

static get_cropped_slices(unpadded_slices, padded_slices, enhancement)[source]#

Get cropped slices to cut off padded output

Parameters:

  • unpadded_slices (list) – List of unpadded slices

  • padded_slices (list) – List of padded slices

  • enhancement (int) – Enhancement factor for the data to be cropped.

Returns:

list – Cropped slices for temporal or spatial dimensions.
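For illustration, the two static helpers can be exercised directly (inputs are arbitrary; the exact padded and cropped values depend on the implementation, so none are asserted here):

    lr_slices = [slice(0, 5), slice(5, 10)]
    padded = ForwardPassSlicer.get_padded_slices(
        lr_slices, shape=10, enhancement=1, padding=2)
    cropped = ForwardPassSlicer.get_cropped_slices(
        lr_slices, padded, enhancement=1)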

sup3r.pipeline.slicer#

Slicer class for chunking forward pass input

Classes

ForwardPassSlicer(coarse_shape, time_steps, ...)

Get slices for sending data chunks through generator.


sup3r.pipeline.strategy.ForwardPassChunk#

class ForwardPassChunk(input_data: ndarray | Array, exo_data: Dict, hr_crop_slice: slice, lr_pad_slice: slice, hr_lat_lon: ndarray | Array, hr_times: DatetimeIndex, gids: ndarray | Array, out_file: str, pad_width: Tuple[tuple, tuple, tuple], index: int)[source]#

Bases: object

Structure storing chunk data and attributes for a specific chunk going through the generator.

Attributes

input_data

exo_data

hr_crop_slice

lr_pad_slice

hr_lat_lon

hr_times

gids

out_file

pad_width

index


sup3r.pipeline.strategy.ForwardPassStrategy#

+
+
+class ForwardPassStrategy(file_paths: str | list | Path, model_kwargs: dict, fwp_chunk_shape: tuple = (None, None, None), spatial_pad: int = 0, temporal_pad: int = 0, model_class: str = 'Sup3rGan', out_pattern: str | None = None, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, exo_handler_kwargs: dict | None = None, bias_correct_method: str | None = None, bias_correct_kwargs: dict | None = None, allowed_const: list | bool | None = None, incremental: bool = True, output_workers: int = 1, invert_uv: bool | None = None, pass_workers: int = 1, max_nodes: int = 1, head_node: bool = False)[source]#
+

Bases: object

+

Class to prepare data for forward passes through generator.

+

A full file list of contiguous times is provided. The corresponding data is +split into spatiotemporal chunks which can overlap in time and space. These +chunks are distributed across nodes according to the max nodes input or +number of temporal chunks. This strategy stores information on these +chunks, how they overlap, how they are distributed to nodes, and how to +crop generator output to stich the chunks back together.

+

Use the following inputs to initialize data handlers on different nodes and +to define the size of the data chunks that will be passed through the +generator.

Parameters:

  • file_paths (list | str) – A list of low-resolution source files to extract raster data from. Each file must have the same number of timesteps. Can also pass a string with a unix-style file path which will be passed through glob.glob.

    Note: These files can also include a 2D (lat, lon) “mask” variable which is True for grid points which can be skipped in the forward pass and False otherwise. This will be used to skip running the forward pass for chunks which only include masked points, e.g. chunks covering only ocean. Chunks with even a single unmasked point will still be sent through the forward pass.

  • model_kwargs (str | list) – Keyword arguments to send to model_class.load(**model_kwargs) to initialize the GAN. Typically this is just the string path to the model directory, but can be multiple models or arguments for more complex models.

  • fwp_chunk_shape (tuple) – Max shape (spatial_1, spatial_2, temporal) of an unpadded coarse chunk to use for a forward pass. The number of nodes that the ForwardPassStrategy is set to distribute to is calculated by dividing up the total time index from all file_paths by the temporal part of this chunk shape. Each node will then be parallelized across parallel processes by the spatial chunk shape. If temporal_pad / spatial_pad are non-zero the chunk sent to the generator can be bigger than this shape. If running in serial set this equal to the shape of the full spatiotemporal data volume for best performance.

  • spatial_pad (int) – Size of spatial overlap between coarse chunks passed to forward passes for subsequent spatial stitching. This overlap will pad both sides of the fwp_chunk_shape.

  • temporal_pad (int) – Size of temporal overlap between coarse chunks passed to forward passes for subsequent temporal stitching. This overlap will pad both sides of the fwp_chunk_shape.

  • model_class (str) – Name of the sup3r model class for the GAN model to load. The default is the basic spatial / spatiotemporal Sup3rGan model. This will be loaded from sup3r.models.

  • out_pattern (str) – Output file pattern. Must include a {file_id} format key. Each output file will have a unique file_id filled in and the extension determines the output type. If the pattern is None then data will be returned in an array and not saved.

  • input_handler_name (str | None) – Class to use for input data. Provide a string name to match a rasterizer or handler class in sup3r.preprocessing.

  • input_handler_kwargs (dict | None) – Any kwargs for initializing the input_handler_name class.

  • exo_handler_kwargs (dict | None) – Dictionary of args to pass to ExoDataHandler for extracting exogenous features for forward passes. This should be a nested dictionary with keys for each exogenous feature. The dictionaries corresponding to the feature names should include the path to the exogenous data source and the files used for input to the forward passes, at minimum. Can also provide a dictionary of input_handler_kwargs used for the handler which opens the exogenous data. e.g.:

    {'topography': {
        'source_file': ...,
        'input_files': ...,
        'input_handler_kwargs': {'target': ..., 'shape': ...}}}

  • bias_correct_method (str | None) – Optional bias correction function name that can be imported from the sup3r.bias.bias_transforms module. This will transform the source data according to some predefined bias correction transformation along with the bias_correct_kwargs. As the first argument, this method must receive a generic numpy array of data to be bias corrected.

  • bias_correct_kwargs (dict | None) – Optional namespace of kwargs to provide to bias_correct_method. If this is provided, it must be a dictionary where each key is a feature name and each value is a dictionary of kwargs to correct that feature. You can bias correct only certain input features by only including those feature names in this dict.

  • allowed_const (list | bool) – Tensorflow has a tensor memory limit of 2GB (a result of a protobuf limitation) and when exceeded can return a tensor with a constant output. sup3r will raise a MemoryError in response. If your model is allowed to output a constant output, set this to True to allow any constant output, or to a list of allowed possible constant outputs. For example, a precipitation model should be allowed to output all zeros, so set this to [0]. For details on this limit: tensorflow/tensorflow#51870

  • incremental (bool) – Allow the forward pass iteration to skip spatiotemporal chunks that already have an output file (default = True) or iterate through all chunks and overwrite any pre-existing outputs (False).

  • output_workers (int | None) – Max number of workers to use for writing forward pass output.

  • invert_uv (bool | None) – Whether to convert u and v wind components to windspeed and direction for writing to output. This defaults to True for H5 output and False for NETCDF output.

  • pass_workers (int | None) – Max number of workers to use for performing forward passes on a single node. If 1 then all forward passes on chunks distributed to a single node will be run serially. pass_workers=2 is the minimum number of workers required to run the ForwardPass initialization and run_chunk() methods concurrently.

  • max_nodes (int | None) – Maximum number of nodes to distribute spatiotemporal chunks across. If None then a node will be used for each temporal chunk.

  • head_node (bool) – Whether initialization is taking place on the head node of a multi-node job launch. When this is true ForwardPassStrategy is only partially initialized to provide the head node enough information for how to distribute jobs across nodes. Preflight tasks like bias correction will be skipped because they will be performed on the nodes jobs are distributed to by the head node.
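
For orientation, here is a minimal initialization sketch. The file paths, model directory, and chunk sizes are hypothetical placeholders, and running the chunks via ForwardPass.run is assumed from the companion sup3r.pipeline.forward_pass module:

    # Minimal sketch with hypothetical paths and sizes; ForwardPass.run is
    # assumed from sup3r.pipeline.forward_pass.
    from sup3r.pipeline.forward_pass import ForwardPass
    from sup3r.pipeline.strategy import ForwardPassStrategy

    strategy = ForwardPassStrategy(
        file_paths='/data/wtk_2015_*.nc',               # hypothetical source files
        model_kwargs='/models/sup3r_gan',               # string path to model dir
        fwp_chunk_shape=(128, 128, 48),                 # unpadded coarse chunk shape
        spatial_pad=8,
        temporal_pad=8,
        out_pattern='/scratch/sup3r_out_{file_id}.h5',  # must include {file_id}
    )
    ForwardPass.run(strategy, node_index=0)             # run all chunks for node 0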

Methods


chunk_finished(chunk_idx[, log])

Check if process for given chunk_index has already been run.

chunk_masked(chunk_idx[, log])

Check if the region for this chunk is masked.

get_chunk_indices(chunk_index)

Get (spatial, temporal) indices for the given chunk index

get_exo_cache_files(model)

Get list of exo cache files so we can check if they exist or not.

get_exo_kwargs(model)

Get list of exo kwargs for all exo features.

get_pad_width(chunk_index)

Get padding for the current spatiotemporal chunk

init_chunk([chunk_index])

Get ForwardPassChunk instance for the given chunk index.

init_input_handler()

Get input handler instance for given input kwargs.

load_exo_data(model)

Extract exogenous data for each exo feature and store data in dictionary with key for each exo feature

node_finished(node_idx)

Check if all out files for a given node have been saved

preflight()

Preflight logging and sanity checks

prep_chunk_data([chunk_index])

Get low res input data and exo data for given chunk index and bias correct low res data if requested.


Attributes


allowed_const

bias_correct_kwargs

bias_correct_method

exo_handler_kwargs

fwp_chunk_shape

fwp_mask

Cached spatial mask which returns whether a given spatial chunk should be skipped by the forward pass or not.

head_node

hr_lat_lon

Get high resolution lat lons

incremental

input_handler_kwargs

input_handler_name

invert_uv

max_nodes

meta

Meta data dictionary for the strategy.

model_class

node_chunks

Get array of lists such that node_chunks[i] is a list of indices for the chunks that will be sent through the generator on the ith node.

out_files

Get list of output file names for each file chunk forward pass.

out_pattern

output_workers

pass_workers

spatial_pad

temporal_pad

unmasked_chunks

List of chunk indices that are not masked from the input spatial region.

file_paths

model_kwargs

property meta

Meta data dictionary for the strategy. Used to add info to forward pass output meta.

init_input_handler()

Get input handler instance for given input kwargs. If self.head_node is False we get all requested features. Otherwise this is part of initialization on a head node and just used to get the shape of the input domain, so we don’t need to get any features yet.

property node_chunks

Get array of lists such that node_chunks[i] is a list of indices for the chunks that will be sent through the generator on the ith node.

property unmasked_chunks

List of chunk indices that are not masked from the input spatial region. These chunks are those that will go through the forward pass. Masked chunks will be skipped.

preflight()

Preflight logging and sanity checks.

get_chunk_indices(chunk_index)

Get (spatial, temporal) indices for the given chunk index.

property hr_lat_lon

Get high resolution lat lons.

property out_files

Get list of output file names for each file chunk forward pass.

get_pad_width(chunk_index)

Get padding for the current spatiotemporal chunk.

Returns:

padding (tuple) – Tuple of tuples with padding width for spatial and temporal dimensions. Each tuple includes the start and end of padding for that dimension. Ordering is spatial_1, spatial_2, temporal.
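
For example, with spatial_pad=8 and temporal_pad=8 an interior chunk would get a pad width like the following. This is an assumed illustration of the return structure (edge chunks get smaller pads where the domain boundary truncates the overlap), and the np.pad mode shown is illustrative rather than necessarily sup3r's choice:

    pad_width = ((8, 8), (8, 8), (8, 8))   # (spatial_1, spatial_2, temporal)

    # Conceptually, this could be applied to a chunk with numpy:
    import numpy as np
    chunk = np.zeros((128, 128, 48, 5))    # hypothetical (s1, s2, t, features)
    padded = np.pad(chunk, (*pad_width, (0, 0)), mode='reflect')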
prep_chunk_data(chunk_index=0)

Get low res input data and exo data for given chunk index and bias correct low res data if requested.

Note: input_data.load() is called here to load chunk data into memory.

init_chunk(chunk_index=0)

Get ForwardPassChunk instance for the given chunk index.

This selects the appropriate data from self.input_handler and self.exo_data and returns a structure object (ForwardPassChunk) with that data and other chunk-specific attributes.

get_exo_kwargs(model)

Get list of exo kwargs for all exo features.

get_exo_cache_files(model)

Get list of exo cache files so we can check if they exist or not.

load_exo_data(model)

Extract exogenous data for each exo feature and store the data in a dictionary with a key for each exo feature.

Returns:

exo_data (ExoData) – ExoData object composed of multiple SingleExoDataStep objects. This is the exo data for the full spatiotemporal extent.

property fwp_mask

Cached spatial mask which returns whether a given spatial chunk should be skipped by the forward pass or not. This is used to skip running the forward pass for areas with just ocean, for example.

Note: This is True for grid points which can be skipped in the forward pass and False otherwise.

node_finished(node_idx)

Check if all out files for a given node have been saved.

chunk_finished(chunk_idx, log=True)

Check if the process for the given chunk_index has already been run. Considered finished if there is already an output file and incremental is False.

chunk_masked(chunk_idx, log=True)

Check if the region for this chunk is masked. This is used to skip running the forward pass for regions with just ocean, for example.

sup3r.pipeline.strategy

ForwardPassStrategy class. This sets up chunks and needed generator inputs to distribute forward passes.

Classes

ForwardPassChunk(input_data, exo_data, ...)

Structure storing chunk data and attributes for a specific chunk going through the generator.

ForwardPassStrategy(file_paths, model_kwargs)

Class to prepare data for forward passes through generator.

sup3r.pipeline.utilities.get_chunk_slices

get_chunk_slices(arr_size, chunk_size, index_slice=slice(None, None, None))

Get array slices of corresponding chunk size.

Parameters:

  • arr_size (int) – Length of array to slice

  • chunk_size (int) – Size of slices to split array into

  • index_slice (slice) – Slice specifying starting and ending index of slice list

Returns:

list – List of slices corresponding to chunks of array
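
A quick usage sketch; the output shown is the expected tiling behavior given the docstring above (illustrative, not captured from a real session):

    from sup3r.pipeline.utilities import get_chunk_slices

    slices = get_chunk_slices(10, 4)
    # expected: [slice(0, 4), slice(4, 8), slice(8, 10)]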

sup3r.pipeline.utilities.get_model

get_model(model_class, kwargs)

Instantiate model after a check on the class name.
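
A hedged usage sketch, assuming model_class names a class importable from sup3r.models and kwargs matches that class's load signature (the model directory here is hypothetical):

    from sup3r.pipeline.utilities import get_model

    model = get_model('Sup3rGan', {'model_dir': '/models/sup3r_gan'})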

sup3r.pipeline.utilities

Methods used by ForwardPass and ForwardPassStrategy

Functions

get_chunk_slices(arr_size, chunk_size[, ...])

Get array slices of corresponding chunk size

get_model(model_class, kwargs)

Instantiate model after a check on the class name.

sup3r.postprocessing.collectors.base.BaseCollector

class BaseCollector(file_paths)

Bases: OutputMixin, ABC

Base collector class for H5/NETCDF collection

Parameters:

file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.<ext>. Files should have non-overlapping time_index and spatial domains.

Methods


collect(*args, **kwargs)

Collect data files from a dir to one output file.

get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

get_dset_attrs(feature)

Get attributes for output feature

get_node_cmd(config)

Get a CLI call to collect data.

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

static get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

Returns:

  • temporal_chunk_index (str) – Zero padded integer for the temporal chunk index

  • spatial_chunk_index (str) – Zero padded integer for the spatial chunk index
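
Given the chunk file naming convention used by the collectors (a _{temporal_chunk_index}_{spatial_chunk_index}.h5 suffix), a call might look like this (hypothetical file name):

    t_idx, s_idx = BaseCollector.get_chunk_indices('sup3r_out_000_012.h5')
    # t_idx == '000', s_idx == '012' (zero-padded strings)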
abstract classmethod collect(*args, **kwargs)

Collect data files from a dir to one output file.
classmethod get_node_cmd(config)

Get a CLI call to collect data.

Parameters:

config (dict) – sup3r collection config with all necessary args and kwargs to run data collection.
static get_dset_attrs(feature)

Get attributes for output feature

Parameters:

feature (str) – Name of feature to write

Returns:

  • attrs (dict) – Dictionary of attributes for requested dset

  • dtype (str) – Data type for requested dset. Defaults to float32
static get_time_dim_name(filepath)

Get the name of the time dimension in the given file

Parameters:

filepath (str) – Path to the file

Returns:

time_key (str) – Name of the time dimension in the given file
classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)

Write list of datasets to out_file.

Parameters:

  • out_file (str) – Pre-existing H5 file output path

  • dsets (list) – list of datasets to write to out_file

  • time_index (pd.DatetimeIndex) – Pandas datetime index to use for file time_index.

  • data_list (list) – List of np.ndarray objects to write to out_file

  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • global_attrs (dict) – Namespace of file-global attributes for the final output data.
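
Since collect is abstract, a concrete collector mainly needs to supply that classmethod. A minimal sketch of a hypothetical subclass (the flist attribute name is assumed; the real CollectorH5/CollectorNC implementations below are more involved):

    from sup3r.postprocessing.collectors.base import BaseCollector

    class MyCollector(BaseCollector):
        """Hypothetical collector that only reports what it would merge."""

        @classmethod
        def collect(cls, file_paths, out_file):
            collector = cls(file_paths)  # resolves / sorts the input file list
            # A real implementation would read each chunk file here and write
            # the combined data to out_file.
            print(f'would collect {len(collector.flist)} files into {out_file}')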

sup3r.postprocessing.collectors.base

H5/NETCDF file collection.

Classes

BaseCollector(file_paths)

Base collector class for H5/NETCDF collection


sup3r.postprocessing.collectors.h5.CollectorH5

class CollectorH5(file_paths)

Bases: BaseCollector

Sup3r H5 file collection framework

Parameters:

file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.<ext>. Files should have non-overlapping time_index and spatial domains.

Methods


collect(file_paths, out_file, features[, ...])

Collect data files from a dir to one output file.

collect_feature(dset, target_masked_meta, ...)

Collect chunks for single feature

get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

get_collection_attrs(file_paths[, ...])

Get important dataset attributes from a file list to be collected.

get_coordinate_indices(target_meta, full_meta)

Get coordinate indices in meta data for given targets

get_data(file_path, feature, time_index, ...)

Retrieve a data array from a chunked file.

get_dset_attrs(feature)

Get attributes for output feature

get_flist_chunks(file_paths[, n_writes])

Group files by temporal_chunk_index and then combine these groups if n_writes is less than the number of time_chunks.

get_node_cmd(config)

Get a CLI call to collect data.

get_slices(final_time_index, final_meta, ...)

Get index slices where the new ti/meta belong in the final ti/meta.

get_target_and_masked_meta(meta[, ...])

Use combined meta for all files and target_meta_file to get mapping from the full meta to the target meta and the mapping from the target meta to the full meta, both of which are masked to remove coordinates not present in the target_meta.

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

get_unique_chunk_files(file_paths)

We get files for the unique spatial and temporal extents covered by all collection files.

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

classmethod get_slices(final_time_index, final_meta, new_time_index, new_meta)

Get index slices where the new ti/meta belong in the final ti/meta.

Parameters:

  • final_time_index (pd.DatetimeIndex) – Time index of the final file that new_time_index is being written to.

  • final_meta (pd.DataFrame) – Meta data of the final file that new_meta is being written to.

  • new_time_index (pd.DatetimeIndex) – Chunk time index that is a subset of the final_time_index.

  • new_meta (pd.DataFrame) – Chunk meta data that is a subset of the final_meta.

Returns:

  • row_slice (slice) – final_time_index[row_slice] = new_time_index

  • col_slice (slice) – final_meta[col_slice] = new_meta

get_coordinate_indices(target_meta, full_meta, threshold=0.0001)

Get coordinate indices in meta data for given targets

Parameters:

  • target_meta (pd.DataFrame) – Dataframe of coordinates to find within the full meta

  • full_meta (pd.DataFrame) – Dataframe of full set of coordinates for unfiltered dataset

  • threshold (float) – Threshold distance for finding target coordinates within full meta

get_data(file_path, feature, time_index, meta, scale_factor, dtype, threshold=0.0001)

Retrieve a data array from a chunked file.

Parameters:

  • file_path (str) – h5 file to get data from

  • feature (str) – dataset to retrieve data from fpath.

  • time_index (pd.DatetimeIndex) – Time index of the final file.

  • meta (pd.DataFrame) – Meta data of the final file.

  • scale_factor (int | float) – Final destination scale factor after collection. If the data retrieval from the files to be collected has a different scale factor, the collected data will be rescaled and returned as float32.

  • dtype (np.dtype) – Final dtype to return data as

  • threshold (float) – Threshold distance for finding target coordinates within full meta

Returns:

  • f_data (Union[np.ndarray, da.core.Array]) – Data array from the fpath cast as input dtype.

  • row_slice (slice) – final_time_index[row_slice] = new_time_index

  • col_slice (slice) – final_meta[col_slice] = new_meta

get_unique_chunk_files(file_paths)

We get files for the unique spatial and temporal extents covered by all collection files. Since the files have a suffix _{temporal_chunk_index}_{spatial_chunk_index}.h5 we just use all files with a single spatial_chunk_index for the full time index and all files with a single temporal_chunk_index for the full meta.

Parameters:

  • t_files (list) – Explicit list of str file paths which, when combined, provide the entire temporal extent.

  • s_files (list) – Explicit list of str file paths which, when combined, provide the entire spatial domain.

get_target_and_masked_meta(meta, target_meta_file=None, threshold=0.0001)

Use combined meta for all files and target_meta_file to get the mapping from the full meta to the target meta and the mapping from the target meta to the full meta, both of which are masked to remove coordinates not present in the target_meta.

Parameters:

  • meta (pd.DataFrame) – Concatenated full size meta data from the flist that is being collected or provided target meta

  • target_meta_file (str) – Path to target final meta containing coordinates to keep from the full list of coordinates present in the collected meta for the full file list.

  • threshold (float) – Threshold distance for finding target coordinates within full meta

Returns:

  • target_meta (pd.DataFrame) – Concatenated full size meta data from the flist that is being collected or provided target meta

  • masked_meta (pd.DataFrame) – Concatenated full size meta data from the flist that is being collected masked against target_meta

get_collection_attrs(file_paths, max_workers=None, target_meta_file=None, threshold=0.0001)

Get important dataset attributes from a file list to be collected.

Assumes the file list is chunked in time (row chunked).

Parameters:

  • file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.h5.

  • max_workers (int | None) – Number of workers to use in parallel. 1 runs serial, None will use all available workers.

  • target_meta_file (str) – Path to target final meta containing coordinates to keep from the full list of coordinates present in the collected meta for the full file list.

  • threshold (float) – Threshold distance for finding target coordinates within full meta

Returns:

  • time_index (pd.DatetimeIndex) – Concatenated full size datetime index from the flist that is being collected

  • target_meta (pd.DataFrame) – Concatenated full size meta data from the flist that is being collected or provided target meta

  • masked_meta (pd.DataFrame) – Concatenated full size meta data from the flist that is being collected masked against target_meta

  • shape (tuple) – Output (collected) dataset shape

  • global_attrs (dict) – Global attributes from the first file in file_paths (it’s assumed that all the files in file_paths have the same global file attributes).

get_flist_chunks(file_paths, n_writes=None)

Group files by temporal_chunk_index and then combine these groups if n_writes is less than the number of time_chunks. Assumes file_paths have a suffix format like _{temporal_chunk_index}_{spatial_chunk_index}.h5

Parameters:

  • file_paths (list) – List of file paths each with a suffix _{temporal_chunk_index}_{spatial_chunk_index}.h5

  • n_writes (int | None) – Number of writes to use for collection

Returns:

flist_chunks (list) – List of file list chunks. Used to split collection and writing into multiple steps.

collect_feature(dset, target_masked_meta, target_meta_file, time_index, shape, flist_chunks, out_file, threshold=0.0001, max_workers=None)

Collect chunks for single feature

Parameters:

  • dset (str) – Dataset name to collect.

  • target_masked_meta (pd.DataFrame) – Same as subset_masked_meta but instead for the entire list of files to be collected.

  • target_meta_file (str) – Path to target final meta containing coordinates to keep from the full file list collected meta. This can be but is not necessarily a subset of the full list of coordinates for all files in the file list. This is used to remove coordinates from the full file list which are not present in the target_meta. Either this full meta or a subset, depending on which coordinates are present in the data to be collected, will be the final meta for the collected output files.

  • time_index (pd.DatetimeIndex) – Concatenated datetime index for the given file paths.

  • shape (tuple) – Output (collected) dataset shape

  • flist_chunks (list) – List of file list chunks. Used to split collection and writing into multiple steps.

  • out_file (str) – File path of final output file.

  • threshold (float) – Threshold distance for finding target coordinates within full meta

  • max_workers (int | None) – Number of workers to use in parallel. 1 runs serial, None will use all available workers.
classmethod collect(file_paths, out_file, features, max_workers=None, log_level=None, log_file=None, target_meta_file=None, n_writes=None, overwrite=True, threshold=0.0001)

Collect data files from a dir to one output file.

Filename requirements:
  • Should end with “.h5”

Parameters:

  • file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.h5. Files resolved by this argument must be of the form *_{temporal_chunk_index}_{spatial_chunk_index}.h5.

  • out_file (str) – File path of final output file.

  • features (list) – List of dsets to collect

  • max_workers (int | None) – Number of workers to use in parallel. 1 runs serial, None will use all available workers.

  • log_level (str | None) – Desired log level, None will not initialize logging.

  • log_file (str | None) – Target log file. None logs to stdout.

  • write_status (bool) – Flag to write status file once complete if running from pipeline.

  • job_name (str) – Job name for status file if running from pipeline.

  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to "collect", mimicking old reV behavior. By default, None.

  • target_meta_file (str) – Path to target final meta containing coordinates to keep from the full file list collected meta. This can be but is not necessarily a subset of the full list of coordinates for all files in the file list. This is used to remove coordinates from the full file list which are not present in the target_meta. Either this full meta or a subset, depending on which coordinates are present in the data to be collected, will be the final meta for the collected output files.

  • n_writes (int | None) – Number of writes to split full file list into. Must be less than or equal to the number of temporal chunks if chunks have different time indices.

  • overwrite (bool) – Whether to overwrite existing output file

  • threshold (float) – Threshold distance for finding target coordinates within full meta
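
A minimal usage sketch with hypothetical paths; the chunk files are assumed to follow the *_{temporal_chunk_index}_{spatial_chunk_index}.h5 convention described above:

    from sup3r.postprocessing.collectors.h5 import CollectorH5

    CollectorH5.collect(
        file_paths='/scratch/sup3r_out_*_*.h5',    # hypothetical chunk files
        out_file='/scratch/sup3r_collected.h5',
        features=['windspeed_100m', 'winddirection_100m'],
        max_workers=4,
    )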
static get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

Returns:

  • temporal_chunk_index (str) – Zero padded integer for the temporal chunk index

  • spatial_chunk_index (str) – Zero padded integer for the spatial chunk index

static get_dset_attrs(feature)

Get attributes for output feature

Parameters:

feature (str) – Name of feature to write

Returns:

  • attrs (dict) – Dictionary of attributes for requested dset

  • dtype (str) – Data type for requested dset. Defaults to float32

classmethod get_node_cmd(config)

Get a CLI call to collect data.

Parameters:

config (dict) – sup3r collection config with all necessary args and kwargs to run data collection.

static get_time_dim_name(filepath)

Get the name of the time dimension in the given file

Parameters:

filepath (str) – Path to the file

Returns:

time_key (str) – Name of the time dimension in the given file

classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)

Write list of datasets to out_file.

Parameters:

  • out_file (str) – Pre-existing H5 file output path

  • dsets (list) – list of datasets to write to out_file

  • time_index (pd.DatetimeIndex) – Pandas datetime index to use for file time_index.

  • data_list (list) – List of np.ndarray objects to write to out_file

  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • global_attrs (dict) – Namespace of file-global attributes for the final output data.

sup3r.postprocessing.collectors.h5

H5 file collection.

Classes

CollectorH5(file_paths)

Sup3r H5 file collection framework

sup3r.postprocessing.collectors

Collector classes for NETCDF and H5 data.

base

H5/NETCDF file collection.

h5

H5 file collection.

nc

NETCDF file collection.

sup3r.postprocessing.collectors.nc.CollectorNC

class CollectorNC(file_paths)

Bases: BaseCollector

Sup3r NETCDF file collection framework

Parameters:

file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.<ext>. Files should have non-overlapping time_index and spatial domains.

Methods


collect(file_paths, out_file[, features, ...])

Collect data files from a dir to one output file.

get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

get_dset_attrs(feature)

Get attributes for output feature

get_node_cmd(config)

Get a CLI call to collect data.

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

group_spatial_chunks()

Group the same spatial chunks together so that each group has the same spatial footprint but different times

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

classmethod collect(file_paths, out_file, features='all', log_level=None, log_file=None, overwrite=True, res_kwargs=None)

Collect data files from a dir to one output file.

Filename requirements:
  • Should end with “.nc”

Parameters:

  • file_paths (list | str) – Explicit list of str file paths that will be sorted and collected or a single string with unix-style /search/patt*ern.nc.

  • out_file (str) – File path of final output file.

  • features (list | str) – List of dsets to collect. If ‘all’ then all data_vars will be collected.

  • log_level (str | None) – Desired log level, None will not initialize logging.

  • log_file (str | None) – Target log file. None logs to stdout.

  • write_status (bool) – Flag to write status file once complete if running from pipeline.

  • job_name (str) – Job name for status file if running from pipeline.

  • overwrite (bool) – Whether to overwrite existing output file

  • res_kwargs (dict | None) – Dictionary of kwargs to pass to xarray.open_mfdataset.
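
As with the H5 collector, a minimal usage sketch (hypothetical paths; res_kwargs is forwarded to xarray.open_mfdataset):

    from sup3r.postprocessing.collectors.nc import CollectorNC

    CollectorNC.collect(
        file_paths='/scratch/sup3r_out_*_*.nc',    # hypothetical chunk files
        out_file='/scratch/sup3r_collected.nc',
        features='all',
        res_kwargs={'combine': 'by_coords'},
    )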
group_spatial_chunks()

Group the same spatial chunks together so that each group has the same spatial footprint but different times.
static get_chunk_indices(file)

Get spatial and temporal chunk indices from the given file name.

Returns:

  • temporal_chunk_index (str) – Zero padded integer for the temporal chunk index

  • spatial_chunk_index (str) – Zero padded integer for the spatial chunk index

static get_dset_attrs(feature)

Get attributes for output feature

Parameters:

feature (str) – Name of feature to write

Returns:

  • attrs (dict) – Dictionary of attributes for requested dset

  • dtype (str) – Data type for requested dset. Defaults to float32

classmethod get_node_cmd(config)

Get a CLI call to collect data.

Parameters:

config (dict) – sup3r collection config with all necessary args and kwargs to run data collection.

static get_time_dim_name(filepath)

Get the name of the time dimension in the given file

Parameters:

filepath (str) – Path to the file

Returns:

time_key (str) – Name of the time dimension in the given file

classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)

Write list of datasets to out_file.

Parameters:

  • out_file (str) – Pre-existing H5 file output path

  • dsets (list) – list of datasets to write to out_file

  • time_index (pd.DatetimeIndex) – Pandas datetime index to use for file time_index.

  • data_list (list) – List of np.ndarray objects to write to out_file

  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • global_attrs (dict) – Namespace of file-global attributes for the final output data.

sup3r.postprocessing.collectors.nc

NETCDF file collection.

TODO: Integrate this with Cacher class

Classes

CollectorNC(file_paths)

Sup3r NETCDF file collection framework

sup3r.postprocessing.data_collect_cli

sup3r data collection CLI entry points.

Functions

kickoff_local_job(ctx, cmd[, pipeline_step])

Run sup3r data collection locally.

kickoff_slurm_job(ctx, cmd[, pipeline_step, ...])

Run sup3r on HPC via SLURM job submission.

sup3r.postprocessing.data_collect_cli.kickoff_local_job

kickoff_local_job(ctx, cmd, pipeline_step=None)

Run sup3r data collection locally.

Parameters:

  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary

  • cmd (str) – Command to be submitted in shell script. Example: ‘python -m sup3r.cli data_collect -c <config_file>’

  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

sup3r.postprocessing.data_collect_cli.kickoff_slurm_job

kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', memory=None, walltime=4, feature=None, stdout_path='./stdout/')

Run sup3r on HPC via SLURM job submission.

Parameters:

  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary

  • cmd (str) – Command to be submitted in SLURM shell script. Example: ‘python -m sup3r.cli data-collect -c <config_file>’

  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

  • alloc (str) – HPC project (allocation) handle. Example: ‘sup3r’.

  • memory (int) – Node memory request in GB.

  • walltime (float) – Node walltime request in hours.

  • feature (str) – Additional flags for SLURM job. Format is “--qos=high” or “--depend=[state:job_id]”. Default is None.

  • stdout_path (str) – Path to print .stdout and .stderr files.

sup3r.postprocessing

Post processing module

collectors

Collector classes for NETCDF and H5 data.

data_collect_cli

sup3r data collection CLI entry points.

writers

Module with objects which write forward pass output to files.

sup3r.postprocessing.writers.base.OutputHandler

class OutputHandler

Bases: OutputMixin

Class to handle forward pass output. This includes transforming features back to their original form and outputting to the correct file format.

Methods

enforce_limits(features, data)

Enforce physical limits for feature data

get_dset_attrs(feature)

Get attributes for output feature

get_lat_lon(low_res_lat_lon, shape)

Get lat lon arrays for high res output file

get_renamed_features(features)

Rename features based on transformation from u/v to windspeed/winddirection

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

get_times(low_res_times, shape)

Get array of times for high res output file

invert_uv_features(data, features, lat_lon)

Invert U/V to windspeed and winddirection.

invert_uv_single_pair(data, lat_lon, u_idx, ...)

Perform inverse transform in place on a single u/v pair.

is_increasing_lons(lat_lon)

Check if longitudes are in increasing order.

pad_lat_lon(lat_lon)

Pad lat lon grid with additional rows and columns to use for interpolation routine

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

write_output(data, features, ...[, ...])

Write forward pass output to file

+
classmethod get_renamed_features(features)

Rename features based on transformation from u/v to windspeed/winddirection

Parameters:

features (list) – List of output features

Returns:

list – List of renamed features u/v -> windspeed/winddirection for each height
classmethod invert_uv_features(data, features, lat_lon, max_workers=None)

Invert U/V to windspeed and winddirection. Performed in place.

Parameters:

  • data (ndarray) – High res data from forward pass (spatial_1, spatial_2, temporal, features)

  • features (list) – List of output features. If this doesn’t contain any names matching u_*m, this method will do nothing.

  • lat_lon (ndarray) – High res lat/lon array (spatial_1, spatial_2, 2)

  • max_workers (int | None) – Max workers to use for inverse transform. If None the maximum possible will be used
static invert_uv_single_pair(data, lat_lon, u_idx, v_idx)

Perform inverse transform in place on a single u/v pair.

Parameters:

  • data (ndarray) – High res data from forward pass (spatial_1, spatial_2, temporal, features)

  • lat_lon (ndarray) – High res lat/lon array (spatial_1, spatial_2, 2)

  • u_idx (int) – Index in data for U component to transform

  • v_idx (int) – Index in data for V component to transform
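
The inverse transform follows the standard meteorological convention, sketched below. This is the textbook math only; the actual implementation also uses lat_lon to account for grid orientation before converting:

    import numpy as np

    def invert_uv_sketch(u, v):
        """Windspeed magnitude and the direction the wind blows FROM,
        in degrees clockwise from north (standard met convention)."""
        ws = np.hypot(u, v)
        wd = (np.degrees(np.arctan2(u, v)) + 180) % 360
        return ws, wd

    ws, wd = invert_uv_sketch(np.array([0.0]), np.array([-5.0]))
    # u=0, v=-5 is wind blowing from the north -> ws == 5.0, wd == 0.0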
static enforce_limits(features, data)

Enforce physical limits for feature data

Parameters:

  • features (list) – List of features with ordering corresponding to last channel of data array.

  • data (ndarray) – Array of feature data

Returns:

data (ndarray) – Array of feature data with physical limits enforced

static pad_lat_lon(lat_lon)

Pad lat lon grid with additional rows and columns to use for interpolation routine

Parameters:

  • lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

  • shape (tuple) – (lons, lats) Shape of high res grid

Returns:

lat_lon (ndarray) – Array of padded lat lons (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

static is_increasing_lons(lat_lon)

Check if longitudes are in increasing order. Need to check this for the interpolation routine. This is primarily to identify whether the lons go through the 180 -> -180 boundary, which creates a discontinuity. For example, [130, 180, -130, -80]. If any lons go from positive to negative the lons need to be shifted to the range 0-360.

Parameters:

lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

Returns:

bool – Whether all lons are in increasing order or not

classmethod get_lat_lon(low_res_lat_lon, shape)

Get lat lon arrays for high res output file

Parameters:

  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. Longitudes must be arranged in a counter-clockwise direction (when looking down from above the north pole), e.g. [-50, -25, 25, 50] or [130, 180, -130, -80]. The latter passes through the 180 -> -180 boundary and will be temporarily shifted to the 0-360 range before interpolation. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

  • shape (tuple) – (lons, lats) Shape of high res grid

Returns:

lat_lon (ndarray) – Array of lat lons for high res output file (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

static get_times(low_res_times, shape)

Get array of times for high res output file

Parameters:

  • low_res_times (pd.DatetimeIndex) – List of times for low res input data. If there is only a single low res timestep, it is assumed the data is daily.

  • shape (int) – Number of time steps for high res time array

Returns:

ndarray – Array of times for high res output file.
classmethod write_output(data, features, low_res_lat_lon, low_res_times, out_file, meta_data=None, invert_uv=None, max_workers=None, gids=None)

Write forward pass output to file

Parameters:

  • data (ndarray) – (spatial_1, spatial_2, temporal, features) High resolution forward pass output

  • features (list) – List of feature names corresponding to the last dimension of data

  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)

  • low_res_times (pd.DatetimeIndex) – List of times for low res source data

  • out_file (string) – Output file path

  • meta_data (dict | None) – Dictionary of meta data from model

  • invert_uv (bool | None) – Whether to convert u and v wind components to windspeed and direction

  • max_workers (int | None) – Max workers to use for inverse uv transform. If None the max_workers will be estimated based on memory limits.

  • gids (list) – List of coordinate indices used to label each lat lon pair and to help with spatial chunk data collection
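
A hedged sketch of writing a small forward-pass output array through the concrete H5 handler (hypothetical shapes and values; leaving invert_uv=None applies the format-dependent default, True for H5 and False for NETCDF):

    import numpy as np
    import pandas as pd
    from sup3r.postprocessing.writers.h5 import OutputHandlerH5

    lats, lons = np.meshgrid([40.0, 39.9], [-105.1, -105.0], indexing='ij')
    data = np.random.rand(4, 4, 24, 2).astype(np.float32)  # (s1, s2, t, features)

    OutputHandlerH5.write_output(
        data=data,
        features=['u_100m', 'v_100m'],
        low_res_lat_lon=np.dstack([lats, lons]),            # (2, 2, 2), (lat, lon)
        low_res_times=pd.date_range('2020-01-01', periods=6, freq='h'),
        out_file='sup3r_chunk_000_000.h5',
        invert_uv=None,
    )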
static get_dset_attrs(feature)

Get attributes for output feature

Parameters:

feature (str) – Name of feature to write

Returns:

  • attrs (dict) – Dictionary of attributes for requested dset

  • dtype (str) – Data type for requested dset. Defaults to float32

static get_time_dim_name(filepath)

Get the name of the time dimension in the given file

Parameters:

filepath (str) – Path to the file

Returns:

time_key (str) – Name of the time dimension in the given file

classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)

Write list of datasets to out_file.

Parameters:

  • out_file (str) – Pre-existing H5 file output path

  • dsets (list) – list of datasets to write to out_file

  • time_index (pd.DatetimeIndex) – Pandas datetime index to use for file time_index.

  • data_list (list) – List of np.ndarray objects to write to out_file

  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • global_attrs (dict) – Namespace of file-global attributes for the final output data.

sup3r.postprocessing.writers.base.OutputMixin

class OutputMixin

Bases: object

Methods used by various Output and Collection classes

Methods

get_dset_attrs(feature)

Get attributes for output feature

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

static get_time_dim_name(filepath)

Get the name of the time dimension in the given file

Parameters:

filepath (str) – Path to the file

Returns:

time_key (str) – Name of the time dimension in the given file

static get_dset_attrs(feature)

Get attributes for output feature

Parameters:

feature (str) – Name of feature to write

Returns:

  • attrs (dict) – Dictionary of attributes for requested dset

  • dtype (str) – Data type for requested dset. Defaults to float32

classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)

Write list of datasets to out_file.

Parameters:

  • out_file (str) – Pre-existing H5 file output path

  • dsets (list) – list of datasets to write to out_file

  • time_index (pd.DatetimeIndex) – Pandas datetime index to use for file time_index.

  • data_list (list) – List of np.ndarray objects to write to out_file

  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • global_attrs (dict) – Namespace of file-global attributes for the final output data.
sup3r.postprocessing.writers.base.RexOutputs#

class RexOutputs(h5_file, mode='r', unscale=True, str_decode=True, group=None)[source]#

Bases: Outputs

Base class to handle NREL h5 formatted output data

Parameters:

  • h5_file (str) – Path to .h5 resource file

  • mode (str, optional) – Mode to instantiate h5py.File instance, by default 'r'

  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, by default True

  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal strings. Setting this to False will speed up the meta data read, by default True

  • group (str, optional) – Group within .h5 resource file to open, by default None
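A minimal read sketch (hypothetical file path; this assumes the context-manager behavior inherited from the rex Outputs parent class):

>>> from sup3r.postprocessing.writers.base import RexOutputs
>>> with RexOutputs('./sup3r_out.h5', mode='r') as res:
...     ti = res.time_index   # pandas.DatetimeIndex
...     meta = res.meta       # site meta DataFrame
...     print(res.dsets)      # available datasets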

Methods


add_dataset(h5_file, dset_name, dset_data, dtype)

Add dataset to h5_file

close()

Close h5 instance

df_str_decode(df)

Decode a dataframe with byte string columns into ordinary str cols.

get_SAM_df(site)

Placeholder for get_SAM_df method that is resource specific

get_attrs([dset])

Get h5 attributes either from file or dataset

get_config(config_name)

Get SAM config

get_dset_properties(dset)

Get dataset properties (shape, dtype, chunks)

get_meta_arr(rec_name[, rows])

Get a meta array by name (faster than DataFrame extraction).

get_scale_factor(dset)

Get dataset scale factor

get_units(dset)

Get dataset units

init_h5(h5_file, dsets, shapes, attrs, ...)

Init a full output file with the final intended shape without data.

open_dataset(ds_name)

Open resource dataset

preload_SAM(h5_file, sites, tech[, unscale, ...])

Pre-load project_points for SAM

set_configs(SAM_configs)

Set SAM configuration JSONs as attributes of 'meta'

set_version_attr()

Set the version attribute to the h5 file.

update_dset(dset, dset_array[, dset_slice])

Check to see if dset needs to be updated on disk If so write dset_array to disk

write_dataset(dset_name, data, dtype[, ...])

Write dataset to disk.

write_means(h5_file, meta, dset_name, means, ...)

Write means array to disk

write_profiles(h5_file, meta, time_index, ...)

Write profiles to disk

Attributes

ADD_ATTR

SAM_configs

SAM configuration JSONs used to create CF profiles

SCALE_ATTR

UNIT_ATTR

adders

Dictionary of all dataset add offset factors

attrs

Dictionary of all dataset attributes

chunks

Dictionary of all dataset chunk sizes

coordinates

(lat, lon) pairs

data_version

Get the version attribute of the data.

datasets

Datasets available

dsets

Datasets available

dtypes

Dictionary of all dataset dtypes

full_version_record

Get record of versions for dependencies

global_attrs

Global (file) attributes

groups

Groups available

h5

Open h5py File instance.

lat_lon

Extract (latitude, longitude) pairs

meta

Resource meta data DataFrame

package

Package used to create file

res_dsets

Available resource datasets

resource_datasets

Available resource datasets

run_attrs

Runtime attributes stored at the global (file) level

scale_factors

Dictionary of all dataset scale factors

shape

Variable array shape from time_index and meta

shapes

Dictionary of all dataset shapes

source

Package and version used to create file

time_index

Resource DatetimeIndex

units

Dictionary of all dataset units

version

Version of package used to create file

writable

Check to see if h5py.File instance is writable

+
+
+
+property full_version_record#
+

Get record of versions for dependencies

+
+
Returns:
+

dict – Dictionary of package versions for dependencies

+
+
+
+ +
+
+set_version_attr()[source]#
+

Set the version attribute to the h5 file.

+
+ +
+
+property SAM_configs#
+

SAM configuration JSONs used to create CF profiles

+
+
Returns:
+

configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+classmethod add_dataset(h5_file, dset_name, dset_data, dtype, attrs=None, chunks=None, unscale=True, mode='a', str_decode=True, group=None)#
+

Add dataset to h5_file

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • dset_name (str) – Name of dataset to be added to h5 file

  • +
  • dset_data (ndarray) – Data to be added to h5 file

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘a’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
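For example, appending a derived dataset to an existing file might look like the following sketch (hypothetical dataset name, shape, and attrs):

>>> import numpy as np
>>> arr = np.zeros((8760, 100), dtype=np.float32)  # (time, sites)
>>> RexOutputs.add_dataset('./sup3r_out.h5', 'pressure_100m', arr,
...                        dtype='float32', attrs={'units': 'Pa'}, mode='a')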
+ +
+
+property adders#
+

Dictionary of all dataset add offset factors

+
+
Returns:
+

adders (dict)

+
+
+
+ +
+
+property attrs#
+

Dictionary of all dataset attributes

+
+
Returns:
+

attrs (dict)

+
+
+
+ +
+
+property chunks#
+

Dictionary of all dataset chunk sizes

+
+
Returns:
+

chunks (dict)

+
+
+
+ +
+
+close()#
+

Close h5 instance

+
+ +
+
+property coordinates#
+

(lat, lon) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
Type:
+

Coordinates

+
+
+
+ +
+
+property data_version#
+

Get the version attribute of the data. None if not available.

+
+
Returns:
+

version (str | None)

+
+
+
+ +
+
+property datasets#
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+static df_str_decode(df)#
+

Decode a dataframe with byte string columns into ordinary str cols.

+
+
Parameters:
+

df (pd.DataFrame) – Dataframe with some columns being byte strings.

+
+
Returns:
+

df (pd.DataFrame) – DataFrame with str columns instead of byte str columns.

+
+
+
+ +
+
+property dsets#
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+property dtypes#
+

Dictionary of all dataset dtypes

+
+
Returns:
+

dtypes (dict)

+
+
+
+ +
+
+get_SAM_df(site)#
+

Placeholder for get_SAM_df method that is resource specific

+
+
Parameters:
+

site (int) – Site to extract SAM DataFrame for

+
+
+
+ +
+
+get_attrs(dset=None)#
+

Get h5 attributes either from file or dataset

+
+
Parameters:
+

dset (str) – Dataset to get attributes for, if None get file (global) attributes

+
+
Returns:
+

attrs (dict) – Dataset or file attributes

+
+
+
+ +
+
+get_config(config_name)#
+

Get SAM config

+
+
Parameters:
+

config_name (str) – Name of config

+
+
Returns:
+

config (dict) – SAM config JSON as a dictionary

+
+
+
+ +
+
+get_dset_properties(dset)#
+

Get dataset properties (shape, dtype, chunks)

+
+
Parameters:
+

dset (str) – Dataset to get properties for

+
+
Returns:
+

    +
  • shape (tuple) – Dataset array shape

  • +
  • dtype (str) – Dataset array dtype

  • +
  • chunks (tuple) – Dataset chunk size

  • +
+

+
+
+
+ +
+
+get_meta_arr(rec_name, rows=slice(None, None, None))#
+

Get a meta array by name (faster than DataFrame extraction).

+
+
Parameters:
+
    +
  • rec_name (str) – Named record from the meta data to retrieve.

  • +
  • rows (slice) – Rows of the record to extract.

  • +
+
+
Returns:
+

meta_arr (np.ndarray) – Extracted array from the meta data record name.

+
+
+
+ +
+
+get_scale_factor(dset)#
+

Get dataset scale factor

+
+
Parameters:
+

dset (str) – Dataset to get scale factor for

+
+
Returns:
+

float – Dataset scale factor, used to unscale int values to floats

+
+
+
+ +
+
+get_units(dset)#
+

Get dataset units

+
+
Parameters:
+

dset (str) – Dataset to get units for

+
+
Returns:
+

str – Dataset units, None if not defined

+
+
+
+ +
+
+property global_attrs#
+

Global (file) attributes

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property groups#
+

Groups available

+
+
Returns:
+

groups (list) – List of groups

+
+
+
+ +
+
+property h5#
+

Open h5py File instance. If _group is not None return open Group

+
+
Returns:
+

h5 (h5py.File | h5py.Group)

+
+
+
+ +
+
+classmethod init_h5(h5_file, dsets, shapes, attrs, chunks, dtypes, meta, time_index=None, configs=None, unscale=True, mode='w', str_decode=True, group=None, run_attrs=None)#
+

Init a full output file with the final intended shape without data.

+
+
Parameters:
+
    +
  • h5_file (str) – Full h5 output filepath.

  • +
  • dsets (list) – List of strings of dataset names to initialize (does not include +meta or time_index).

  • +
  • shapes (dict) – Dictionary of dataset shapes (keys correspond to dsets).

  • +
  • attrs (dict) – Dictionary of dataset attributes (keys correspond to dsets).

  • +
  • chunks (dict) – Dictionary of chunk tuples (keys correspond to dsets).

  • +
  • dtypes (dict) – dictionary of numpy datatypes (keys correspond to dsets).

  • +
  • meta (pd.DataFrame) – Full meta data.

  • +
  • time_index (pd.datetimeindex | None) – Full pandas datetime index. None implies that only 1D results +(no site profiles) are being written.

  • +
  • configs (dict | None) – Optional input configs to set as attr on meta.

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • mode (str) – Mode to instantiate h5py.File instance

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • run_attrs (dict | NoneType) – Runtime attributes (args, kwargs) to add as global (file) +attributes

  • +
+
+
+
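A sketch of initializing an empty output file for a single feature (assumes meta and time_index objects already exist; dataset name and chunk sizes are hypothetical):

>>> dsets = ['windspeed_100m']
>>> shapes = {'windspeed_100m': (len(time_index), len(meta))}
>>> attrs = {'windspeed_100m': {'units': 'm/s'}}
>>> chunks = {'windspeed_100m': (2000, 500)}
>>> dtypes = {'windspeed_100m': 'float32'}
>>> RexOutputs.init_h5('./out.h5', dsets, shapes, attrs, chunks, dtypes,
...                    meta, time_index=time_index)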
+ +
+
+property lat_lon#
+

Extract (latitude, longitude) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
+
+ +
+
+property meta#
+

Resource meta data DataFrame

+
+
Returns:
+

meta (pandas.DataFrame)

+
+
+
+ +
+
+open_dataset(ds_name)#
+

Open resource dataset

+
+
Parameters:
+

ds_name (str) – Dataset name to open

+
+
Returns:
+

ds (ResourceDataset) – Resource for open resource dataset

+
+
+
+ +
+
+property package#
+

Package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+classmethod preload_SAM(h5_file, sites, tech, unscale=True, str_decode=True, group=None, hsds=False, hsds_kwargs=None, time_index_step=None, means=False)#
+

Pre-load project_points for SAM

+
+
Parameters:
+
    +
  • h5_file (str) – h5_file to extract resource from

  • +
  • sites (list) – List of sites to be provided to SAM +(sites is synonymous with gids aka spatial indices)

  • +
  • tech (str) – Technology to be run by SAM

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • hsds (bool, optional) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS, by default False

  • +
  • hsds_kwargs (dict, optional) – Dictionary of optional kwargs for h5pyd, e.g., bucket, username, +password, by default None

  • +
  • time_index_step (int, optional) – Step size for time_index, used to reduce temporal resolution, +by default None

  • +
  • means (bool, optional) – Boolean flag to compute mean resource when res_array is set, +by default False

  • +
+
+
Returns:
+

SAM_res (SAMResource) – Instance of SAMResource pre-loaded with Solar resource for sites +in project_points

+
+
+
+ +
+
+property res_dsets#
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property resource_datasets#
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property run_attrs#
+

Runtime attributes stored at the global (file) level

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property scale_factors#
+

Dictionary of all dataset scale factors

+
+
Returns:
+

scale_factors (dict)

+
+
+
+ +
+
+set_configs(SAM_configs)#
+

Set SAM configuration JSONs as attributes of ‘meta’

+
+
Parameters:
+

SAM_configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+property shape#
+

Variable array shape from time_index and meta

+
+
Returns:
+

tuple – shape of variables arrays == (time, locations)

+
+
+
+ +
+
+property shapes#
+

Dictionary of all dataset shapes

+
+
Returns:
+

shapes (dict)

+
+
+
+ +
+
+property source#
+

Package and version used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property time_index#
+

Resource DatetimeIndex

+
+
Returns:
+

time_index (pandas.DatetimeIndex)

+
+
+
+ +
+
+property units#
+

Dictionary of all dataset units

+
+
Returns:
+

units (dict)

+
+
+
+ +
+
+update_dset(dset, dset_array, dset_slice=None)#
+

Check to see if dset needs to be updated on disk. If so, write dset_array to disk

+
+
Parameters:
+
    +
  • dset (str) – dataset to update

  • +
  • dset_array (ndarray) – dataset array

  • +
  • dset_slice (tuple) – slice of dataset to update, if None update all

  • +
+
+
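For instance, overwriting only the first 100 timesteps of a dataset (a sketch; res is an open writable RexOutputs instance and new_chunk is a hypothetical array):

>>> res.update_dset('windspeed_100m', new_chunk, dset_slice=(slice(0, 100),))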
+
+ +
+
+property version#
+

Version of package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property writable#
+

Check to see if h5py.File instance is writable

+
+
Returns:
+

is_writable (bool) – Flag if mode is writable

+
+
+
+ +
+
+write_dataset(dset_name, data, dtype, chunks=None, attrs=None)#
+

Write dataset to disk. Dataset is created in .h5 file and data is scaled if needed.

+
+
Parameters:
+
    +
  • dset_name (str) – Name of dataset to be added to h5 file.

  • +
  • data (ndarray) – Data to be added to h5 file.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • chunks (tuple) – Chunk size for capacity factor means dataset.

  • +
  • attrs (dict) – Attributes to be set. May include ‘scale_factor’.

  • +
+
+
+
+ +
+
+classmethod write_means(h5_file, meta, dset_name, means, dtype, attrs=None, SAM_configs=None, chunks=None, unscale=True, mode='w-', str_decode=True, group=None)#
+

Write means array to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • dset_name (str) – Name of the target dataset (should identify the means).

  • +
  • means (ndarray) – output means array.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+
+classmethod write_profiles(h5_file, meta, time_index, dset_name, profiles, dtype, attrs=None, SAM_configs=None, chunks=(None, 100), unscale=True, mode='w-', str_decode=True, group=None)#
+

Write profiles to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • time_index (pandas.DatetimeIndex) – Temporal timesteps

  • +
  • dset_name (str) – Name of the target dataset (should identify the profiles).

  • +
  • profiles (ndarray) – output result timeseries profiles

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, +by default (None, 100)

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
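A hedged sketch of writing a full timeseries dataset (profiles is a hypothetical (time, sites) array consistent with meta and time_index):

>>> RexOutputs.write_profiles('./profiles.h5', meta, time_index,
...                           'windspeed_100m', profiles, 'float32',
...                           attrs={'units': 'm/s'})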
\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.base.html b/_autosummary/sup3r.postprocessing.writers.base.html
new file mode 100644
index 000000000..38d699def
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.base.html

sup3r.postprocessing.writers.base#

+

Output handling

+

TODO: OutputHandlers should be combined with Cacher objects.

+

Classes


OutputHandler()

Class to handle forward pass output.

OutputMixin()

Methods used by various Output and Collection classes

RexOutputs(h5_file[, mode, unscale, ...])

Base class to handle NREL h5 formatted output data

\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.html b/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.html
new file mode 100644
index 000000000..e71cf2ec0
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.html

sup3r.postprocessing.writers.h5.OutputHandlerH5#

class OutputHandlerH5[source]#

Bases: OutputHandler

Class to handle writing output to H5 file

Methods

enforce_limits(features, data)

Enforce physical limits for feature data

get_dset_attrs(feature)

Get attributes for output feature

get_lat_lon(low_res_lat_lon, shape)

Get lat lon arrays for high res output file

get_renamed_features(features)

Rename features based on transformation from u/v to windspeed/winddirection

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

get_times(low_res_times, shape)

Get array of times for high res output file

invert_uv_features(data, features, lat_lon)

Invert U/V to windspeed and winddirection.

invert_uv_single_pair(data, lat_lon, u_idx, ...)

Perform inverse transform in place on a single u/v pair.

is_increasing_lons(lat_lon)

Check if longitudes are in increasing order.

pad_lat_lon(lat_lon)

Pad lat lon grid with additional rows and columns to use for interpolation routine

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

write_output(data, features, ...[, ...])

Write forward pass output to file

+
+
+
+static enforce_limits(features, data)#
+

Enforce physical limits for feature data

+
+
Parameters:
+
    +
  • features (list) – List of features with ordering corresponding to last channel of +data array.

  • +
  • data (ndarray) – Array of feature data

  • +
+
+
Returns:
+

data (ndarray) – Array of feature data with physical limits enforced

+
+
+
+ +
+
+static get_dset_attrs(feature)#
+

Get attributes for output feature

+
+
Parameters:
+

feature (str) – Name of feature to write

+
+
Returns:
+

    +
  • attrs (dict) – Dictionary of attributes for requested dset

  • +
  • dtype (str) – Data type for requested dset. Defaults to float32

  • +
+

+
+
+
+ +
+
+classmethod get_lat_lon(low_res_lat_lon, shape)#
+

Get lat lon arrays for high res output file

+
+
Parameters:
+
    +
  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. Longitudes must be arranged in +a counter-clockwise direction (when looking down from above the +north pole). e.g. [-50, -25, 25, 50] or [130, 180, -130, -80]. The +latter passes through the 180 -> -180 boundary and will be +temporarily shifted to the 0-360 range before interpolation. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • shape (tuple) – (lons, lats) Shape of high res grid

  • +
+
+
Returns:
+

lat_lon (ndarray) – Array of lat lons for high res output file +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
+
+ +
+
+classmethod get_renamed_features(features)#
+

Rename features based on transformation from u/v to +windspeed/winddirection

+
+
Parameters:
+

features (list) – List of output features

+
+
Returns:
+

list – List of renamed features u/v -> windspeed/winddirection for each +height

+
+
+
+ +
+
+static get_time_dim_name(filepath)#
+

Get the name of the time dimension in the given file

+
+
Parameters:
+

filepath (str) – Path to the file

+
+
Returns:
+

time_key (str) – Name of the time dimension in the given file

+
+
+
+ +
+
+static get_times(low_res_times, shape)#
+

Get array of times for high res output file

+
+
Parameters:
+
    +
  • low_res_times (pd.Datetimeindex) – List of times for low res input data. If there is only a single low +res timestep, it is assumed the data is daily.

  • +
  • shape (int) – Number of time steps for high res time array

  • +
+
+
Returns:
+

ndarray – Array of times for high res output file.

+
+
+
+ +
+
+classmethod invert_uv_features(data, features, lat_lon, max_workers=None)#
+

Invert U/V to windspeed and winddirection. Performed in place.

+
+
Parameters:
+
    +
  • data (ndarray) – High res data from forward pass +(spatial_1, spatial_2, temporal, features)

  • +
  • features (list) – List of output features. If this doesn’t contain any names matching +u_*m, this method will do nothing.

  • +
  • lat_lon (ndarray) – High res lat/lon array +(spatial_1, spatial_2, 2)

  • +
  • max_workers (int | None) – Max workers to use for inverse transform. If None the maximum +possible will be used

  • +
+
+
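Conceptually, the inverse transform maps each u/v pair to a magnitude and a direction. A rough numpy sketch of one common meteorological convention is shown below; this illustrates the idea only and is not necessarily sup3r's exact implementation, which also accounts for grid orientation via lat_lon:

>>> import numpy as np
>>> ws = np.hypot(u, v)  # windspeed magnitude
>>> wd = (270 - np.degrees(np.arctan2(v, u))) % 360  # direction wind blows from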
+
+ +
+
+static invert_uv_single_pair(data, lat_lon, u_idx, v_idx)#
+

Perform inverse transform in place on a single u/v pair.

+
+
Parameters:
+
    +
  • data (ndarray) – High res data from forward pass +(spatial_1, spatial_2, temporal, features)

  • +
  • lat_lon (ndarray) – High res lat/lon array +(spatial_1, spatial_2, 2)

  • +
  • u_idx (int) – Index in data for U component to transform

  • +
  • v_idx (int) – Index in data for V component to transform

  • +
+
+
+
+ +
+
+static is_increasing_lons(lat_lon)#
+

Check if longitudes are in increasing order. Need to check this +for interpolation routine. This is primarily to identify whether the +lons go through the 180 -> -180 boundary, which creates a +discontinuity. For example, [130, 180, -130, -80]. If any lons go from +positive to negative the lons need to be shifted to the range 0-360.

+
+
Parameters:
+

lat_lon (ndarray) – Array of lat/lon for input data. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
Returns:
+

bool – Whether all lons are in increasing order or not

+
+
+
+ +
+
+static pad_lat_lon(lat_lon)#
+

Pad lat lon grid with additional rows and columns to use for +interpolation routine

+
+
Parameters:
+
    +
  • lat_lon (ndarray) – Array of lat/lon for input data. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • shape (tuple) – (lons, lats) Shape of high res grid

  • +
+
+
Returns:
+

lat_lon (ndarray) – Array of padded lat lons +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
+
+ +
+
+classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)#
+

Write list of datasets to out_file.

+
+
Parameters:
+
    +
  • out_file (str) – Pre-existing H5 file output path

  • +
  • dsets (list) – list of datasets to write to out_file

  • +
  • time_index (pd.DatetimeIndex()) – Pandas datetime index to use for file time_index.

  • +
  • data_list (list) – List of np.ndarray objects to write to out_file

  • +
  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • +
  • global_attrs (dict) – Namespace of file-global attributes for the final output data.

  • +
+
+
+
+ +
+
+classmethod write_output(data, features, low_res_lat_lon, low_res_times, out_file, meta_data=None, invert_uv=None, max_workers=None, gids=None)#
+

Write forward pass output to file

+
+
Parameters:
+
    +
  • data (ndarray) – (spatial_1, spatial_2, temporal, features) +High resolution forward pass output

  • +
  • features (list) – List of feature names corresponding to the last dimension of data

  • +
  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • low_res_times (pd.Datetimeindex) – List of times for low res source data

  • +
  • out_file (string) – Output file path

  • +
  • meta_data (dict | None) – Dictionary of meta data from model

  • +
  • invert_uv (bool | None) – Whether to convert u and v wind components to windspeed and +direction

  • +
  • max_workers (int | None) – Max workers to use for inverse uv transform. If None the +max_workers will be estimated based on memory limits.

  • +
  • gids (list) – List of coordinate indices used to label each lat lon pair and to +help with spatial chunk data collection

  • +
+
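An end-to-end sketch (data, features, low_res_lat_lon, and low_res_times are hypothetical forward pass outputs and inputs):

>>> OutputHandlerH5.write_output(data, features, low_res_lat_lon,
...                              low_res_times, './fp_chunk_000.h5',
...                              invert_uv=True, max_workers=1)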
\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.h5.html b/_autosummary/sup3r.postprocessing.writers.h5.html
new file mode 100644
index 000000000..69422332e
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.h5.html

sup3r.postprocessing.writers.h5#

+

Output handling

+

TODO: Remove redundant code re. Cachers

+

Classes


OutputHandlerH5()

Class to handle writing output to H5 file

\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.html b/_autosummary/sup3r.postprocessing.writers.html
new file mode 100644
index 000000000..d4df5cc96
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.html

sup3r.postprocessing.writers#

+

Module with objects which write forward pass output to files.


base

Output handling

h5

Output handling

nc

Output handling

+
\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.html b/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.html
new file mode 100644
index 000000000..f507dad7c
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.html

sup3r.postprocessing.writers.nc.OutputHandlerNC#

class OutputHandlerNC[source]#

Bases: OutputHandler

Forward pass OutputHandler for NETCDF files

Methods

enforce_limits(features, data)

Enforce physical limits for feature data

get_dset_attrs(feature)

Get attributes for output feature

get_lat_lon(low_res_lat_lon, shape)

Get lat lon arrays for high res output file

get_renamed_features(features)

Rename features based on transformation from u/v to windspeed/winddirection

get_time_dim_name(filepath)

Get the name of the time dimension in the given file

get_times(low_res_times, shape)

Get array of times for high res output file

invert_uv_features(data, features, lat_lon)

Invert U/V to windspeed and winddirection.

invert_uv_single_pair(data, lat_lon, u_idx, ...)

Perform inverse transform in place on a single u/v pair.

is_increasing_lons(lat_lon)

Check if longitudes are in increasing order.

pad_lat_lon(lat_lon)

Pad lat lon grid with additional rows and columns to use for interpolation routine

write_data(out_file, dsets, time_index, ...)

Write list of datasets to out_file.

write_output(data, features, ...[, ...])

Write forward pass output to file

+
+
+
+static enforce_limits(features, data)#
+

Enforce physical limits for feature data

+
+
Parameters:
+
    +
  • features (list) – List of features with ordering corresponding to last channel of +data array.

  • +
  • data (ndarray) – Array of feature data

  • +
+
+
Returns:
+

data (ndarray) – Array of feature data with physical limits enforced

+
+
+
+ +
+
+static get_dset_attrs(feature)#
+

Get attributes for output feature

+
+
Parameters:
+

feature (str) – Name of feature to write

+
+
Returns:
+

    +
  • attrs (dict) – Dictionary of attributes for requested dset

  • +
  • dtype (str) – Data type for requested dset. Defaults to float32

  • +
+

+
+
+
+ +
+
+classmethod get_lat_lon(low_res_lat_lon, shape)#
+

Get lat lon arrays for high res output file

+
+
Parameters:
+
    +
  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. Longitudes must be arranged in +a counter-clockwise direction (when looking down from above the +north pole). e.g. [-50, -25, 25, 50] or [130, 180, -130, -80]. The +latter passes through the 180 -> -180 boundary and will be +temporarily shifted to the 0-360 range before interpolation. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • shape (tuple) – (lons, lats) Shape of high res grid

  • +
+
+
Returns:
+

lat_lon (ndarray) – Array of lat lons for high res output file +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
+
+ +
+
+classmethod get_renamed_features(features)#
+

Rename features based on transformation from u/v to +windspeed/winddirection

+
+
Parameters:
+

features (list) – List of output features

+
+
Returns:
+

list – List of renamed features u/v -> windspeed/winddirection for each +height

+
+
+
+ +
+
+static get_time_dim_name(filepath)#
+

Get the name of the time dimension in the given file

+
+
Parameters:
+

filepath (str) – Path to the file

+
+
Returns:
+

time_key (str) – Name of the time dimension in the given file

+
+
+
+ +
+
+static get_times(low_res_times, shape)#
+

Get array of times for high res output file

+
+
Parameters:
+
    +
  • low_res_times (pd.Datetimeindex) – List of times for low res input data. If there is only a single low +res timestep, it is assumed the data is daily.

  • +
  • shape (int) – Number of time steps for high res time array

  • +
+
+
Returns:
+

ndarray – Array of times for high res output file.

+
+
+
+ +
+
+classmethod invert_uv_features(data, features, lat_lon, max_workers=None)#
+

Invert U/V to windspeed and winddirection. Performed in place.

+
+
Parameters:
+
    +
  • data (ndarray) – High res data from forward pass +(spatial_1, spatial_2, temporal, features)

  • +
  • features (list) – List of output features. If this doesn’t contain any names matching +u_*m, this method will do nothing.

  • +
  • lat_lon (ndarray) – High res lat/lon array +(spatial_1, spatial_2, 2)

  • +
  • max_workers (int | None) – Max workers to use for inverse transform. If None the maximum +possible will be used

  • +
+
+
+
+ +
+
+static invert_uv_single_pair(data, lat_lon, u_idx, v_idx)#
+

Perform inverse transform in place on a single u/v pair.

+
+
Parameters:
+
    +
  • data (ndarray) – High res data from forward pass +(spatial_1, spatial_2, temporal, features)

  • +
  • lat_lon (ndarray) – High res lat/lon array +(spatial_1, spatial_2, 2)

  • +
  • u_idx (int) – Index in data for U component to transform

  • +
  • v_idx (int) – Index in data for V component to transform

  • +
+
+
+
+ +
+
+static is_increasing_lons(lat_lon)#
+

Check if longitudes are in increasing order. Need to check this +for interpolation routine. This is primarily to identify whether the +lons go through the 180 -> -180 boundary, which creates a +discontinuity. For example, [130, 180, -130, -80]. If any lons go from +positive to negative the lons need to be shifted to the range 0-360.

+
+
Parameters:
+

lat_lon (ndarray) – Array of lat/lon for input data. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
Returns:
+

bool – Whether all lons are in increasing order or not

+
+
+
+ +
+
+static pad_lat_lon(lat_lon)#
+

Pad lat lon grid with additional rows and columns to use for +interpolation routine

+
+
Parameters:
+
    +
  • lat_lon (ndarray) – Array of lat/lon for input data. +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • shape (tuple) – (lons, lats) Shape of high res grid

  • +
+
+
Returns:
+

lat_lon (ndarray) – Array of padded lat lons +(spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

+
+
+
+ +
+
+classmethod write_data(out_file, dsets, time_index, data_list, meta, global_attrs=None)#
+

Write list of datasets to out_file.

+
+
Parameters:
+
    +
  • out_file (str) – Pre-existing H5 file output path

  • +
  • dsets (list) – list of datasets to write to out_file

  • +
  • time_index (pd.DatetimeIndex()) – Pandas datetime index to use for file time_index.

  • +
  • data_list (list) – List of np.ndarray objects to write to out_file

  • +
  • meta (pd.DataFrame) – Full meta dataframe for the final output data.

  • +
  • global_attrs (dict) – Namespace of file-global attributes for the final output data.

  • +
+
+
+
+ +
+
+classmethod write_output(data, features, low_res_lat_lon, low_res_times, out_file, meta_data=None, invert_uv=None, max_workers=None, gids=None)#
+

Write forward pass output to file

+
+
Parameters:
+
    +
  • data (ndarray) – (spatial_1, spatial_2, temporal, features) +High resolution forward pass output

  • +
  • features (list) – List of feature names corresponding to the last dimension of data

  • +
  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) +Last dimension has ordering (lat, lon)

  • +
  • low_res_times (pd.Datetimeindex) – List of times for low res source data

  • +
  • out_file (string) – Output file path

  • +
  • meta_data (dict | None) – Dictionary of meta data from model

  • +
  • invert_uv (bool | None) – Whether to convert u and v wind components to windspeed and +direction

  • +
  • max_workers (int | None) – Max workers to use for inverse uv transform. If None the +max_workers will be estimated based on memory limits.

  • +
  • gids (list) – List of coordinate indices used to label each lat lon pair and to +help with spatial chunk data collection

  • +
+
+
+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.postprocessing.writers.nc.html b/_autosummary/sup3r.postprocessing.writers.nc.html
new file mode 100644
index 000000000..f64dd9a84
--- /dev/null
+++ b/_autosummary/sup3r.postprocessing.writers.nc.html

sup3r.postprocessing.writers.nc#

+

Output handling

+

Classes


OutputHandlerNC()

Forward pass OutputHandler for NETCDF files

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.accessor.Sup3rX.html b/_autosummary/sup3r.preprocessing.accessor.Sup3rX.html
new file mode 100644
index 000000000..55f2c231c
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.accessor.Sup3rX.html

sup3r.preprocessing.accessor.Sup3rX#

class Sup3rX(ds: Dataset | Self)[source]#

Bases: object

Accessor for xarray - the suggested way to extend xarray functionality.

References

https://docs.xarray.dev/en/latest/internals/extending-xarray.html

+
+

Note

(1) This is an xr.Dataset style object with all xr.Dataset methods, plus more. The way to access these methods is either through appending .sx.<method> on an xr.Dataset or by wrapping an xr.Dataset with Sup3rX, e.g. Sup3rX(xr.Dataset(...)).<method>. Throughout the sup3r codebase we prefer to use the latter. The most important part of this interface is parsing __getitem__ calls of the form ds.sx[keys].

  1. keys can be a single feature name, list of features, or numpy style indexing for dimensions. e.g. ds.sx['u'][slice(0, 10), ...] or ds.sx[['u', 'v']][..., slice(0, 10)].

  2. If ds[keys] returns an xr.Dataset object then ds.sx[keys] will return a Sup3rX object. e.g. ds.sx[['u','v']] will return a Sup3rX instance but ds.sx['u'] will return an xr.DataArray.

  3. Providing only numpy style indexing without features will return an array with all contained features in the last dimension with a spatiotemporal shape corresponding to the indices. e.g. ds[slice(0, 10), 0, 1] will return an array of shape (10, 1, 1, n_features). This array will be a dask.array or numpy.array, depending on whether data is still on disk or loaded into memory.

(2) The __getitem__ and __getattr__ methods will cast back to type(self) if self._ds.__getitem__ or self._ds.__getattr__ returns an instance of type(self._ds) (e.g. an xr.Dataset). This means we do not have to constantly append .sx for successive calls to accessor methods.

Examples

+
>>> # To use as an accessor:
>>> ds = xr.Dataset(...)
>>> feature_data = ds.sx[features]
>>> ti = ds.sx.time_index
>>> lat_lon_array = ds.sx.lat_lon

>>> # Use as wrapper:
>>> ds = Sup3rX(xr.Dataset(data_vars={'windspeed': ...}, ...))
>>> np_array = ds['windspeed'].values
>>> dask_array = ds['windspeed'][...] == ds['windspeed'].as_array()

Initialize accessor.

+
+
Parameters:
+

ds (xr.Dataset | xr.DataArray) – xarray Dataset instance to access with the following methods

+
+
+

Methods


add_dims_to_data_vars(vals)

Add dimensions to vals entries if needed.

as_array()

Return .data attribute of an xarray.DataArray with our standard dimension order (lats, lons, time, ..., features)

assign(vals)

Override xarray assign and assign_coords methods to enable update without explicitly providing dimensions if variable already exists.

coarsen(*args, **kwargs)

Override xr.Dataset.coarsen to cast back to Sup3rX object.

compute(**kwargs)

Load ._ds into memory.

flatten()

Flatten rasterized dataset so that there is only a single spatial dimension.

interpolate_na(**kwargs)

Use xr.DataArray.interpolate_na to fill NaN values with a dask compatible method.

isel(*args, **kwargs)

Override xr.Dataset.isel to cast back to Sup3rX object.

mean(**kwargs)

Get mean directly from dataset object.

normalize(means, stds)

Normalize dataset using given means and stds.

ordered(data)

Return data with dimensions in standard order (lats, lons, time, ..., features)

qa()

Check NaNs and stats for all features.

sample(idx)

Get sample from self._ds.

std(**kwargs)

Get std directly from dataset object.

to_dataarray()

Return xr.DataArray for the contained xr.Dataset.

unflatten(grid_shape)

Convert flattened dataset into rasterized dataset with the given grid shape.

update_ds(new_dset[, attrs])

Update self._ds with coords and data_vars replaced with those provided.

+
+

Attributes


dtype

Get dtype of underlying array.

features

Features in this container.

flattened

Check if the contained data is flattened 2D data or 3D rasterized data.

grid_shape

Return the shape of the spatial dimensions.

lat_lon

Base lat lon for contained data.

loaded

Check if data has been loaded as numpy arrays.

meta

Return dataframe of flattened lat / lon values.

name

Name of dataset.

shape

Get shape of underlying xr.DataArray, using our standard dimension order.

size

Get size of data contained to use in weight calculations.

target

Return the value of the lower left hand coordinate.

time_independent

Check if the contained data is time independent.

time_index

Base time index for contained data.

time_step

Get time step in seconds.

values

Return numpy values in standard dimension order (lats, lons, time, ..., features)

+
+
+
+property values#
+

Return numpy values in standard dimension order (lats, lons, time, +..., features)

+
+ +
+
+to_dataarray() ndarray | Array[source]#
+

Return xr.DataArray for the contained xr.Dataset.

+
+ +
+
+as_array()[source]#
+

Return .data attribute of an xarray.DataArray with our standard +dimension order (lats, lons, time, ..., features)

+
+ +
+
+compute(**kwargs)[source]#
+

Load ._ds into memory. This updates the internal xr.Dataset if +it has not been loaded already.

+
+ +
+
+property loaded#
+

Check if data has been loaded as numpy arrays.

+
+ +
+
+property flattened#
+

Check if the contained data is flattened 2D data or 3D rasterized +data.

+
+ +
+
+property time_independent#
+

Check if the contained data is time independent.

+
+ +
+
+update_ds(new_dset, attrs=None)[source]#
+

Update self._ds with coords and data_vars replaced with those +provided. These are both provided as dictionaries {name: dask.array}.

+
+
Parameters:
+

new_dset (Dict[str, dask.array]) – Can contain any existing or new variable / coordinate as long as +they all have a consistent shape.

+
+
Returns:
+

_ds (xr.Dataset) – Updated dataset with provided coordinates and data_vars with +variables in our standard dimension order.

+
+
+
+ +
+
+ordered(data)[source]#
+

Return data with dimensions in standard order (lats, lons, time, +..., features)

+
+ +
+
+sample(idx)[source]#
+

Get sample from self._ds. The idx should be a tuple of slices for the dimensions (south_north, west_east, time) and a list of feature names, e.g. (slice(0, 3), slice(1, 10), slice(None), ['u_10m', 'v_10m'])

+
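Following the docstring's example index (hypothetical feature names):

>>> idx = (slice(0, 3), slice(1, 10), slice(None), ['u_10m', 'v_10m'])
>>> sample = ds.sample(idx)  # spatiotemporal subset of just u_10m / v_10m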
+ +
+
+property name#
+

Name of dataset. Used to label datasets when grouped in +Data objects. e.g. for low / high res pairs or daily / hourly +data.

+
+ +
+
+isel(*args, **kwargs)[source]#
+

Override xr.Dataset.isel to cast back to Sup3rX object.

+
+ +
+
+coarsen(*args, **kwargs)[source]#
+

Override xr.Dataset.coarsen to cast back to Sup3rX object.

+
+ +
+
+mean(**kwargs)[source]#
+

Get mean directly from dataset object.

+
+ +
+
+std(**kwargs)[source]#
+

Get std directly from dataset object.

+
+ +
+
+normalize(means, stds)[source]#
+

Normalize dataset using given means and stds. These are provided as dictionaries.

+
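For example (hypothetical feature name and stats):

>>> means = {'windspeed': 5.0}
>>> stds = {'windspeed': 2.0}
>>> ds.normalize(means, stds)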
+ +
+
+interpolate_na(**kwargs)[source]#
+

Use xr.DataArray.interpolate_na to fill NaN values with a dask +compatible method.

+
+ +
+
+add_dims_to_data_vars(vals)[source]#
+

Add dimensions to vals entries if needed. This is used to set values of self._ds which can require dimensions to be explicitly specified for the data being set. e.g. self._ds['u_100m'] = (('south_north', 'west_east', 'time'), data). We make guesses on the correct dims if they are missing and give a warning. We add attributes if available in vals, as well

+
+
Parameters:
+

vals (Dict[Str, Union]) – Dictionary of feature names and arrays to use for setting feature +data. When arrays are >2 dimensions xarray needs explicit dimension +info, so we need to add these if not provided.

+
+
+
+ +
+
+assign(vals: Dict[str, ndarray | Array | tuple])[source]#
+

Override xarray assign and assign_coords methods to enable update +without explicitly providing dimensions if variable already exists.

+
+
Parameters:
+

vals (dict) – Dictionary of variable names and either arrays or tuples of (dims, +array). If dims are not provided this will try to use stored dims +of the variable, if it exists already.

+
+
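A sketch of both accepted value forms (new_vals is a hypothetical array):

>>> ds.assign({'u_100m': new_vals})  # reuse stored dims if 'u_100m' exists
>>> ds.assign({'u_100m': (('south_north', 'west_east', 'time'), new_vals)})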
+
+ +
+
+property features#
+

Features in this container.

+
+ +
+
+property dtype#
+

Get dtype of underlying array.

+
+ +
+
+property shape#
+

Get shape of underlying xr.DataArray, using our standard dimension +order.

+
+ +
+
+property size#
+

Get size of data contained to use in weight calculations.

+
+ +
+
+property time_index#
+

Base time index for contained data.

+
+ +
+
+property time_step#
+

Get time step in seconds.

+
+ +
+
+property lat_lon: ndarray | Array#
+

Base lat lon for contained data.

+
+ +
+
+property target#
+

Return the value of the lower left hand coordinate.

+
+ +
+
+property grid_shape#
+

Return the shape of the spatial dimensions.

+
+ +
+
+property meta#
+

Return dataframe of flattened lat / lon values. Can also be set to +include additional data like elevation, country, state, etc

+
+ +
+
+unflatten(grid_shape)[source]#
+

Convert flattened dataset into rasterized dataset with the given +grid shape.

+
+ +
+
+flatten()[source]#
+

Flatten rasterized dataset so that there is only a single spatial +dimension.

+
+ +
+
+qa()[source]#
+

Check NaNs and stats for all features.

+
+ +
+
+__mul__(other)[source]#
+

Multiply Sup3rX object by other. Used to compute weighted means and stdevs.

+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.accessor.html b/_autosummary/sup3r.preprocessing.accessor.html
new file mode 100644
index 000000000..01faca1bc
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.accessor.html

sup3r.preprocessing.accessor#

+

Accessor for xarray. This defines the basic data object contained by all Container objects.

+

Classes


Sup3rX(ds)

Accessor for xarray - the suggested way to extend xarray functionality.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.base.Container.html b/_autosummary/sup3r.preprocessing.base.Container.html
new file mode 100644
index 000000000..7e4b638f4
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.base.Container.html

sup3r.preprocessing.base.Container#

+
+
+class Container(data: Sup3rX | Sup3rDataset | Tuple[Sup3rX, ...] | Tuple[Sup3rDataset, ...] | None = None)[source]#
+

Bases: object

+

Basic fundamental object used to build preprocessing objects. Contains an xarray-like Dataset (Sup3rX), wrapped tuple of Sup3rX objects (Sup3rDataset), or a tuple of such objects.

+
+
Parameters:
+

data (Union[Sup3rX, Sup3rDataset, Tuple[Sup3rX, …], Tuple[Sup3rDataset, …]]) – Can be an xr.Dataset, a Sup3rX object, a Sup3rDataset object, or a tuple of such objects.

+
+

Note

+

.data will return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+
+
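A minimal sketch, using the doc's ellipsis convention for dataset construction:

>>> from sup3r.preprocessing.base import Container
>>> c = Container(data=xr.Dataset(...))
>>> c.data   # a Sup3rDataset wrapping a 1-tuple
>>> c.shape  # shape of the underlying data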
+

Methods


post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes


data

Return underlying data.

shape

Get shape of underlying data.

+
+
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+wrap(data)[source]#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+
+post_init_log(args_dict=None)[source]#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.base.Sup3rDataset.html b/_autosummary/sup3r.preprocessing.base.Sup3rDataset.html
new file mode 100644
index 000000000..b8f46a9b3
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.base.Sup3rDataset.html

sup3r.preprocessing.base.Sup3rDataset#

+
+
+class Sup3rDataset(**dsets: Mapping[str, Dataset | Sup3rX])[source]#
+

Bases: object

+

Interface for interacting with one or two xr.Dataset instances. This is a wrapper around one or two Sup3rX objects so they work well with Dual objects like DualSampler, DualRasterizer, DualBatchHandler, etc.

+

Examples

+
>>> # access high_res or low_res:
>>> hr = xr.Dataset(...)
>>> lr = xr.Dataset(...)
>>> ds = Sup3rDataset(low_res=lr, high_res=hr)
>>> ds.high_res; ds.low_res  # returns Sup3rX objects
>>> ds[feature]  # returns a tuple of dataarray (low_res, high_res)

>>> # access hourly or daily:
>>> daily = xr.Dataset(...)
>>> hourly = xr.Dataset(...)
>>> ds = Sup3rDataset(daily=daily, hourly=hourly)
>>> ds.hourly; ds.daily  # returns Sup3rX objects
>>> ds[feature]  # returns a tuple of dataarray (daily, hourly)

>>> # single resolution data access:
>>> xds = xr.Dataset(...)
>>> ds = Sup3rDataset(hourly=xds)
>>> ds.hourly  # returns Sup3rX object
>>> ds[feature]  # returns a single dataarray
+

Note

+

(1) This may seem similar to Collection, which also can contain multiple data members, but members of Collection objects are completely independent while here there are at most two members which are related as low / high res versions of the same underlying data.

+

(2) Here we make an important choice to use high_res members to compute means / stds. It would be reasonable to instead use the average of high_res and low_res means / stds for aggregate stats but we want to preserve the relationship between coarsened variables after normalization (e.g. temperature_2m, temperature_max_2m, temperature_min_2m). This means all these variables should have the same means and stds, which ultimately come from the high_res non coarsened variable.

+
+
+
Parameters:
+

dsets (Mapping[str, xr.Dataset | Sup3rX | Sup3rDataset]) – Sup3rDataset is initialized from a flexible kwargs input. The keys will be used as names in a named tuple and the values will be the dataset members. These names will also be used to define attributes which point to these dataset members. You can provide name=data or name1=data1, name2=data2 and then access these datasets as .name1 or .name2. If dsets values are xr.Dataset objects these will be cast to Sup3rX objects first. We also check if dsets values are Sup3rDataset objects and if they only include one data member we use those to reinitialize a Sup3rDataset

+
+
+

Methods


compute(**kwargs)

Load data into memory for each data member.

isel(*args, **kwargs)

Return new Sup3rDataset with isel applied to each member.

mean(**kwargs)

Use the high_res members to compute the means.

normalize(means, stds)

Normalize dataset using the given mean and stds.

rewrap(data)

Rewrap data as Sup3rDataset after calling parent method.

sample(idx)

Get samples from self._ds members.

std(**kwargs)

Use the high_res members to compute the stds.

+
+

Attributes


dtype

Get datatype of first member.

features

The features are determined by the set of features from all data members.

loaded

Check if all data members have been loaded into memory.

shape

We use the shape of the largest data member.

size

Return number of elements in the largest data member.

+
+
+
+property dtype#
+

Get datatype of first member. Assumed to be constant for all +members.

+
+ +
+
+rewrap(data)[source]#
+

Rewrap data as Sup3rDataset after calling parent method.

+
+ +
+
+sample(idx)[source]#
+

Get samples from self._ds members. idx should be either a tuple of slices for the dimensions (south_north, west_east, time) and a list of feature names or a 2-tuple of the same, for dual datasets.

+
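For a dual (low_res, high_res) dataset, idx is a 2-tuple of member indices (hypothetical slices and feature name):

>>> lr_idx = (slice(0, 2), slice(0, 2), slice(None), ['u_10m'])
>>> hr_idx = (slice(0, 4), slice(0, 4), slice(None), ['u_10m'])
>>> lr_samp, hr_samp = ds.sample((lr_idx, hr_idx))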
+ +
+
+isel(*args, **kwargs)[source]#
+

Return new Sup3rDataset with isel applied to each member.

+
+ +
+
+property shape#
+

We use the shape of the largest data member. These are assumed to be +ordered as (low-res, high-res) if there are two members.

+
+ +
+
+property features#
+

The features are determined by the set of features from all data +members.

+
+ +
+
+property size#
+

Return number of elements in the largest data member.

+
+ +
+
+mean(**kwargs)[source]#
+

Use the high_res members to compute the means. These are used for +normalization during training.

+
+ +
+
+std(**kwargs)[source]#
+

Use the high_res members to compute the stds. These are used for +normalization during training.

+
+ +
+
+normalize(means, stds)[source]#
+

Normalize the dataset using the given means and stds. These are provided as dictionaries.
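Conceptually, this applies standard scaling per feature (a hedged sketch; the key and values below are illustrative):

>>> # each feature f is scaled as (data[f] - means[f]) / stds[f]
>>> ds.normalize(means={'u_10m': 4.0}, stds={'u_10m': 2.5})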

+
+ +
+
+compute(**kwargs)[source]#
+

Load data into memory for each data member.

+
+ +
+
+property loaded#
+

Check if all data members have been loaded into memory.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.base.Sup3rMeta.html b/_autosummary/sup3r.preprocessing.base.Sup3rMeta.html
new file mode 100644
index 000000000..5d4540273
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.base.Sup3rMeta.html
@@ -0,0 +1,808 @@

sup3r.preprocessing.base.Sup3rMeta#

+
+
+class Sup3rMeta(name, bases, namespace, **kwargs)[source]#
+

Bases: ABCMeta, type

+

Metaclass to define __name__, __signature__, and __subclasscheck__ of composite and derived classes. This allows us to still resolve a signature for classes which pass through parent args / kwargs as *args / **kwargs, or for classes built through factory composition, for example.
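A minimal sketch of the intent (the classes here are hypothetical, and the exact resolution logic lives in Sup3rMeta itself):

>>> import inspect
>>> class Base:
...     def __init__(self, a=1, b=2):
...         self.a, self.b = a, b
>>> class Derived(Base, metaclass=Sup3rMeta):
...     def __init__(self, *args, **kwargs):
...         super().__init__(*args, **kwargs)
>>> # per the description above, the signature should resolve to the
>>> # parent's (a=1, b=2) rather than the opaque (*args, **kwargs)
>>> inspect.signature(Derived)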

+

Define __name__ and __signature__

+

Methods

+

mro()

Return a type's method resolution order.

register(subclass)

Register a virtual subclass of an ABC.

+
+
+
+__call__(*args, **kwargs)#
+

Call self as a function.

+
+ +
+
+mro()#
+

Return a type’s method resolution order.

+
+ +
+
+register(subclass)#
+

Register a virtual subclass of an ABC.

+

Returns the subclass, to allow usage as a class decorator.

+
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.base.html b/_autosummary/sup3r.preprocessing.base.html
new file mode 100644
index 000000000..ac1c31ffc
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.base.html
@@ -0,0 +1,751 @@

sup3r.preprocessing.base#

+

Base classes - fundamental dataset objects and the base Container object, which just contains dataset objects. All objects that interact with data are containers, e.g. loaders, rasterizers, data handlers, samplers, batch queues, and batch handlers.

+

TODO: xarray-contrib/datatree might be a better approach +for Sup3rDataset concept. Consider migrating once datatree has been fully +integrated into xarray (in progress as of 8/8/2024)

+

Classes

+

Container([data])

Basic fundamental object used to build preprocessing objects.

Sup3rDataset(**dsets)

Interface for interacting with one or two xr.Dataset instances.

Sup3rMeta(name, bases, namespace, **kwargs)

Meta class to define __name__, __signature__, and __subclasscheck__ of composite and derived classes.

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.html b/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.html
new file mode 100644
index 000000000..da62adb81
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.html
@@ -0,0 +1,1328 @@

sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC#

+
+
+class BatchHandlerDC(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, spatial_weights=None, temporal_weights=None, n_space_bins=1, n_time_bins=1)[source]#
+

Bases: BatchHandler

+

Data-Centric BatchHandler. This is used to adaptively select data from lower-performing spatiotemporal extents during training. To do this, validation data is required, since it is used to compute losses within fixed spatiotemporal bins, which are then used as sampling probabilities for those same regions when building batches.
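A hedged construction sketch (the data handler objects are placeholders and the shapes are illustrative):

>>> bh = BatchHandlerDC(
...     train_containers=[train_handler],
...     val_containers=[val_handler],  # required, to compute bin losses
...     sample_shape=(20, 20, 24),
...     batch_size=16,
...     n_batches=64,
...     s_enhance=5,
...     t_enhance=24,
...     n_space_bins=4,
...     n_time_bins=4,
... )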

+ +
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features. See Sampler.

  • +
  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • +
  • spatial_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the spatial sampling. e.g. If we want to start off sampling across 2 spatial bins evenly this should be [0.5, 0.5]. During training these weights will be updated based only on performance across the bins associated with these weights.

  • +
  • temporal_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the temporal sampling. e.g. If we want to start off sampling only the first season of the year this should be [1, 0, 0, 0]. During training these weights will be updated based only on performance across the bins associated with these weights.

  • +
  • n_space_bins (int) – Number of spatial bins to use for weighted sampling. e.g. if this is 4 the spatial domain will be divided into 4 equal regions and losses will be calculated across these regions during training in order to adaptively sample from lower-performing regions.

  • +
  • n_time_bins (int) – Number of time bins to use for weighted sampling. e.g. if this is 4 the temporal domain will be divided into 4 equal periods and losses will be calculated across these periods during training in order to adaptively sample from lower-performing time periods.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Update weights and get batch of samples from sampled container.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

update_weights(spatial_weights, temporal_weights)

Set weights used to sample spatial and temporal bins.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

spatial_weights

Get weights used to sample spatial bins.

temporal_weights

Get weights used to sample temporal bins.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)
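For example (a sketch; the shapes are arbitrary and the namedtuple is accessed here as the nested class shown above):

>>> import numpy as np
>>> batch = BatchHandlerDC.Batch(low_res=np.zeros((4, 4, 4, 6, 2)),
...                              high_res=np.zeros((4, 20, 20, 24, 2)))
>>> batch.low_res.shape
(4, 4, 4, 6, 2)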

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+SAMPLER#
+

alias of SamplerDC

+
+ +
+
+TRAIN_QUEUE#
+

alias of BatchQueueDC

+
+ +
+
+VAL_QUEUE#
+

alias of ValBatchQueueDC

+
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Update weights and get batch of samples from sampled container.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+property spatial_weights#
+

Get weights used to sample spatial bins.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
+property temporal_weights#
+

Get weights used to sample temporal bins.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
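A usage sketch of transform (assuming bh is an initialized handler and samples is a 5D high-res array as described above):

>>> low_res, high_res = bh.transform(
...     samples,
...     smoothing=1.0,
...     temporal_coarsening_method='average',
... )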
+ +
+
+update_weights(spatial_weights, temporal_weights)#
+

Set weights used to sample spatial and temporal bins. This is called +by Sup3rGanDC after an epoch to update weights based on model +performance across validation samples.
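For example, with 4 spatial bins and 4 time bins (a sketch; the weights act as sampling probabilities, so each set presumably sums to 1):

>>> bh.update_weights(spatial_weights=[0.4, 0.2, 0.2, 0.2],
...                   temporal_weights=[0.25, 0.25, 0.25, 0.25])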

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or a tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.dc.html b/_autosummary/sup3r.preprocessing.batch_handlers.dc.html
new file mode 100644
index 000000000..561889e0c
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.dc.html
@@ -0,0 +1,743 @@

sup3r.preprocessing.batch_handlers.dc#

+

Data centric batch handlers. Sample contained data according to +spatiotemporal weights, which are derived from losses on validation data during +training and updated each epoch.

+

TODO: Easy to implement dual dc batch handler - Just need to use DualBatchQueue +and override SamplerDC get_sample_index method.

+

Classes

+

BatchHandlerDC(train_containers, *[, ...])

Data-Centric BatchHandler.

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.html
new file mode 100644
index 000000000..34c52a266
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.html
@@ -0,0 +1,1311 @@

sup3r.preprocessing.batch_handlers.factory.BatchHandler#

+
+
+class BatchHandler(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, **kwargs)#
+

Bases: SingleBatchQueue

+

BatchHandler object built from two lists of Container objects, one with training data and one with validation data. These lists will be used to initialize lists of Sampler objects that will then be used to build batches at run time.

+

Notes

+

These lists of containers can contain data from the same underlying data source (e.g. CONUS WTK), for instance initializing train / val containers with different time periods and / or regions; or they can be used to sample from completely different data sources (e.g. train on CONUS WTK while validating on Canada WTK).
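A hedged end-to-end sketch (DataHandler construction is elided and the container names are placeholders):

>>> bh = BatchHandler(train_containers=[train_handler],
...                   val_containers=[val_handler],
...                   sample_shape=(18, 18, 24),
...                   batch_size=16, n_batches=64,
...                   s_enhance=3, t_enhance=4)
>>> for batch in bh:  # yields n_batches batches per epoch
...     pass          # batch.low_res and batch.high_res
>>> bh.stop()         # stop the queue thread when done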

+
+

See also

+

Sampler, AbstractBatchQueue, StatsCollection

+
+
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features (see the example dict after this parameter list).

    features : list | tuple
    List of the full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.

    lr_only_features : list | tuple
    List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.

    hr_exo_features : list | tuple
    List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
  • +
  • kwargs (dict) – Additional keyword arguments for BatchQueue and / or Samplers. This can vary depending on the type of BatchQueue / Sampler given to the Factory. For example, to build a BatchHandlerDC object (data-centric batch handler) we use a queue and sampler which take spatial and temporal weight / bin arguments used to determine how to weight spatiotemporal regions when sampling. Using ConditionalBatchQueue will result in arguments for computing moments from batches and how to pad batch data to enable these calculations.

  • +
  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • +
+
+
+
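A sketch of a feature_sets dict matching the description above (the feature names are illustrative):

>>> feature_sets = {
...     'features': ['u_10m', 'v_10m', 'topography'],
...     'lr_only_features': [],
...     'hr_exo_features': ['topography'],
... }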

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+SAMPLER#
+

alias of Sampler

+
+ +
+
+TRAIN_QUEUE#
+

alias of SingleBatchQueue

+
+ +
+
+VAL_QUEUE#
+

alias of SingleBatchQueue

+
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or a tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.html
new file mode 100644
index 000000000..95c306eb4
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.html
@@ -0,0 +1,1286 @@

sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC#

+
+
+class BatchHandlerCC(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, **kwargs)#
+

Bases: DualBatchQueue

+

BatchHandler object built from two lists of Container objects, one with training data and one with validation data. These lists will be used to initialize lists of Sampler objects that will then be used to build batches at run time.

+

Notes

+

These lists of containers can contain data from the same underlying data source (e.g. CONUS WTK), for instance initializing train / val containers with different time periods and / or regions; or they can be used to sample from completely different data sources (e.g. train on CONUS WTK while validating on Canada WTK).

+
+

See also

+

Sampler, AbstractBatchQueue, StatsCollection

+
+
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.

    lr_only_features : list | tuple
    List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.

    hr_exo_features : list | tuple
    List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
  • +
  • kwargs (dict) – Additional keyword arguments for BatchQueue and / or Samplers. This can vary depending on the type of BatchQueue / Sampler given to the Factory. For example, to build a BatchHandlerDC object (data-centric batch handler) we use a queue and sampler which take spatial and temporal weight / bin arguments used to determine how to weight spatiotemporal regions when sampling. Using ConditionalBatchQueue will result in arguments for computing moments from batches and how to pad batch data to enable these calculations.

  • +
  • sample_shape (tuple) – Size of arrays to sample from the high-res data. The sample shape +for the low-res sampler will be determined from the enhancement +factors.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, smoothing_ignore])

Perform smoothing if requested.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+SAMPLER#
+

alias of DualSamplerCC

+
+ +
+
+TRAIN_QUEUE#
+

alias of DualBatchQueue

+
+ +
+
+VAL_QUEUE#
+

alias of DualBatchQueue

+
+ +
+
+check_enhancement_factors()#
+

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None)#
+

Perform smoothing if requested.

+
+

Note

+

This does not include temporal or spatial coarsening like +SingleBatchQueue

+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or a tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.html
new file mode 100644
index 000000000..f3c7f9e0d
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.html
@@ -0,0 +1,780 @@

sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory#

+
+
+BatchHandlerFactory(MainQueueClass, SamplerClass, ValQueueClass=None, name='BatchHandler')[source]#
+

BatchHandler factory. Can build handlers from different queue classes +and sampler classes. For example, to build a standard +BatchHandler use +SingleBatchQueue and +Sampler. To build a +DualBatchHandler use +DualBatchQueue and +DualSampler. To build a +BatchHandlerDC use a +BatchQueueDC, +ValBatchQueueDC and +SamplerDC
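Per the description above, the standard handlers can be assembled roughly like this (a sketch, assuming the queue and sampler classes are imported from their respective modules):

>>> BatchHandler = BatchHandlerFactory(
...     SingleBatchQueue, Sampler, name='BatchHandler')
>>> DualBatchHandler = BatchHandlerFactory(
...     DualBatchQueue, DualSampler, name='DualBatchHandler')
>>> BatchHandlerDC = BatchHandlerFactory(
...     BatchQueueDC, SamplerDC, ValQueueClass=ValBatchQueueDC,
...     name='BatchHandlerDC')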

+
+

Note

+

(1) BatchHandlers include a queue for training samples and a queue for validation samples.

(2) There is no need to generate “Spatial” batch handlers. Using Sampler objects with a single time step in the sample shape will produce batches without a time dimension.
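For example (a sketch; the container is a placeholder), a spatial-only handler just uses a single-time-step sample shape:

>>> bh = BatchHandler(train_containers=[train_handler],
...                   sample_shape=(18, 18, 1),
...                   s_enhance=3, t_enhance=1)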

+
+
+ +
+ + +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.html
new file mode 100644
index 000000000..4a56e2d5d
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.html
@@ -0,0 +1,1448 @@

sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1#

+
+
+class BatchHandlerMom1(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, **kwargs)#
+

Bases: QueueMom1

+

BatchHandler object built from two lists of Container objects, one with training data and one with validation data. These lists will be used to initialize lists of Sampler objects that will then be used to build batches at run time.

+

Notes

+

These lists of containers can contain data from the same underlying data source (e.g. CONUS WTK), for instance initializing train / val containers with different time periods and / or regions; or they can be used to sample from completely different data sources (e.g. train on CONUS WTK while validating on Canada WTK).

+
+

See also

+

Sampler, AbstractBatchQueue, StatsCollection

+
+
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.

    features : list | tuple
    List of the full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.

    lr_only_features : list | tuple
    List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.

    hr_exo_features : list | tuple
    List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
  • +
  • kwargs (dict) – Keyword arguments for parent class

  • +
  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • +
  • time_enhance_mode (str) – [constant, linear] Method of temporal enhancement to use when constructing the subfilter (see the constructor sketch after this parameter list). At every temporal location, low-res temporal data is subtracted from the predicted high-res temporal data. 'constant' will assume that the low-res temporal data is constant between landmarks. 'linear' will linearly interpolate between landmarks to generate the low-res data to remove from the high-res.

  • +
  • lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if +this queue is part of a handler to estimate the 3rd moment +lower_models could include models that estimate the 1st and 2nd +moments. These lower moments can be required in higher order moment +calculations.

  • +
  • s_padding (int | None) – Width of spatial padding to predict only middle part. If None, no +padding is used

  • +
  • t_padding (int | None) – Width of temporal padding to predict only middle part. If None, no +padding is used

  • +
  • end_t_padding (bool | False) – Zero pad the end of temporal space. Ensures that loss is +calculated only if snapshot is surrounded by temporal landmarks. +False by default

  • +
+
+
+
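A hedged construction sketch for the first-moment handler (the containers are placeholders and the shapes are illustrative):

>>> bh = BatchHandlerMom1(train_containers=[train_handler],
...                       val_containers=[val_handler],
...                       sample_shape=(12, 12, 24),
...                       batch_size=8, s_enhance=4, t_enhance=6,
...                       time_enhance_mode='linear',
...                       s_padding=1, t_padding=1, end_t_padding=True)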

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

make_mask(high_res)

Make mask for output.

make_output(samples)

For the 1st moment the output is simply the high_res

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+class ConditionalBatch(low_res, high_res, output, mask)#
+

Bases: tuple

+

Create new instance of ConditionalBatch(low_res, high_res, output, mask)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+
+mask#
+

Alias for field number 3

+
+ +
+
+output#
+

Alias for field number 2

+
+ +
+ +
+
+SAMPLER#
+

alias of Sampler

+
+ +
+
+TRAIN_QUEUE#
+

alias of QueueMom1

+
+ +
+
+VAL_QUEUE#
+

alias of QueueMom1

+
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+make_mask(high_res)#
+

Make mask for output. This is used to ensure consistency when +training conditional moments.

+
+

Note

+

Consider the case of learning E(HR|LR) where HR is the high_res and LR is the low_res. In theory, the conditional moment estimation works if the full LR is passed as input and predicts the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e. E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks only if one restricts the high_res output to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.

+
+
+
Parameters:
+

high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
+
+make_output(samples)#
+

For the 1st moment the output is simply the high_res

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples)#
+

Returns normalized collection of samples / observations along with +mask and target output for conditional moment estimation. Performs +coarsening on high-res data if Collection consists of +Sampler objects and not DualSampler objects

+
+
Returns:
+

namedtuple – Named tuple with low_res, high_res, mask, and output +attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
Coarsen high res data to get the corresponding low res batch.

Parameters:
• samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
• smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse data. This can be tuned by matching the kinetic energy of a low resolution simulation with the kinetic energy of a coarsened and smoothed high resolution simulation. If None, no smoothing is performed.
• smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will smooth all features if the smoothing kwarg is not None.
• temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, min, max, or total.

Returns:
• low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
• high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
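For intuition, here is a minimal stand-in for the coarsening performed by transform, using simple mean pooling in space and subsampling in time; the real method also supports gaussian smoothing and the other temporal methods listed above.

```python
import numpy as np

def coarsen(high_res, s_enhance=2, t_enhance=2):
    """Illustrative spatial mean-pooling + temporal subsampling for a 5D
    array (batch_size, spatial_1, spatial_2, temporal, features)."""
    b, s1, s2, t, f = high_res.shape
    low = high_res.reshape(
        b, s1 // s_enhance, s_enhance, s2 // s_enhance, s_enhance, t, f
    ).mean(axis=(2, 4))                 # spatial coarsening
    low = low[:, :, :, ::t_enhance, :]  # temporal 'subsample' method
    return low

hr = np.random.rand(2, 8, 8, 12, 3)
lr = coarsen(hr, s_enhance=4, t_enhance=3)
print(lr.shape)  # (2, 2, 2, 4, 3)
```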
wrap(data)#
Return a Sup3rDataset object or a tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.html
new file mode 100644
index 000000000..3b679fdf9
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.html
@@ -0,0 +1,1456 @@
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF — sup3r 0.2.1.dev62 documentation
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF#

class BatchHandlerMom1SF(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, **kwargs)#

Bases: QueueMom1SF

BatchHandler object built from two lists of Container objects, one with training data and one with validation data. These lists are used to initialize lists of Sampler objects, which are then used to build batches at run time.

Notes
These lists of containers can contain data from the same underlying data source (e.g. CONUS WTK), with train / val containers initialized over different time periods and / or regions, or they can be used to sample from completely different data sources (e.g. train on CONUS WTK while validating on Canada WTK).

See also
Sampler, AbstractBatchQueue, StatsCollection
Parameters:
• train_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of training data. The data can be a Sup3rX or Sup3rDataset object.
• val_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of validation data. The data can be a Sup3rX or a Sup3rDataset object.
• batch_size (int) – Number of observations / samples in a batch.
• n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.
• s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.
• t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.
• means (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
• stds (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
• queue_cap (int) – Maximum number of batches the batch queue can store.
• transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method performs smoothing / coarsening.
• mode (str) – Loading mode. Default is 'lazy', which only loads data into memory as batches are queued. 'eager' will load all data into memory right away.
• feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.
    features : list | tuple
        List of the full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.
    lr_only_features : list | tuple
        List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.
    hr_exo_features : list | tuple
        List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
• kwargs (dict) – Keyword arguments for parent class.
• sample_shape (tuple) – Size of arrays to sample from the contained data.
• time_enhance_mode (str) – [constant, linear] Method to enhance temporally when constructing the subfilter. At every temporal location, low-res temporal data is subtracted from the predicted high-res temporal data. 'constant' will assume that the low-res temporal data is constant between landmarks; 'linear' will linearly interpolate between landmarks to generate the low-res data to remove from the high-res.
• lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if this queue is part of a handler to estimate the 3rd moment, lower_models could include models that estimate the 1st and 2nd moments. These lower moments can be required in higher order moment calculations.
• s_padding (int | None) – Width of spatial padding used to predict only the middle part. If None, no padding is used.
• t_padding (int | None) – Width of temporal padding used to predict only the middle part. If None, no padding is used.
• end_t_padding (bool) – Zero pad the end of temporal space. Ensures that loss is calculated only if a snapshot is surrounded by temporal landmarks. False by default.
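As a construction sketch (not taken from the sup3r docs themselves): the file paths, feature names, and the DataHandler import below are placeholder assumptions; any objects with a .data attribute would work for the container lists.

```python
from sup3r.preprocessing import DataHandler  # assumed import location
from sup3r.preprocessing.batch_handlers.factory import BatchHandlerMom1SF

# Placeholder data handlers; any objects with a .data attribute work here.
train = [DataHandler('./conus_wtk_2012.h5', features=['u_100m', 'v_100m'])]
val = [DataHandler('./conus_wtk_2013.h5', features=['u_100m', 'v_100m'])]

handler = BatchHandlerMom1SF(
    train_containers=train,
    val_containers=val,
    sample_shape=(12, 12, 24),  # (spatial_1, spatial_2, temporal)
    batch_size=16,
    n_batches=64,
    s_enhance=3,                # 3x spatial enhancement
    t_enhance=4,                # 4x temporal enhancement
    s_padding=1,
    t_padding=1,
    mode='lazy',                # load data into memory only as batches queue
)
```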
Methods

check_enhancement_factors() – Make sure the enhancement factors evenly divide the sample_shape.
check_features() – Make sure all samplers have the same sets of features.
check_shared_attr(attr) – Check if all containers have the same value for attr.
enqueue_batch() – Build batch and send to queue.
enqueue_batches() – Callback function for queue thread.
get_batch() – Get batch from queue or directly from a Sampler through sample_batch.
get_container_index() – Get random container index based on weights.
get_queue() – Return FIFO queue for storing batches.
get_random_container() – Get random container based on container weights.
init_samplers(train_containers, ...) – Initialize samplers from given data containers.
log_queue_info() – Log info about queue size.
make_mask(high_res) – Make mask for output.
make_output(samples)
post_init_log([args_dict]) – Log additional arguments after initialization.
post_proc(samples) – Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.
preflight() – Run checks before kicking off the queue.
sample_batch() – Get random sampler from collection and return a batch of samples from that sampler.
start() – Start the val data batch queue in addition to the train batch queue.
stop() – Stop the val data batch queue in addition to the train batch queue.
transform(samples[, smoothing, ...]) – Coarsen high res data to get corresponding low res batch.
wrap(data) – Return a Sup3rDataset object or tuple of such.
Attributes

container_weights – Get weights used to sample from different containers based on relative sizes.
data – Return underlying data.
features – Get all features contained in data.
hr_shape – Shape of high resolution sample in a low-res / high-res pair.
lr_shape – Shape of low resolution sample in a low-res / high-res pair.
queue_shape – Shape of objects stored in the queue.
queue_thread – Get new queue thread.
running – Boolean to check whether to keep enqueueing batches.
shape – Get shape of underlying data.
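Most of these members are internal; a typical training loop only touches a few of them. A rough usage sketch, assuming the handler constructed above (the val_data attribute is an assumption based on the BatchHandler factory pattern):

```python
handler.start()                         # spin up the train (and val) queue threads
for epoch in range(2):
    for batch in handler:               # yields n_batches batches per epoch
        pass                            # training step on batch here
    for val_batch in handler.val_data:  # assumed validation-queue attribute
        pass                            # validation step here
handler.stop()                          # join the queue threads
```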
class Batch(low_res, high_res)#
Bases: tuple
Create new instance of Batch(low_res, high_res)

__add__(value, /)# – Return self+value.
__mul__(value, /)# – Return self*value.
count(value, /)# – Return number of occurrences of value.
high_res# – Alias for field number 1
index(value, start=0, stop=9223372036854775807, /)# – Return first index of value. Raises ValueError if the value is not present.
low_res# – Alias for field number 0

class ConditionalBatch(low_res, high_res, output, mask)#
Bases: tuple
Create new instance of ConditionalBatch(low_res, high_res, output, mask)

__add__(value, /)# – Return self+value.
__mul__(value, /)# – Return self*value.
count(value, /)# – Return number of occurrences of value.
high_res# – Alias for field number 1
index(value, start=0, stop=9223372036854775807, /)# – Return first index of value. Raises ValueError if the value is not present.
low_res# – Alias for field number 0
mask# – Alias for field number 3
output# – Alias for field number 2
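Because these batch classes are plain namedtuples, a batch supports both attribute access and positional unpacking; a small sketch (continuing from the handler above, and assuming a conditional handler's get_batch returns a ConditionalBatch):

```python
batch = handler.get_batch()              # assumed: one ConditionalBatch from the queue
low_res, high_res, output, mask = batch  # positional unpacking
assert batch.low_res is low_res and batch.mask is mask
masked_target = batch.output * batch.mask  # loss is evaluated on masked output
```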
SAMPLER# – alias of Sampler
TRAIN_QUEUE# – alias of QueueMom1SF
VAL_QUEUE# – alias of QueueMom1SF
check_enhancement_factors()#
Make sure the enhancement factors evenly divide the sample_shape.

check_features()#
Make sure all samplers have the same sets of features.

check_shared_attr(attr)#
Check if all containers have the same value for attr. If they do, the collection effectively inherits those attributes.

property container_weights#
Get weights used to sample from different containers based on relative sizes.

property data#
Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

enqueue_batch()#
Build batch and send to queue.

enqueue_batches() → None#
Callback function for queue thread. While training, the queue is checked for empty spots and filled. In the training thread, batches are removed from the queue.

property features#
Get all features contained in data.
(The members get_batch through make_mask and post_init_log through wrap are identical to the corresponding members documented above; only make_output differs for this class.)
make_output(samples)#

Returns:
SF (Union[np.ndarray, da.core.Array]) – 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
SF is the subfilter field, HR is high-res, and LR is low-res: SF = HR - LR
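So the first-moment subfilter target is simply the high-res field minus the low-res field after temporal enhancement. A schematic of the 'constant' time_enhance_mode (the shapes and the np.repeat upsampling are illustrative assumptions):

```python
import numpy as np

hr = np.random.rand(4, 12, 12, 24, 2)          # (batch, s1, s2, t, features)
lr_coarse_t = np.random.rand(4, 12, 12, 6, 2)  # same grid, 4x coarser in time

# 'constant' enhancement: hold each low-res timestep fixed between landmarks
lr_enhanced = np.repeat(lr_coarse_t, repeats=4, axis=3)

sf_target = hr - lr_enhanced                   # SF = HR - LR
```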
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.html
new file mode 100644
index 000000000..8d5090075
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.html
@@ -0,0 +1,1455 @@
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2 — sup3r 0.2.1.dev62 documentation
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2#

class BatchHandlerMom2(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, **kwargs)#

Bases: QueueMom2
All members except the queue aliases and make_output are identical to those documented for BatchHandlerMom1SF above.
TRAIN_QUEUE# – alias of QueueMom2
VAL_QUEUE# – alias of QueueMom2
make_output(samples)#

Returns:
(HR - <HR|LR>)**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
HR is high-res and LR is low-res
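That is, the second-moment target is the squared residual about the first-moment estimate, which is why a trained first-moment model must be supplied through lower_models. A schematic sketch (the toy estimator below stands in for the real model prediction):

```python
import numpy as np

# Schematic only: stand-in for a trained Sup3rCondMom first-moment model
# that would be passed via lower_models={1: mom1}.
def mom1_estimate(low_res):
    return np.repeat(low_res, repeats=4, axis=3)  # toy <HR|LR> on the HR grid

low_res = np.random.rand(4, 12, 12, 6, 2)
high_res = np.random.rand(4, 12, 12, 24, 2)
target_mom2 = (high_res - mom1_estimate(low_res)) ** 2  # (HR - <HR|LR>)**2
```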
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.html
new file mode 100644
index 000000000..abe8f7714
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.html
@@ -0,0 +1,761 @@
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF — sup3r 0.2.1.dev62 documentation
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF#

BatchHandlerMom2SF#
alias of BatchHandler
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.html
new file mode 100644
index 000000000..572789c1c
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.html
@@ -0,0 +1,1455 @@
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep — sup3r 0.2.1.dev62 documentation
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep#

class BatchHandlerMom2Sep(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, **kwargs)#

Bases: QueueMom2Sep
All members except the queue aliases and make_output are identical to those documented for BatchHandlerMom1SF above.
TRAIN_QUEUE# – alias of QueueMom2Sep
VAL_QUEUE# – alias of QueueMom2Sep
make_output(samples)#

Returns:
HR**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array
(batch_size, spatial_1, spatial_2, features)
(batch_size, spatial_1, spatial_2, temporal, features)
HR is high-res
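Here the raw second moment is learned separately, with no first-moment model required at train time; the variance can then be recovered downstream. Schematically (shapes and variable names are illustrative):

```python
import numpy as np

high_res = np.random.rand(4, 12, 12, 24, 2)
target_mom2_sep = high_res ** 2  # learn E[HR**2 | LR] directly
# Downstream, once both moment models are trained:
# variance ≈ mom2_sep_prediction - mom1_prediction ** 2
```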
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.html
new file mode 100644
index 000000000..07d58c158
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.html
@@ -0,0 +1,1456 @@
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF — sup3r 0.2.1.dev62 documentation
sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF#

class BatchHandlerMom2SepSF(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, **kwargs)#

Bases: QueueMom2SepSF
BatchHandler object built from two lists of +Container objects, one with +training data and one with validation data. These lists will be used +to initialize lists of class:Sampler objects that will then be used +to build batches at run time.

+

Notes

+

These lists of containers can contain data from the same underlying +data source (e.g. CONUS WTK) (e.g. initialize train / val containers +with different time period and / or regions, or they can be used to +sample from completely different data sources (e.g. train on CONUS WTK +while validating on Canada WTK).

+
+

See also

+

Sampler, AbstractBatchQueue, StatsCollection

+
+
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is +split between lr_only_features and hr_exo_features.

    +
    +
    featureslist | tuple

    List of full set of features to use for sampling. If no entry +is provided then all data_vars from data will be used.

    +
    +
    lr_only_featureslist | tuple

    List of feature names or patt*erns that should only be +included in the low-res training set and not the high-res +observations.

    +
    +
    hr_exo_featureslist | tuple

    List of feature names or patt*erns that should be included +in the high-resolution observation but not expected to be +output from the generative model. An example is high-res +topography that is to be injected mid-network.

    +
    +
    +
  • +
  • kwargs (dict) – Keyword arguments for parent class

  • +
  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • +
  • time_enhance_mode (str) – [constant, linear] +Method to enhance temporally when constructing subfilter. At every +temporal location, a low-res temporal data is subtracted from the +high-res temporal data predicted. constant will assume that the +low-res temporal data is constant between landmarks. linear will +linearly interpolate between landmarks to generate the low-res data +to remove from the high-res.

  • +
  • lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if +this queue is part of a handler to estimate the 3rd moment +lower_models could include models that estimate the 1st and 2nd +moments. These lower moments can be required in higher order moment +calculations.

  • +
  • s_padding (int | None) – Width of spatial padding to predict only middle part. If None, no +padding is used

  • +
  • t_padding (int | None) – Width of temporal padding to predict only middle part. If None, no +padding is used

  • +
  • end_t_padding (bool | False) – Zero pad the end of temporal space. Ensures that loss is +calculated only if snapshot is surrounded by temporal landmarks. +False by default

  • +
+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

make_mask(high_res)

Make mask for output.

make_output(samples)

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
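Because Batch is a namedtuple, field access and positional access are interchangeable; a quick illustration with string stand-ins for the sample arrays:

>>> batch = Batch(low_res='lr', high_res='hr')  # stand-ins for sample arrays
>>> batch.low_res, batch[0]
('lr', 'lr')
>>> batch.high_res is batch[1]
True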
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+class ConditionalBatch(low_res, high_res, output, mask)#
+

Bases: tuple

+

Create new instance of ConditionalBatch(low_res, high_res, output, mask)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+
+mask#
+

Alias for field number 3

+
+ +
+
+output#
+

Alias for field number 2

+
+ +
+ +
+
+SAMPLER#
+

alias of Sampler

+
+ +
+
+TRAIN_QUEUE#
+

alias of QueueMom2SepSF

+
+ +
+
+VAL_QUEUE#
+

alias of QueueMom2SepSF

+
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+make_mask(high_res)#
+

Make mask for output. This is used to ensure consistency when +training conditional moments.

+
+

Note

+

Consider the case of learning E(HR|LR) where HR is the high_res and LR is the low_res. In theory, the conditional moment estimation works if the full LR is passed as input and predicts the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e. E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks only if one restricts the high_res output to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.

+
+
+
Parameters:
+

high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
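The interior region described above can be sketched with a boolean mask; a minimal numpy illustration assuming 5D samples and symmetric padding (the shapes and padding widths are illustrative, not the exact sup3r internals):

>>> import numpy as np
>>> high_res = np.ones((1, 8, 8, 12, 2))  # (batch_size, spatial_1, spatial_2, temporal, features)
>>> mask = np.zeros_like(high_res, dtype=bool)
>>> s_pad, t_pad = 1, 2  # cf. s_padding / t_padding
>>> mask[:, s_pad:-s_pad, s_pad:-s_pad, t_pad:-t_pad, :] = True  # keep only the interior
>>> # loss terms are then evaluated only where the surrounding low_res
>>> # fully overlaps the high_res, i.e. where mask is True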
+
+make_output(samples)#
+
+
Returns:
+

SF**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features). SF is the subfilter, HR is high-res, LR is low-res, and SF = HR - LR.

+
+
+
+ +
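A self-contained numpy sketch of this SF**2 target for the constant time_enhance_mode (the shapes, the landmark spacing of 4, and the repeat-based enhancement are illustrative assumptions):

>>> import numpy as np
>>> hr = np.random.rand(2, 8, 8, 12, 1)  # (batch_size, spatial_1, spatial_2, temporal, features)
>>> lr = hr[..., ::4, :]                 # low-res temporal landmarks
>>> lr_up = np.repeat(lr, 4, axis=3)     # 'constant' enhancement between landmarks
>>> output = (hr - lr_up) ** 2           # SF**2 with SF = HR - LR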
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples)#
+

Returns normalized collection of samples / observations along with +mask and target output for conditional moment estimation. Performs +coarsening on high-res data if Collection consists of +Sampler objects and not DualSampler objects

+
+
Returns:
+

namedtuple – Named tuple with low_res, high_res, mask, and output +attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.html
new file mode 100644
index 000000000..97a6d1d49

sup3r.preprocessing.batch_handlers.factory.DualBatchHandler#

+
+
+class DualBatchHandler(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, **kwargs)#
+

Bases: DualBatchQueue

+

BatchHandler object built from two lists of Container objects, one with training data and one with validation data. These lists will be used to initialize lists of Sampler objects that will then be used to build batches at run time.

+

Notes

+

These lists of containers can contain data from the same underlying data source (e.g. CONUS WTK), with train / val containers initialized for different time periods and / or regions, or they can be used to sample from completely different data sources (e.g. train on CONUS WTK while validating on Canada WTK).

+
+

See also

+

Sampler, AbstractBatchQueue, StatsCollection

+
+
+
Parameters:
+
    +
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of training data. The data can be a Sup3rX or +Sup3rDataset object.

  • +
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used +to initialize Sampler objects and then used to initialize a +batch queue of validation data. The data can be a Sup3rX or a +Sup3rDataset object.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for +just calculating stats and not saving. Can also be a dict.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is +split between lr_only_features and hr_exo_features.

    +
    +
    lr_only_features : list | tuple

    List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.

    hr_exo_features : list | tuple

    List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.

    +
    +
    +
  • +
  • kwargs (dict) – Additional keyword arguments for BatchQueue and / or Samplers. +This can vary depending on the type of BatchQueue / Sampler +given to the Factory. For example, to build a +BatchHandlerDC +object (data-centric batch handler) we use a queue and sampler +which takes spatial and temporal weight / bin arguments used to +determine how to weigh spatiotemporal regions when sampling. +Using +ConditionalBatchQueue +will result in arguments for computing moments from batches and +how to pad batch data to enable these calculations.

  • +
  • sample_shape (tuple) – Size of arrays to sample from the high-res data. The sample shape +for the low-res sampler will be determined from the enhancement +factors.

  • +
+
+
+
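A minimal usage sketch before the method reference (container construction is elided and all names are illustrative; this assumes two lists of objects with .data attributes as described above):

>>> handler = DualBatchHandler(
...     train_containers=[train_pair],  # hypothetical container with paired low / high res data
...     val_containers=[val_pair],
...     batch_size=16,
...     n_batches=64,
... )
>>> for batch in handler:               # batches are namedtuples of (low_res, high_res)
...     ...                             # train on batch.low_res / batch.high_res
>>> handler.stop()                      # stop the queue threads when done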

Methods

+

check_enhancement_factors()

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

init_samplers(train_containers, ...)

Initialize samplers from given data containers.

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start the val data batch queue in addition to the train batch queue.

stop()

Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, smoothing_ignore])

Perform smoothing if requested.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+SAMPLER#
+

alias of DualSampler

+
+ +
+
+TRAIN_QUEUE#
+

alias of DualBatchQueue

+
+ +
+
+VAL_QUEUE#
+

alias of DualBatchQueue

+
+ +
+
+check_enhancement_factors()#
+

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start()#
+

Start the val data batch queue in addition to the train batch +queue.

+
+ +
+
+stop()#
+

Stop the val data batch queue in addition to the train batch +queue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None)#
+

Perform smoothing if requested.

+
+

Note

+

This does not include temporal or spatial coarsening like +SingleBatchQueue

+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.factory.html b/_autosummary/sup3r.preprocessing.batch_handlers.factory.html
new file mode 100644
index 000000000..b5b23c579

sup3r.preprocessing.batch_handlers.factory#

+

BatchHandler factory. Builds BatchHandler objects from batch queues and +samplers.

+
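A hedged sketch of the factory pattern (class names per the summaries below; everything beyond the (MainQueueClass, SamplerClass) signature is assumed):

>>> HandlerClass = BatchHandlerFactory(SingleBatchQueue, Sampler)  # compose queue + sampler
>>> handler = HandlerClass(train_containers, val_containers=val_containers)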

Functions

+

BatchHandlerFactory(MainQueueClass, SamplerClass)

BatchHandler factory.

+
+

Classes

+

BatchHandler(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerCC(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerMom1(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerMom1SF(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerMom2(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerMom2SF

alias of BatchHandler

BatchHandlerMom2Sep(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

BatchHandlerMom2SepSF(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

DualBatchHandler(train_containers, *[, ...])

BatchHandler object built from two lists of Container objects, one with training data and one with validation data.

+
+
+ + +
diff --git a/_autosummary/sup3r.preprocessing.batch_handlers.html b/_autosummary/sup3r.preprocessing.batch_handlers.html
new file mode 100644
index 000000000..9513f2690

sup3r.preprocessing.batch_handlers#

+

Composite objects built from batch queues and samplers.

+

dc

Data centric batch handlers.

factory

BatchHandler factory.

+
+
+ + +
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.html b/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.html
new file mode 100644
index 000000000..0e90a8bd0

sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue#

+
+
+class AbstractBatchQueue(samplers: List[Sampler] | List[DualSampler], batch_size: int = 16, n_batches: int = 64, s_enhance: int = 1, t_enhance: int = 1, queue_cap: int | None = None, transform_kwargs: dict | None = None, max_workers: int = 1, thread_name: str = 'training', mode: str = 'lazy')[source]#
+

Bases: Collection, ABC

+

Abstract BatchQueue class. This class gets batches from a dataset +generator and maintains a queue of batches in a dedicated thread so the +training routine can proceed as soon as batches are available.

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • max_workers (int) – Number of workers / threads to use for getting batches to fill +queue

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+
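The queue-plus-thread design can be sketched with standard-library stand-ins (a simplified illustration of the pattern, not the sup3r implementation itself):

>>> import queue, threading
>>> q = queue.Queue(maxsize=8)              # plays the role of queue_cap
>>> def sample_batch():
...     return 'batch'                      # stand-in for building one batch of samples
>>> running = True
>>> def enqueue_batches():
...     while running:                      # cf. the running property below
...         if not q.full():
...             q.put(sample_batch())       # build batch and send to queue
>>> thread = threading.Thread(target=enqueue_batches, name='training', daemon=True)
>>> thread.start()                          # the training loop then consumes q.get()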

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples, **kwargs)

Apply transform on batch samples.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue. e.g. for single dataset queues this is (batch_size, *sample_shape, len(features)). For dual dataset queues this is [(batch_size, *lr_shape), (batch_size, *hr_shape)].

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+abstract property queue_shape#
+

Shape of objects stored in the queue. e.g. for single dataset queues +this is (batch_size, *sample_shape, len(features)). For dual dataset +queues this is [(batch_size, *lr_shape), (batch_size, *hr_shape)]

+
+ +
+
+get_queue()[source]#
+

Return FIFO queue for storing batches.

+
+ +
+
+preflight()[source]#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+check_features()[source]#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_enhancement_factors()[source]#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+abstract transform(samples, **kwargs)[source]#
+

Apply transform on batch samples. This can include smoothing / coarsening depending on the type of queue, e.g. coarsening could be included for a single dataset queue, where low-res samples are coarsened high-res samples. For a dual dataset queue this will just include smoothing.

+
+ +
+
+post_proc(samples) Batch[source]#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+start() None[source]#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None[source]#
+

Stop loading batches.

+
+ +
+
+get_batch() Batch[source]#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+enqueue_batches() None[source]#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+get_container_index()[source]#
+

Get random container index based on weights

+
+ +
+
+get_random_container()[source]#
+

Get random container based on container weights

+
+ +
+
+sample_batch()[source]#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+log_queue_info()[source]#
+

Log info about queue size.

+
+ +
+
+enqueue_batch()[source]#
+

Build batch and send to queue.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.abstract.html b/_autosummary/sup3r.preprocessing.batch_queues.abstract.html
new file mode 100644
index 000000000..9e8e46da9

sup3r.preprocessing.batch_queues.abstract#

+

Abstract batch queue class used for multi-threaded batching / training.

+
+
TODO:
    +
  1. Figure out the apparent “blocking” issue with threaded enqueue_batches. Is max_workers=1 the fastest?

  2. Set up distributed data handling so this can work with data distributed over multiple nodes.

+
+
+

Classes

+

AbstractBatchQueue(samplers[, batch_size, ...])

Abstract BatchQueue class.

+
+
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.html b/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.html
new file mode 100644
index 000000000..0d3a0af9c

sup3r.preprocessing.batch_queues.base.SingleBatchQueue#

+
+
+class SingleBatchQueue(samplers: List[Sampler] | List[DualSampler], batch_size: int = 16, n_batches: int = 64, s_enhance: int = 1, t_enhance: int = 1, queue_cap: int | None = None, transform_kwargs: dict | None = None, max_workers: int = 1, thread_name: str = 'training', mode: str = 'lazy')[source]#
+

Bases: AbstractBatchQueue

+

Base BatchQueue class for single dataset containers

+
+

Note

+

Here we use len(self.features) for the last dimension of samples, since +samples in SingleBatchQueue queues are coarsened to produce +low-res samples, and then the lr_only_features are removed with +hr_features_ind. In contrast, for samples in DualBatchQueue +queues there are low / high res pairs and the high-res only stores the +hr_features

+
+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • max_workers (int) – Number of workers / threads to use for getting batches to fill +queue

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')[source]#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
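A numpy sketch of this coarsening with assumed shapes, mean spatial coarsening, and the 'subsample' temporal method (illustrative, not the exact sup3r routines):

>>> import numpy as np
>>> hr = np.random.rand(4, 16, 16, 24, 2)  # (batch_size, spatial_1, spatial_2, temporal, features)
>>> s, t = 2, 4                            # s_enhance, t_enhance
>>> lr = hr.reshape(4, 8, s, 8, s, 24, 2).mean(axis=(2, 4))  # spatial coarsening
>>> lr = lr[..., ::t, :]                   # temporal_coarsening_method='subsample'
>>> lr.shape
(4, 8, 8, 6, 2)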
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.base.html b/_autosummary/sup3r.preprocessing.batch_queues.base.html
new file mode 100644
index 000000000..e2412cd3a

sup3r.preprocessing.batch_queues.base#

+

Base objects which generate, build, and operate on batches. Also can +interface with models.

+

Classes

+

SingleBatchQueue(samplers[, batch_size, ...])

Base BatchQueue class for single dataset containers

+
+
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.html
new file mode 100644
index 000000000..2cf5db32d

sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue#

+
+
+class ConditionalBatchQueue(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)[source]#
+

Bases: SingleBatchQueue

+

BatchQueue class for conditional moment estimation.

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • time_enhance_mode (str) – [constant, linear] Method used to enhance temporally when constructing the subfilter. At every temporal location, the low-res temporal data is subtracted from the predicted high-res temporal data. 'constant' assumes that the low-res temporal data is constant between landmarks; 'linear' linearly interpolates between landmarks to generate the low-res data to remove from the high-res.

  • +
  • lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if +this queue is part of a handler to estimate the 3rd moment +lower_models could include models that estimate the 1st and 2nd +moments. These lower moments can be required in higher order moment +calculations.

  • +
  • s_padding (int | None) – Width of spatial padding to predict only middle part. If None, no +padding is used

  • +
  • t_padding (int | None) – Width of temporal padding to predict only middle part. If None, no +padding is used

  • +
  • end_t_padding (bool | False) – Zero pad the end of temporal space. Ensures that loss is +calculated only if snapshot is surrounded by temporal landmarks. +False by default

  • +
  • kwargs (dict) – Keyword arguments for parent class

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch, this sets the iteration limit for +this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes is to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes is to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+
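Since make_output is abstract on this class, a concrete queue supplies the moment target. A minimal illustrative subclass for a first-moment target (simplified; consistent with the make_output description below):

>>> class QueueFirstMoment(ConditionalBatchQueue):
...     """Hypothetical queue whose target is just the high-res field."""
...     def make_output(self, samples):
...         _, high_res = samples  # samples is a (low_res, high_res) pair
...         return high_res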

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

make_mask(high_res)

Make mask for output.

make_output(samples)

Make custom batch output.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+class ConditionalBatch(low_res, high_res, output, mask)#
+

Bases: tuple

+

Create new instance of ConditionalBatch(low_res, high_res, output, mask)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+
+mask#
+

Alias for field number 3

+
+ +
+
+output#
+

Alias for field number 2

+
+ +
+ +
+
+make_mask(high_res)[source]#
+

Make mask for output. This is used to ensure consistency when +training conditional moments.

+
+

Note

+

Consider the case of learning E(HR|LR) where HR is the high_res and LR is the low_res. In theory, the conditional moment estimation works if the full LR is passed as input and predicts the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e. E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks only if one restricts the high_res output to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.

+
+
+
Parameters:
+

high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
+
+abstract make_output(samples)[source]#
+

Make custom batch output. This depends on the moment type being estimated, e.g. this could be the 1st moment, which is just high_res, or the 2nd moment, which is (high_res - 1st moment) ** 2.

+
+
Parameters:
+

samples (Tuple[Union[np.ndarray, da.core.Array], …]) – Tuple of low_res, high_res. Each array is: +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

output (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
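Concretely, the first two moment targets can be sketched as plain functions (hypothetical helpers; the lower-moment model object and its predict call are assumptions):

>>> def make_output_mom1(samples):
...     low_res, high_res = samples
...     return high_res                     # 1st moment target is high_res itself
>>> def make_output_mom2(samples, mom1_model):
...     low_res, high_res = samples
...     mom1 = mom1_model.predict(low_res)  # assumed lower-moment prediction
...     return (high_res - mom1) ** 2       # (high_res - 1st moment) ** 2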
+
+post_proc(samples)[source]#
+

Returns normalized collection of samples / observations along with +mask and target output for conditional moment estimation. Performs +coarsening on high-res data if Collection consists of +Sampler objects and not DualSampler objects

+
+
Returns:
+

namedtuple – Named tuple with low_res, high_res, mask, and output +attributes

+
+
+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')
Coarsen high-res data to get the corresponding low-res batch.

Parameters:
• samples (Union[np.ndarray, da.core.Array]) – High-resolution batch of samples. 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features).
• smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse data. This can be tuned by matching the kinetic energy of a low-resolution simulation with the kinetic energy of a coarsened and smoothed high-resolution simulation. If None, no smoothing is performed.
• smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will smooth all features if the smoothing kwarg is not None.
• temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be 'subsample', 'average', 'min', 'max', or 'total'; a sketch of these reductions follows this entry.

Returns:
• low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array with the shapes listed above.
• high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array with the shapes listed above.
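A sketch of how those temporal_coarsening_method options reduce the temporal axis (assuming a 5D numpy array and a coarsening factor t_enhance that divides the temporal length; an illustration, not the library's implementation):

    import numpy as np

    def temporal_coarsen_sketch(high_res, t_enhance, method='subsample'):
        # high_res: (batch, spatial_1, spatial_2, temporal, features)
        b, s1, s2, t, f = high_res.shape
        chunks = high_res.reshape(b, s1, s2, t // t_enhance, t_enhance, f)
        if method == 'subsample':
            return chunks[..., 0, :]       # keep one timestep per chunk
        if method == 'average':
            return chunks.mean(axis=4)
        if method == 'total':
            return chunks.sum(axis=4)
        if method == 'min':
            return chunks.min(axis=4)
        return chunks.max(axis=4)          # 'max'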
wrap(data)
Return a Sup3rDataset object or a tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (i.e. len(data) == 2 or len(data) == 1): a 2-tuple when .data belongs to a dual container object like DualSampler, and a 1-tuple otherwise.
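The tuple-length rule above can be illustrated with a self-contained stand-in (Sup3rDatasetSketch is a hypothetical stub, not the real Sup3rDataset):

    from typing import Any, Tuple, Union

    class Sup3rDatasetSketch(tuple):
        """Stub standing in for Sup3rDataset: a thin 1- or 2-tuple wrapper."""

    def wrap_sketch(data: Union[Any, Tuple]) -> Sup3rDatasetSketch:
        # Dual containers (e.g. DualSampler) hand over a (low_res, high_res)
        # 2-tuple; single containers hand over one object, wrapped as a 1-tuple.
        members = data if isinstance(data, tuple) else (data,)
        return Sup3rDatasetSketch(members)

    assert len(wrap_sketch(('lr', 'hr'))) == 2  # dual container case
    assert len(wrap_sketch('hr')) == 1          # single container case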

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.html
new file mode 100644
index 000000000..a4f860fe6
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.html
@@ -0,0 +1,1371 @@
[HTML page head and navigation omitted; page title: "sup3r.preprocessing.batch_queues.conditional.QueueMom1 — sup3r 0.2.1.dev62 documentation"]

sup3r.preprocessing.batch_queues.conditional.QueueMom1

class QueueMom1(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)

Bases: ConditionalBatchQueue

Batch handling class for conditional estimation of the first moment.
Parameters:
• samplers (List[Sampler]) – List of Sampler instances.
• time_enhance_mode (str) – ['constant', 'linear'] Method of temporal enhancement used when constructing the subfilter. At every temporal location, low-res temporal data is subtracted from the predicted high-res temporal data. 'constant' assumes the low-res temporal data is constant between landmarks; 'linear' interpolates linearly between landmarks to generate the low-res data to remove from the high-res.
• lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if this queue is part of a handler estimating the 3rd moment, lower_models could include models that estimate the 1st and 2nd moments. These lower moments can be required in higher-order moment calculations.
• s_padding (int | None) – Width of spatial padding used to predict only the middle part. If None, no padding is used.
• t_padding (int | None) – Width of temporal padding used to predict only the middle part. If None, no padding is used.
• end_t_padding (bool) – Zero-pad the end of the temporal space. Ensures that loss is calculated only if a snapshot is surrounded by temporal landmarks. False by default.
• kwargs (dict) – Keyword arguments for the parent class.
• batch_size (int) – Number of observations / samples in a batch.
• n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.
• s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.
• t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.
• queue_cap (int) – Maximum number of batches the batch queue can store.
• transform_kwargs (Union[Dict, None]) – Dictionary of kwargs passed to self.transform, which performs smoothing / coarsening.
• thread_name (str) – Name of the queue thread. Default is 'training'. Used to set the name to 'validation' for BatchHandler, which has both a training and a validation queue.
• mode (str) – Loading mode. Default is 'lazy', which loads data into memory only as batches are queued; 'eager' loads all data into memory right away.
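A construction sketch tying these parameters together (hypothetical usage: building the samplers list from data handlers is elided, and the values shown are placeholders):

    # `samplers` is assumed to be a prepared List[Sampler].
    queue = QueueMom1(
        samplers,
        s_enhance=2,       # 2x spatial enhancement
        t_enhance=4,       # 4x temporal enhancement
        batch_size=16,     # samples per batch
        n_batches=64,      # iteration limit per epoch
        queue_cap=8,       # hold at most 8 prebuilt batches
        mode='lazy',       # load data only as batches are queued
    )
    queue.start()              # spin up the background fill thread
    for _ in range(64):        # one epoch of n_batches
        batch = queue.get_batch()  # ConditionalBatch(low_res, high_res, output, mask)
    queue.stop()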

Methods

check_enhancement_factors() – Make sure the enhancement factors evenly divide the sample_shape.
check_features() – Make sure all samplers have the same sets of features.
check_shared_attr(attr) – Check if all containers have the same value for attr.
enqueue_batch() – Build batch and send to queue.
enqueue_batches() – Callback function for queue thread.
get_batch() – Get batch from queue or directly from a Sampler through sample_batch.
get_container_index() – Get random container index based on weights.
get_queue() – Return FIFO queue for storing batches.
get_random_container() – Get random container based on container weights.
log_queue_info() – Log info about queue size.
make_mask(high_res) – Make mask for output.
make_output(samples) – For the 1st moment the output is simply the high_res.
post_init_log([args_dict]) – Log additional arguments after initialization.
post_proc(samples) – Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.
preflight() – Run checks before kicking off the queue.
sample_batch() – Get random sampler from collection and return a batch of samples from that sampler.
start() – Start thread to keep sample queue full for batches.
stop() – Stop loading batches.
transform(samples[, smoothing, ...]) – Coarsen high res data to get corresponding low res batch.
wrap(data) – Return a Sup3rDataset object or tuple of such.

Attributes

container_weights – Get weights used to sample from different containers based on relative sizes.
data – Return underlying data.
features – Get all features contained in data.
hr_shape – Shape of high resolution sample in a low-res / high-res pair.
lr_shape – Shape of low resolution sample in a low-res / high-res pair.
queue_shape – Shape of objects stored in the queue.
queue_thread – Get new queue thread.
running – Boolean to check whether to keep enqueueing batches.
shape – Get shape of underlying data.
make_output(samples)
For the 1st moment the output is simply the high_res.
class Batch(low_res, high_res)
Bases: tuple
Namedtuple with fields low_res (field 0) and high_res (field 1). It supports the standard tuple methods __add__(value), __mul__(value), count(value), and index(value[, start, stop]); index raises ValueError if the value is not present.
class ConditionalBatch(low_res, high_res, output, mask)
Bases: tuple
Namedtuple with fields low_res (field 0), high_res (field 1), output (field 2), and mask (field 3). It supports the same standard tuple methods as Batch.
check_enhancement_factors()
Make sure the enhancement factors evenly divide the sample_shape.

check_features()
Make sure all samplers have the same sets of features.
The inherited members from check_shared_attr through lr_shape (check_shared_attr, container_weights, data, enqueue_batch, enqueue_batches, features, get_batch, get_container_index, get_queue, get_random_container, hr_shape, log_queue_info, and lr_shape) are identical to those documented above.
make_mask(high_res)
Make mask for output. This is used to ensure consistency when training conditional moments.

Note: consider the case of learning E(HR|LR), where HR is the high_res and LR is the low_res. In theory, conditional moment estimation works if the full LR is passed as input to predict the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e. E(HR|LR) = E(HR|LR_nei), where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks, provided the high_res output is restricted to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.

Parameters:
high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features).

Returns:
mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array of the same shape.
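A numpy sketch of that mask (assuming 5D input and the s_padding / t_padding widths documented in the class parameters; illustrative only):

    import numpy as np

    def make_mask_sketch(high_res, s_padding=0, t_padding=0):
        # Ones in the interior, zeros along the padded spatial/temporal
        # border, so the loss only sees high-res output that is fully
        # surrounded by low-res input.
        mask = np.zeros_like(high_res)
        s1_stop = high_res.shape[1] - s_padding
        s2_stop = high_res.shape[2] - s_padding
        t_stop = high_res.shape[3] - t_padding
        mask[:, s_padding:s1_stop, s_padding:s2_stop, t_padding:t_stop, :] = 1.0
        return mask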
post_init_log(args_dict=None)
Log additional arguments after initialization.

post_proc(samples)
Returns a normalized collection of samples / observations along with the mask and target output for conditional moment estimation. Performs coarsening on high-res data if the Collection consists of Sampler objects rather than DualSampler objects.

Returns:
namedtuple – Named tuple with low_res, high_res, mask, and output attributes.
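How post_proc assembles the pieces documented on this page can be sketched as follows (hypothetical pseudocode: the real method's argument handling is not shown, and the make_output call shape is an assumption):

    def post_proc_sketch(queue, samples):
        # DualSampler collections already yield (low_res, high_res) pairs;
        # plain Sampler collections need transform() to coarsen the high-res.
        if isinstance(samples, tuple):
            low_res, high_res = samples
        else:
            low_res, high_res = queue.transform(samples)
        return ConditionalBatch(low_res=low_res,
                                high_res=high_res,
                                output=queue.make_output((low_res, high_res)),
                                mask=queue.make_mask(high_res))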
The remaining inherited members (preflight, queue_shape, queue_thread, running, sample_batch, shape, start, stop, transform, and wrap) are likewise identical to those documented above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.html
new file mode 100644
index 000000000..ff2e1dcdd
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.html
@@ -0,0 +1,1380 @@
[HTML page head and navigation omitted; page title: "sup3r.preprocessing.batch_queues.conditional.QueueMom1SF — sup3r 0.2.1.dev62 documentation"]

sup3r.preprocessing.batch_queues.conditional.QueueMom1SF

class QueueMom1SF(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)

Bases: ConditionalBatchQueue

Batch handling class for conditional estimation of the first moment of subfilter velocity.
Parameters: identical to the constructor parameters documented for QueueMom1 above.

Methods and Attributes

The method and attribute summaries are identical to those documented for QueueMom1 above; only make_output differs, as shown below.
make_output(samples)

Returns:
SF (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features). SF is the subfilter field, HR is high-res, and LR is low-res: SF = HR - LR.
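A numpy sketch of SF = HR - LR with the 'constant' time_enhance_mode (assumptions: 5D arrays, enhancement factors that divide the shapes, and nearest-neighbour repetition standing in for the library's enhancement routines):

    import numpy as np

    def subfilter_sketch(low_res, high_res, s_enhance, t_enhance):
        # Upsample LR to the HR grid: repeat spatially, and hold values
        # constant between temporal landmarks ('constant' mode).
        lr_up = low_res.repeat(s_enhance, axis=1).repeat(s_enhance, axis=2)
        lr_up = lr_up.repeat(t_enhance, axis=3)
        return high_res - lr_up  # SF: what the coarse field cannot represent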
The Batch and ConditionalBatch namedtuples and all inherited members (check_enhancement_factors through wrap, including make_mask and post_proc) are identical to those documented for QueueMom1 above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.html
new file mode 100644
index 000000000..cc1274588
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.html
@@ -0,0 +1,1378 @@
[HTML page head and navigation omitted; page title: "sup3r.preprocessing.batch_queues.conditional.QueueMom2 — sup3r 0.2.1.dev62 documentation"]

sup3r.preprocessing.batch_queues.conditional.QueueMom2

class QueueMom2(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)

Bases: ConditionalBatchQueue

Batch handling class for conditional estimation of the second moment.
Parameters: identical to the constructor parameters documented for QueueMom1 above.

Methods and Attributes

The method and attribute summaries are identical to those documented for QueueMom1 above; only make_output differs, as shown below.
make_output(samples)

Returns:
(HR - <HR|LR>)**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features). HR is high-res and LR is low-res.
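A sketch of this target, assuming lower_models[1] is the first-moment Sup3rCondMom model and that a generate() call returns <HR|LR> on the high-res grid (the call signature here is an assumption, not the documented API):

    def mom2_output_sketch(low_res, high_res, lower_models):
        # Squared residual about the conditional mean from the moment-1 model.
        hr_mean = lower_models[1].generate(low_res)  # <HR|LR>, assumed call
        return (high_res - hr_mean) ** 2             # (HR - <HR|LR>)**2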
The Batch and ConditionalBatch namedtuples and all inherited members (check_enhancement_factors through wrap, including make_mask and post_proc) are identical to those documented for QueueMom1 above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.html
new file mode 100644
index 000000000..7c34d3118
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.html
@@ -0,0 +1,1380 @@
[HTML page head and navigation omitted; page title: "sup3r.preprocessing.batch_queues.conditional.QueueMom2SF — sup3r 0.2.1.dev62 documentation"]

sup3r.preprocessing.batch_queues.conditional.QueueMom2SF

class QueueMom2SF(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)

Bases: ConditionalBatchQueue

Batch handling class for conditional estimation of the second moment of subfilter velocity.
Parameters: identical to the constructor parameters documented for QueueMom1 above.

Methods and Attributes

The method and attribute summaries are identical to those documented for QueueMom1 above; only make_output differs, as shown below.
make_output(samples)

Returns:
(SF - <SF|LR>)**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features). SF is the subfilter field, HR is high-res, and LR is low-res: SF = HR - LR.
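A sketch combining the subfilter construction with the moment-1 prediction (enhance_lr is a hypothetical upsampling helper, and the generate() call is assumed as in the QueueMom2 sketch above):

    def mom2sf_output_sketch(low_res, high_res, lower_models, enhance_lr):
        sf = high_res - enhance_lr(low_res)          # SF = HR - LR
        sf_mean = lower_models[1].generate(low_res)  # <SF|LR>, assumed call
        return (sf - sf_mean) ** 2                   # (SF - <SF|LR>)**2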
The Batch and ConditionalBatch namedtuples and all inherited members (check_enhancement_factors through wrap, including make_mask and post_proc) are identical to those documented for QueueMom1 above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.html
new file mode 100644
index 000000000..9fb9808e1
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.html
@@ -0,0 +1,1379 @@
[HTML page head and navigation omitted; page title: "sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep — sup3r 0.2.1.dev62 documentation"]

sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep

class QueueMom2Sep(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)

Bases: QueueMom1

Batch handling class for conditional estimation of the second moment without subtraction of the first moment.
Parameters: identical to the constructor parameters documented for QueueMom1 above.

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

make_mask(high_res)

Make mask for output.

make_output(samples)

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+make_output(samples)[source]#
+
+
Returns:
+

HR**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features) +HR is high-res
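To make the HR**2 target concrete, here is a minimal sketch of a separate-second-moment output. The function name and the assumption that samples unpacks into a (low_res, high_res) pair are illustrative, not sup3r's actual implementation:

import numpy as np

def make_output_sketch(samples):
    # Target for estimating E(HR**2 | LR) directly, without first
    # subtracting the first moment: simply square the high-res data.
    _, high_res = samples
    return np.asarray(high_res) ** 2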

+
+
+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)
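Since Batch subclasses tuple with named fields low_res (field 0) and high_res (field 1), it behaves like a standard namedtuple. A small illustration using an equivalent stand-in built with collections.namedtuple (array shapes are arbitrary):

from collections import namedtuple
import numpy as np

Batch = namedtuple('Batch', ['low_res', 'high_res'])
batch = Batch(low_res=np.zeros((4, 5, 5, 2)),
              high_res=np.zeros((4, 10, 10, 2)))
print(batch.high_res.shape)  # access by field name
lr, hr = batch               # or unpack like a plain tuple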

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+class ConditionalBatch(low_res, high_res, output, mask)#
+

Bases: tuple

+

Create new instance of ConditionalBatch(low_res, high_res, output, mask)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+
+mask#
+

Alias for field number 3

+
+ +
+
+output#
+

Alias for field number 2

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.
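As a rough sketch of this producer/consumer pattern (the names build_batch and running are assumptions for illustration; the real callback lives on the queue object itself):

import time

def enqueue_batches_sketch(batch_queue, build_batch, running):
    # Producer loop: fill empty queue spots while training runs.
    # The training thread is the consumer that dequeues batches.
    while running():
        if not batch_queue.full():
            batch_queue.put(build_batch())
        else:
            time.sleep(0.01)  # queue is full; wait for a dequeue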

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+make_mask(high_res)#
+

Make mask for output. This is used to ensure consistency when +training conditional moments.

+
+

Note

+

Consider the case of learning E(HR|LR) where HR is the high_res and LR is the low_res. In theory, conditional moment estimation works if the full LR is passed as input to predict the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e., E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks, provided the high_res output is restricted to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.
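A minimal sketch of such a padding mask, assuming the s_padding / t_padding semantics described in the class parameters (the exact masking logic in sup3r may differ):

import numpy as np

def make_mask_sketch(high_res, s_padding=0, t_padding=0):
    # Ones in the interior, zeros on the spatial (and temporal)
    # borders, so loss is only computed where the high-res output
    # is fully surrounded by low-res input.
    mask = np.zeros_like(high_res)
    s, t = s_padding or 0, t_padding or 0
    if high_res.ndim == 4:  # (batch, spatial_1, spatial_2, features)
        mask[:, s:high_res.shape[1] - s, s:high_res.shape[2] - s, :] = 1
    else:  # (batch, spatial_1, spatial_2, temporal, features)
        mask[:, s:high_res.shape[1] - s, s:high_res.shape[2] - s,
             t:high_res.shape[3] - t, :] = 1
    return mask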

+
+
+
Parameters:
+

high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples)#
+

Returns normalized collection of samples / observations along with +mask and target output for conditional moment estimation. Performs +coarsening on high-res data if Collection consists of +Sampler objects and not DualSampler objects

+
+
Returns:
+

namedtuple – Named tuple with low_res, high_res, mask, and output +attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high-res data to get the corresponding low-res batch (see the coarsening sketch after the Returns section below).

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+
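The following is a minimal sketch of this coarsening, assuming 5D input and enhancement factors that evenly divide the array shape; the real method also applies the optional gaussian smoothing described above:

import numpy as np

def transform_sketch(high_res, s_enhance=2, t_enhance=2,
                     temporal_coarsening_method='subsample'):
    # Spatial coarsening: block-average the two spatial axes.
    b, s1, s2, t, f = high_res.shape
    low = high_res.reshape(b, s1 // s_enhance, s_enhance,
                           s2 // s_enhance, s_enhance,
                           t, f).mean(axis=(2, 4))
    # Temporal coarsening: subsample or average along the time axis.
    if temporal_coarsening_method == 'subsample':
        low = low[:, :, :, ::t_enhance, :]
    elif temporal_coarsening_method == 'average':
        low = low.reshape(b, s1 // s_enhance, s2 // s_enhance,
                          t // t_enhance, t_enhance, f).mean(axis=4)
    return low, high_res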

+
+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.html
new file mode 100644
index 000000000..260317abf
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.html
@@ -0,0 +1,1380 @@
sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF#

+
+
+class QueueMom2SepSF(samplers, *, time_enhance_mode='constant', lower_models=None, s_padding=0, t_padding=0, end_t_padding=False, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)[source]#
+

Bases: QueueMom1SF

+

Batch of low_res, high_res, and output data used when learning the second moment of the subfilter velocity separately from the first moment

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • time_enhance_mode (str) – [constant, linear] Method of temporal enhancement used when constructing subfilter data. At each temporal location, low-res temporal data is subtracted from the predicted high-res temporal data. constant assumes the low-res data is constant between landmarks; linear linearly interpolates between landmarks to generate the low-res data to remove from the high-res.

  • +
  • lower_models (Dict[int, Sup3rCondMom] | None) – Dictionary of models that predict lower moments. For example, if +this queue is part of a handler to estimate the 3rd moment +lower_models could include models that estimate the 1st and 2nd +moments. These lower moments can be required in higher order moment +calculations.

  • +
  • s_padding (int | None) – Width of spatial padding to predict only middle part. If None, no +padding is used

  • +
  • t_padding (int | None) – Width of temporal padding to predict only middle part. If None, no +padding is used

  • +
  • end_t_padding (bool | False) – Zero-pad the end of the temporal dimension. Ensures that loss is calculated only if a snapshot is surrounded by temporal landmarks. False by default.

  • +
  • kwargs (dict) – Keyword arguments for parent class

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes are to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

make_mask(high_res)

Make mask for output.

make_output(samples)

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Returns normalized collection of samples / observations along with mask and target output for conditional moment estimation.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+make_output(samples)[source]#
+
+
Returns:
+

SF**2 (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features) +SF is subfilter, HR is high-res and LR is low-res +SF = HR - LR
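A minimal sketch of this target, assuming samples unpacks into a (low_res, high_res) pair and using simple repetition for the 'constant' time_enhance_mode (the actual enhancement logic in sup3r is more involved):

import numpy as np

def make_output_sketch(samples, s_enhance=2, t_enhance=2):
    # SF = HR - LR, where LR is enhanced back to high-res shape;
    # the target for the separate second moment is SF**2.
    low_res, high_res = samples
    enhanced = np.repeat(low_res, s_enhance, axis=1)
    enhanced = np.repeat(enhanced, s_enhance, axis=2)
    enhanced = np.repeat(enhanced, t_enhance, axis=3)  # 'constant' mode
    sf = np.asarray(high_res) - enhanced
    return sf ** 2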

+
+
+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+class ConditionalBatch(low_res, high_res, output, mask)#
+

Bases: tuple

+

Create new instance of ConditionalBatch(low_res, high_res, output, mask)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+
+mask#
+

Alias for field number 3

+
+ +
+
+output#
+

Alias for field number 2

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+make_mask(high_res)#
+

Make mask for output. This is used to ensure consistency when +training conditional moments.

+
+

Note

+

Consider the case of learning E(HR|LR) where HR is the high_res and LR is the low_res. In theory, conditional moment estimation works if the full LR is passed as input to predict the full HR. In practice, only the LR data that overlaps and surrounds the HR data is useful, i.e., E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that surrounds the HR data. Physically, this is equivalent to saying that data far away from a region of interest does not matter. This allows learning the conditional moments on spatial and temporal chunks, provided the high_res output is restricted to being overlapped and surrounded by the input low_res. The role of the mask is to ensure that the input low_res always surrounds the output high_res.

+
+
+
Parameters:
+

high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
Returns:
+

mask (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples)#
+

Returns normalized collection of samples / observations along with +mask and target output for conditional moment estimation. Performs +coarsening on high-res data if Collection consists of +Sampler objects and not DualSampler objects

+
+
Returns:
+

namedtuple – Named tuple with low_res, high_res, mask, and output +attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.conditional.html b/_autosummary/sup3r.preprocessing.batch_queues.conditional.html
new file mode 100644
index 000000000..72168d491
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.conditional.html
@@ -0,0 +1,757 @@

sup3r.preprocessing.batch_queues.conditional#

+

Abstract batch queue class used for conditional moment estimation.

+

Classes

+

ConditionalBatchQueue(samplers, *[, ...])

BatchQueue class for conditional moment estimation.

QueueMom1(samplers, *[, time_enhance_mode, ...])

Batch handling class for conditional estimation of first moment

QueueMom1SF(samplers, *[, ...])

Batch handling class for conditional estimation of first moment of subfilter velocity

QueueMom2(samplers, *[, time_enhance_mode, ...])

Batch handling class for conditional estimation of second moment

QueueMom2SF(samplers, *[, ...])

Batch handling class for conditional estimation of second moment of subfilter velocity.

QueueMom2Sep(samplers, *[, ...])

Batch handling class for conditional estimation of second moment without subtraction of first moment

QueueMom2SepSF(samplers, *[, ...])

Batch of low_res, high_res and output data when learning the second moment of the subfilter velocity separately from the first moment

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.html b/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.html
new file mode 100644
index 000000000..4c51cd0c2
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.html
@@ -0,0 +1,1265 @@

sup3r.preprocessing.batch_queues.dc.BatchQueueDC#

+
+
+class BatchQueueDC(samplers, *, n_space_bins=1, n_time_bins=1, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)[source]#
+

Bases: SingleBatchQueue

+

Sample from data based on spatial and temporal weights. These weights can be derived from losses on validation data during training and updated after each epoch, or set a priori to construct a validation queue.

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • n_space_bins (int) – Number of spatial bins to use for weighted sampling. e.g. if this is 4 the spatial domain will be divided into 4 equal regions and losses will be calculated across these regions during training in order to adaptively sample from lower-performing regions.

  • +
  • n_time_bins (int) – Number of time bins to use for weighted sampling. e.g. if this is 4 the temporal domain will be divided into 4 equal periods and losses will be calculated across these periods during training in order to adaptively sample from lower-performing time periods.

  • +
  • kwargs (dict) – Keyword arguments for parent class.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes are to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs post-processing on dequeued samples before sending them out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Update weights and get batch of samples from sampled container.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

update_weights(spatial_weights, temporal_weights)

Set weights used to sample spatial and temporal bins.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

spatial_weights

Get weights used to sample spatial bins.

temporal_weights

Get weights used to sample temporal bins.

+
+
+
+sample_batch()[source]#
+

Update weights and get batch of samples from sampled container.

+
+ +
+
+property spatial_weights#
+

Get weights used to sample spatial bins.

+
+ +
+
+property temporal_weights#
+

Get weights used to sample temporal bins.

+
+ +
+
+update_weights(spatial_weights, temporal_weights)[source]#
+

Set weights used to sample spatial and temporal bins. This is called +by Sup3rGanDC after an epoch to update weights based on model +performance across validation samples.
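A hypothetical usage pattern (loss values and array sizes are illustrative only): after a validation pass, normalize per-bin losses into sampling weights so poorly-performing bins are drawn more often.

import numpy as np

# one loss per spatial / temporal bin, e.g. from Sup3rGanDC validation
val_losses_space = np.array([0.8, 1.2, 1.0, 2.0])
val_losses_time = np.array([1.0, 1.0, 3.0, 1.0])

spatial_weights = val_losses_space / val_losses_space.sum()
temporal_weights = val_losses_time / val_losses_time.sum()

# queue is an existing BatchQueueDC instance
queue.update_weights(spatial_weights, temporal_weights)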

+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs post-processing on dequeued samples before sending them out for training. Post-processing can include coarsening of high-res data (if the Collection consists of Sampler objects and not DualSampler objects), smoothing, etc.

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.html b/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.html
new file mode 100644
index 000000000..7c4f691d2
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.html
@@ -0,0 +1,1269 @@

sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC#

+
+
+class ValBatchQueueDC(samplers, *, n_space_bins=1, n_time_bins=1, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy', **kwargs)[source]#
+

Bases: BatchQueueDC

+

Queue to construct a single batch for each spatiotemporal validation bin. e.g., if we have 4 time bins and 1 space bin, this will build 4 batches, each with batch_size samples drawn from the corresponding bin. The model performance across these batches will determine the weights for how the training batch queue is sampled.

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • n_space_bins (int) – Number of spatial bins to use for weighted sampling. e.g. if this is 4 the spatial domain will be divided into 4 equal regions and losses will be calculated across these regions during training in order to adaptively sample from lower-performing regions.

  • +
  • n_time_bins (int) – Number of time bins to use for weighted sampling. e.g. if this is 4 the temporal domain will be divided into 4 equal periods and losses will be calculated across these periods during training in order to adaptively sample from lower-performing time periods.

  • +
  • kwargs (dict) – Keyword arguments for parent class.

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes are to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure the enhancement factors evenly divide the sample_shape.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs post-processing on dequeued samples before sending them out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Update weights and get batch of samples from sampled container.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, ...])

Coarsen high res data to get corresponding low res batch.

update_weights(spatial_weights, temporal_weights)

Set weights used to sample spatial and temporal bins.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

spatial_weights

Sample entirely from this spatial bin determined by the batch number.

temporal_weights

Sample entirely from this temporal bin determined by the batch number.

+
+
+
+property spatial_weights#
+

Sample entirely from this spatial bin determined by the batch +number.

+
+ +
+
+property temporal_weights#
+

Sample entirely from this temporal bin determined by the batch +number.
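Conceptually, these properties return one-hot weights keyed on the batch counter; a minimal sketch (the bin-indexing arithmetic here is an assumption for illustration and may not match sup3r's exact ordering of space/time bins):

import numpy as np

def val_temporal_weights_sketch(batch_counter, n_space_bins, n_time_bins):
    # One-hot weights: the validation queue cycles deterministically
    # through bins, so each batch samples from exactly one bin.
    weights = np.zeros(n_time_bins)
    weights[(batch_counter // n_space_bins) % n_time_bins] = 1.0
    return weights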

+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs post-processing on dequeued samples before sending them out for training. Post-processing can include coarsening of high-res data (if the Collection consists of Sampler objects and not DualSampler objects), smoothing, etc.

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Update weights and get batch of samples from sampled container.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
+

Coarsen high res data to get corresponding low res batch.

+
+
Parameters:
+
    +
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. +4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, +min, max, or total

  • +
+
+
Returns:
+

    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
+

+
+
+
+ +
+
+update_weights(spatial_weights, temporal_weights)#
+

Set weights used to sample spatial and temporal bins. This is called +by Sup3rGanDC after an epoch to update weights based on model +performance across validation samples.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.dc.html b/_autosummary/sup3r.preprocessing.batch_queues.dc.html
new file mode 100644
index 000000000..681020b4b
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.dc.html
@@ -0,0 +1,743 @@

sup3r.preprocessing.batch_queues.dc#

+

Data-centric batch handler for a dynamic batching strategy based on performance during training

+

Classes

+

BatchQueueDC(samplers, *[, n_space_bins, ...])

Sample from data based on spatial and temporal weights.

ValBatchQueueDC(samplers, *[, n_space_bins, ...])

Queue to construct a single batch for each spatiotemporal validation bin.

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.html b/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.html
new file mode 100644
index 000000000..704d91604
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.html
@@ -0,0 +1,1200 @@

sup3r.preprocessing.batch_queues.dual.DualBatchQueue#

+
+
+class DualBatchQueue(samplers, *, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, queue_cap=None, transform_kwargs=None, thread_name='training', mode='lazy')[source]#
+

Bases: AbstractBatchQueue

+

Base BatchQueue for use with +DualSampler objects.

+
+
Parameters:
+
    +
  • samplers (List[Sampler]) – List of Sampler instances

  • +
  • batch_size (int) – Number of observations / samples in a batch

  • +
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.

  • +
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.

  • +
  • t_enhance (int) – Integer factor by which the temporal axes are to be enhanced.

  • +
  • queue_cap (int) – Maximum number of batches the batch queue can store.

  • +
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method +performs smoothing / coarsening.

  • +
  • thread_name (str) – Name of the queue thread. Default is ‘training’. Used to set name +to ‘validation’ for BatchHandler, which has a training and +validation queue.

  • +
  • mode (str) – Loading mode. Default is ‘lazy’, which only loads data into memory +as batches are queued. ‘eager’ will load all data into memory right +away.

  • +
+
+
+

Methods

+

check_enhancement_factors()

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

check_features()

Make sure all samplers have the same sets of features.

check_shared_attr(attr)

Check if all containers have the same value for attr.

enqueue_batch()

Build batch and send to queue.

enqueue_batches()

Callback function for queue thread.

get_batch()

Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()

Get random container index based on weights

get_queue()

Return FIFO queue for storing batches.

get_random_container()

Get random container based on container weights

log_queue_info()

Log info about queue size.

post_init_log([args_dict])

Log additional arguments after initialization.

post_proc(samples)

Performs some post proc on dequeued samples before sending out for training.

preflight()

Run checks before kicking off the queue.

sample_batch()

Get random sampler from collection and return a batch of samples from that sampler.

start()

Start thread to keep sample queue full for batches.

stop()

Stop loading batches.

transform(samples[, smoothing, smoothing_ignore])

Perform smoothing if requested.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

hr_shape

Shape of high resolution sample in a low-res / high-res pair.

lr_shape

Shape of low resolution sample in a low-res / high-res pair.

queue_shape

Shape of objects stored in the queue.

queue_thread

Get new queue thread.

running

Boolean to check whether to keep enqueueing batches.

shape

Get shape of underlying data.

+
+
+
+property queue_shape#
+

Shape of objects stored in the queue.

+
+ +
+
+check_enhancement_factors()[source]#
+

Make sure each DualSampler has the same enhancement factors and that they match those provided to the BatchQueue.

+
+ +
+
+transform(samples, smoothing=None, smoothing_ignore=None)[source]#
+

Perform smoothing if requested.

+
+

Note

+

This does not include temporal or spatial coarsening like +SingleBatchQueue
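A minimal sketch of this smoothing-only transform, assuming 4D (batch, spatial_1, spatial_2, features) samples from a (low_res, high_res) pair and index-based smoothing_ignore (the real method takes feature names):

import numpy as np
from scipy.ndimage import gaussian_filter

def dual_transform_sketch(samples, smoothing=None, smoothing_ignore=None):
    low_res, high_res = samples
    if smoothing is not None:
        ignore = smoothing_ignore or []
        low_res = np.asarray(low_res).copy()
        for idx in range(low_res.shape[-1]):
            if idx not in ignore:
                # smooth only the two spatial axes of each feature
                low_res[..., idx] = gaussian_filter(
                    low_res[..., idx], sigma=(0, smoothing, smoothing),
                    mode='nearest')
    return low_res, high_res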

+
+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
+ +
+
+preflight()#
+

Run checks before kicking off the queue.

+
+ +
+
+property queue_thread#
+

Get new queue thread.

+
+ +
+
+property running#
+

Boolean to check whether to keep enqueueing batches.

+
+ +
+
+sample_batch()#
+

Get random sampler from collection and return a batch of samples +from that sampler.

+

Notes

+

These samples are wrapped in an np.asarray call, so they have been +loaded into memory.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+start() None#
+

Start thread to keep sample queue full for batches.

+
+ +
+
+stop() None#
+

Stop loading batches.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.dual.html b/_autosummary/sup3r.preprocessing.batch_queues.dual.html
new file mode 100644
index 000000000..c26ae656f
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.dual.html
@@ -0,0 +1,740 @@

sup3r.preprocessing.batch_queues.dual#

+

Base objects which generate, build, and operate on batches. These can also interface with models.

+

Classes

+

DualBatchQueue(samplers, *[, batch_size, ...])

Base BatchQueue for use with DualSampler objects.

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.html b/_autosummary/sup3r.preprocessing.batch_queues.html
new file mode 100644
index 000000000..b9a404833
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.html
@@ -0,0 +1,753 @@

sup3r.preprocessing.batch_queues#

+

Container collection objects used to build batches for training.

+

abstract

Abstract batch queue class used for multi-threaded batching / training.

base

Base objects which generate, build, and operate on batches.

conditional

Abstract batch queue class used for conditional moment estimation.

dc

Data-centric batch handler for a dynamic batching strategy based on performance during training

dual

Base objects which generate, build, and operate on batches.

utilities

Miscellaneous utilities shared across the batch_queues module

+
+
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.batch_queues.utilities.html b/_autosummary/sup3r.preprocessing.batch_queues.utilities.html
new file mode 100644
index 000000000..953e19a90
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.batch_queues.utilities.html
@@ -0,0 +1,745 @@

sup3r.preprocessing.batch_queues.utilities#

+

Miscellaneous utilities shared across the batch_queues module

+

Functions

+

smooth_data(low_res, training_features, ...)

Smooth data using a gaussian filter

spatial_simple_enhancing(data[, s_enhance, ...])

Simple spatial enhancement according to the s_enhance resolution

temporal_simple_enhancing(data[, t_enhance, ...])

Upsample data according to t_enhance resolution

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.html b/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.html new file mode 100644 index 000000000..7a6c0f194 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.html @@ -0,0 +1,783 @@ + + + + + + + + + + + sup3r.preprocessing.batch_queues.utilities.smooth_data — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.batch_queues.utilities.smooth_data

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.batch_queues.utilities.smooth_data#

+
+
+smooth_data(low_res, training_features, smoothing_ignore, smoothing=None)[source]#
+

Smooth data using a gaussian filter

+
+
Parameters:
+
    +
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

  • +
  • training_features (list | None) – Ordered list of training features input to the generative model

  • +
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will +smooth all features if smoothing kwarg is not None

  • +
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse +data. This can be tuned by matching the kinetic energy of a low +resolution simulation with the kinetic energy of a coarsened and +smoothed high resolution simulation. If None no smoothing is +performed.

  • +
+
+
Returns:
+

low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array +(batch_size, spatial_1, spatial_2, features) +(batch_size, spatial_1, spatial_2, temporal, features)

+
+
+
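A minimal usage sketch with random placeholder data; the feature names are hypothetical and the call assumes only the signature documented above:

.. code-block:: python

    import numpy as np
    from sup3r.preprocessing.batch_queues.utilities import smooth_data

    # hypothetical 4D batch: (batch_size, spatial_1, spatial_2, features)
    low_res = np.random.rand(4, 20, 20, 2)
    smoothed = smooth_data(
        low_res,
        training_features=['u_10m', 'v_10m'],  # hypothetical feature names
        smoothing_ignore=[],                   # smooth every feature
        smoothing=1.0,                         # gaussian filter std dev
    )
    assert smoothed.shape == low_res.shape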
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.html b/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.html new file mode 100644 index 000000000..b8a3de2b4 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.html @@ -0,0 +1,779 @@ + + + + + + + + + + + sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing#

+
+
+spatial_simple_enhancing(data, s_enhance=2, obs_axis=True)[source]#
+

Simple enhancing according to s_enhance resolution

+
+
Parameters:
+
    +
  • data (Union[np.ndarray, da.core.Array]) – 5D | 4D | 3D array with dimensions: +(n_obs, spatial_1, spatial_2, temporal, features) (obs_axis=True) +(n_obs, spatial_1, spatial_2, features) (obs_axis=True) +(spatial_1, spatial_2, temporal, features) (obs_axis=False) +(spatial_1, spatial_2, temporal_or_features) (obs_axis=False)

  • +
  • s_enhance (int) – factor by which to enhance spatial dimensions

  • +
  • obs_axis (bool) – Flag for if axis=0 is the observation axis. If True (default) +spatial axis=(1, 2) (zero-indexed), if False spatial axis=(0, 1)

  • +
+
+
Returns:
+

enhanced_data (Union[np.ndarray, da.core.Array]) – 3D | 4D | 5D array with same dimensions as data with new enhanced +resolution

+
+
+
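A minimal sketch with random placeholder data using the default obs_axis=True layout:

.. code-block:: python

    import numpy as np
    from sup3r.preprocessing.batch_queues.utilities import spatial_simple_enhancing

    # (n_obs, spatial_1, spatial_2, temporal, features)
    data = np.random.rand(2, 10, 10, 24, 3)
    enhanced = spatial_simple_enhancing(data, s_enhance=2, obs_axis=True)
    # spatial axes are doubled: enhanced.shape == (2, 20, 20, 24, 3)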
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.html b/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.html new file mode 100644 index 000000000..05590a6be --- /dev/null +++ b/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.html @@ -0,0 +1,774 @@ + + + + + + + + + + + sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing#

+
+
+temporal_simple_enhancing(data, t_enhance=4, mode='constant')[source]#
+

Upsample data according to t_enhance resolution

+
+
Parameters:
+
    +
  • data (Union[np.ndarray, da.core.Array]) – 5D array with dimensions +(observations, spatial_1, spatial_2, temporal, features)

  • +
  • t_enhance (int) – factor by which to enhance temporal dimension

  • +
  • mode (str) – interpolation method for enhancement.

  • +
+
+
Returns:
+

enhanced_data (Union[np.ndarray, da.core.Array]) – 5D array with same dimensions as data with new enhanced resolution

+
+
+
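A minimal sketch with random placeholder data; 'constant' is taken here to mean simple repetition of time steps, with 'linear' interpolating instead (a hedged reading of the mode argument):

.. code-block:: python

    import numpy as np
    from sup3r.preprocessing.batch_queues.utilities import temporal_simple_enhancing

    # (observations, spatial_1, spatial_2, temporal, features)
    data = np.random.rand(2, 10, 10, 6, 3)
    enhanced = temporal_simple_enhancing(data, t_enhance=4, mode='constant')
    # temporal axis is upsampled 4x: enhanced.shape == (2, 10, 10, 24, 3)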
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.cachers.base.Cacher.html b/_autosummary/sup3r.preprocessing.cachers.base.Cacher.html new file mode 100644 index 000000000..47cc81c59 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.cachers.base.Cacher.html @@ -0,0 +1,1039 @@ + + + + + + + + + + + sup3r.preprocessing.cachers.base.Cacher — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.cachers.base.Cacher#

+
+
+class Cacher(data: Sup3rX | Sup3rDataset, cache_kwargs: Dict | None = None)[source]#
+

Bases: Container

+

Base cacher object. Simply writes given data to H5 or NETCDF files. By default every feature will be written to a separate file. To write multiple features to the same file call write_netcdf() or write_h5() directly.

+
+
Parameters:
+
    +
  • data (Union[Sup3rX, Sup3rDataset]) – Data to write to file

  • +
  • cache_kwargs (dict) – Dictionary with kwargs for caching wrangled data. This should at +minimum include a ‘cache_pattern’ key, value. This pattern must +have a {feature} format key and either a h5 or nc file extension, +based on desired output type.

    +

Can also include a max_workers key and chunks key. max_workers is an integer specifying the number of threads to use for writing chunks to output files and chunks is a dictionary of dictionaries for each feature (or a single dictionary to use for all features). e.g.
.. code-block:: JSON

    +
    +
    +
{'cache_pattern': …,
 'chunks': {
     'u_10m': {
         'time': 20,
         'south_north': 100,
         'west_east': 100
     }
 }
}

    +
    +
  • +
+
+
+
+

Note

+

This is only for saving cached data. If you want to reload the +cached files load them with a Loader object. DataHandler +objects can cache and reload from cache automatically.

+
+
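A hedged construction sketch. It assumes DataHandler and Cacher are importable from sup3r.preprocessing (their full module paths are shown on these pages), that files are written at init when a 'cache_pattern' is provided, and that the file paths and feature names are hypothetical:

.. code-block:: python

    from sup3r.preprocessing import Cacher, DataHandler

    # hypothetical low-res source files and feature names
    dh = DataHandler('./era5_2015_*.nc', features=['u_10m', 'v_10m'])
    cacher = Cacher(
        dh.data,
        cache_kwargs={
            'cache_pattern': './cache_{feature}.h5',  # one file per feature
            'max_workers': 2,
            'chunks': {'time': 20, 'south_north': 100, 'west_east': 100},
        },
    )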

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_coord_meta(out_file, data)

Add flattened coordinate meta to out_file.

cache_data(cache_pattern[, chunks, ...])

Cache data to file with file type based on user provided cache_pattern.

get_chunk_slices(chunks, shape)

Get slices used to write xarray data to netcdf file in chunks.

get_chunksizes(dset, data, chunks)

Get chunksizes after rechunking (could be undetermined beforehand if chunks == 'auto') and return rechunked data.

parse_chunks(feature, chunks, dims)

Parse chunks input to Cacher.

post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.

write_chunk(out_file, dset, chunk_slice, ...)

Add chunk to netcdf file.

write_h5(out_file, data[, features, chunks, ...])

Cache data to h5 file using user provided chunks value.

write_netcdf(out_file, data[, features, ...])

Cache data to a netcdf file.

write_netcdf_chunks(out_file, feature, data)

Write netcdf chunks with delayed dask tasks.

+
+

Attributes

+
+ + + + + + + + +

data

Return underlying data.

shape

Get shape of underlying data.

+
+
+
+cache_data(cache_pattern, chunks=None, max_workers=None, mode='w', attrs=None, verbose=False)[source]#
+

Cache data to file with file type based on user provided +cache_pattern.

+
+
Parameters:
+
    +
  • cache_pattern (str) – Cache file pattern. Must have a {feature} format key. The extension +(.h5 or .nc) specifies which format to use for caching.

  • +
  • chunks (dict | None) – Chunk sizes for coordinate dimensions. e.g. +{'u_10m': {'time': 10, 'south_north': 100, 'west_east': 100}}

  • +
  • max_workers (int | None) – Number of workers to use for parallel writing of chunks

  • +
  • mode (str) – Write mode for out_file. Defaults to write.

  • +
  • attrs (dict | None) – Optional attributes to write to file. Can specify dataset specific +attributes by adding a dictionary with the dataset name as a key. +e.g. {**global_attrs, dset: {…}}

  • +
  • verbose (bool) – Whether to log progress for each chunk written to output files.

  • +
+
+
+
+ +
+
+static parse_chunks(feature, chunks, dims)[source]#
+

Parse chunks input to Cacher. This needs to be a dictionary of dimensions and chunk values, but is parsed to a tuple for H5 caching.

+
+ +
+
+classmethod get_chunksizes(dset, data, chunks)[source]#
+

Get chunksizes after rechunking (could be undetermined beforehand +if chunks == 'auto') and return rechunked data.

+
+ +
+
+classmethod add_coord_meta(out_file, data)[source]#
+

Add flattened coordinate meta to out_file. This is used for h5 +caching.

+
+ +
+
+classmethod write_h5(out_file, data, features='all', chunks=None, max_workers=None, mode='w', attrs=None, verbose=False)[source]#
+

Cache data to h5 file using user provided chunks value.

+
+
Parameters:
+
    +
  • out_file (str) – Name of file to write. Must have a .h5 extension.

  • +
  • data (Sup3rDataset | Sup3rX | xr.Dataset) – Data to write to file. Comes from self.data, so an +xr.Dataset like object with .dims and .coords

  • +
  • features (str | list) – Name of feature(s) to write to file.

  • +
  • chunks (dict | None) – Chunk sizes for coordinate dimensions. e.g. +{'u_10m': {'time': 10, 'south_north': 100, 'west_east': 100}}

  • +
  • max_workers (int | None) – Number of workers to use for parallel writing of chunks

  • +
  • mode (str) – Write mode for out_file. Defaults to write.

  • +
  • attrs (dict | None) – Optional attributes to write to file. Can specify dataset specific +attributes by adding a dictionary with the dataset name as a key. +e.g. {**global_attrs, dset: {…}}

  • +
  • verbose (bool) – Dummy arg to match write_netcdf signature

  • +
+
+
+
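A hedged sketch of calling the classmethod directly to put several features in one file, continuing from the DataHandler dh in the sketch above (paths and feature names remain hypothetical):

.. code-block:: python

    from sup3r.preprocessing.cachers.base import Cacher

    # write both features to a single .h5 file
    Cacher.write_h5('./wind_cache.h5', dh.data, features=['u_10m', 'v_10m'])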
+ +
+
+static get_chunk_slices(chunks, shape)[source]#
+

Get slices used to write xarray data to netcdf file in chunks.

+
+ +
+
+static write_chunk(out_file, dset, chunk_slice, chunk_data, msg=None)[source]#
+

Add chunk to netcdf file.

+
+ +
+
+classmethod write_netcdf_chunks(out_file, feature, data, chunks=None, max_workers=None, verbose=False)[source]#
+

Write netcdf chunks with delayed dask tasks.

+
+ +
+
+classmethod write_netcdf(out_file, data, features='all', chunks=None, max_workers=None, mode='w', attrs=None, verbose=False)[source]#
+

Cache data to a netcdf file.

+
+
Parameters:
+
    +
  • out_file (str) – Name of file to write. Must have a .nc extension.

  • +
  • data (Sup3rDataset) – Data to write to file. Comes from self.data, so a +Sup3rDataset with coords attributes

  • +
  • features (str | list) – Names of feature(s) to write to file.

  • +
  • chunks (dict | None) – Chunk sizes for coordinate dimensions. e.g. {'south_north': 100, +'west_east': 100, 'time': 10} Can also include dataset specific +values. e.g. {'windspeed': {'south_north': 100, 'west_east': 100, +'time': 10}}

  • +
  • max_workers (int | None) – Number of workers to use for parallel writing of chunks

  • +
  • mode (str) – Write mode for out_file. Defaults to write.

  • +
  • attrs (dict | None) – Optional attributes to write to file. Can specify dataset specific +attributes by adding a dictionary with the dataset name as a key. +e.g. {**global_attrs, dset: {…}}

  • +
  • verbose (bool) – Whether to log output after each chunk is written.

  • +
+
+
+
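The netCDF counterpart under the same hedged assumptions; the chunks here use the dimension-keyed form documented above:

.. code-block:: python

    from sup3r.preprocessing.cachers.base import Cacher

    Cacher.write_netcdf(
        './wind_cache.nc',
        dh.data,
        features=['u_10m', 'v_10m'],
        chunks={'time': 10, 'south_north': 100, 'west_east': 100},
    )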
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.cachers.base.html b/_autosummary/sup3r.preprocessing.cachers.base.html new file mode 100644 index 000000000..2bb6b32e7 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.cachers.base.html @@ -0,0 +1,739 @@ + + + + + + + + + + + sup3r.preprocessing.cachers.base — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.cachers.base

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.cachers.base#

+

Basic objects that can cache rasterized / derived data.

+

Classes

+
+ + + + + +

Cacher(data[, cache_kwargs])

Base cacher object.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.cachers.html b/_autosummary/sup3r.preprocessing.cachers.html new file mode 100644 index 000000000..9e590124d --- /dev/null +++ b/_autosummary/sup3r.preprocessing.cachers.html @@ -0,0 +1,741 @@ + + + + + + + + + + + sup3r.preprocessing.cachers — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.cachers

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.cachers#

+

Basic Cacher container.

+
+ + + + + + + + +

base

Basic objects that can cache rasterized / derived data.

utilities

Basic objects that can cache rasterized / derived data.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.cachers.utilities.html b/_autosummary/sup3r.preprocessing.cachers.utilities.html new file mode 100644 index 000000000..cf3a4abe1 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.cachers.utilities.html @@ -0,0 +1,730 @@ + + + + + + + + + + + sup3r.preprocessing.cachers.utilities — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.cachers.utilities

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.cachers.utilities#

+

Basic objects that can cache rasterized / derived data.

+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.collections.base.Collection.html b/_autosummary/sup3r.preprocessing.collections.base.Collection.html new file mode 100644 index 000000000..c6762d0bb --- /dev/null +++ b/_autosummary/sup3r.preprocessing.collections.base.Collection.html @@ -0,0 +1,895 @@ + + + + + + + + + + + sup3r.preprocessing.collections.base.Collection — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.collections.base.Collection#

+
+
+class Collection(containers: List[Container] | List[Sampler] | List[DualSampler])[source]#
+

Bases: Container

+

Object consisting of a set of containers. These objects are distinct +from Sup3rDataset objects, which also +contain multiple data members, because these members are completely +independent of each other. They are collected together for the purpose of +expanding a training dataset (e.g. BatchHandlers).

+
+
Parameters:
+

data (Union[Sup3rX, Sup3rDataset, Tuple[Sup3rX, …], Tuple[Sup3rDataset, …]]) – Can be an xr.Dataset, a Sup3rX object, a Sup3rDataset object, or a tuple of such objects.

+
+

Note

+

.data will return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+
+
+
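A hedged sketch, assuming dh1 and dh2 are already-built DataHandler-like containers; container_weights and check_shared_attr are the members documented below:

.. code-block:: python

    from sup3r.preprocessing.collections.base import Collection

    # dh1, dh2 are hypothetical containers built elsewhere
    coll = Collection([dh1, dh2])
    weights = coll.container_weights            # sampling weights by relative size
    feats = coll.check_shared_attr('features')  # shared value if all members agree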

Methods

+
+ + + + + + + + + + + +

check_shared_attr(attr)

Check if all containers have the same value for attr.

post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + + + + +

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

shape

Get shape of underlying data.

+
+
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+check_shared_attr(attr)[source]#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.collections.base.html b/_autosummary/sup3r.preprocessing.collections.base.html new file mode 100644 index 000000000..aeec1816d --- /dev/null +++ b/_autosummary/sup3r.preprocessing.collections.base.html @@ -0,0 +1,744 @@ + + + + + + + + + + + sup3r.preprocessing.collections.base — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.collections.base

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.collections.base#

+

Base collection classes. These are objects that contain sets / lists of +containers like batch handlers. Of course these also contain data so they’re +containers too!

+

TODO: xarray-contrib/datatree could unify Sup3rDataset and +collections of data. Consider migrating once datatree has been fully +integrated into xarray (in progress as of 8/8/2024)

+

Classes

+
+ + + + + +

Collection(containers)

Object consisting of a set of containers.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.collections.html b/_autosummary/sup3r.preprocessing.collections.html new file mode 100644 index 000000000..02c8dbd98 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.collections.html @@ -0,0 +1,741 @@ + + + + + + + + + + + sup3r.preprocessing.collections — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.collections

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.collections#

+

Classes consisting of collections of containers.

+
+ + + + + + + + +

base

Base collection classes.

stats

Collection object with methods to compute and save stats.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.html b/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.html new file mode 100644 index 000000000..3b15c2c1d --- /dev/null +++ b/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.html @@ -0,0 +1,937 @@ + + + + + + + + + + + sup3r.preprocessing.collections.stats.StatsCollection — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.collections.stats.StatsCollection#

+
+
+class StatsCollection(containers, means=None, stds=None)[source]#
+

Bases: Collection

+

Extended collection object with methods for computing means and stds and +saving these to files.

+
+

Note

+

We write stats as float64 because float32 is not JSON serializable

+
+
+
Parameters:
+
    +
  • containers (List[Rasterizer]) – List of containers to compute stats for.

  • +
  • means (str | dict | None) – Usually a file path for saving results, or None for just +calculating stats and not saving. Can also be a dict, which will +just get returned as the “result”.

  • +
  • stds (str | dict | None) – Usually a file path for saving results, or None for just +calculating stats and not saving. Can also be a dict, which will +just get returned as the “result”.

  • +
+
+
+
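A hedged sketch with hypothetical containers and file paths, assuming the computed stats are exposed after init:

.. code-block:: python

    from sup3r.preprocessing.collections.stats import StatsCollection

    # dh1, dh2 are hypothetical DataHandler-like containers
    stats = StatsCollection([dh1, dh2], means='./means.json', stds='./stds.json')
    # per-feature means / stds are computed across both containers and the
    # json files are written as float64, per the note above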

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + +

check_shared_attr(attr)

Check if all containers have the same value for attr.

get_means(means)

Dictionary of means for each feature, computed across all data handlers.

get_stds(stds)

Dictionary of standard deviations for each feature, computed across all data handlers.

normalize(containers)

Normalize container data with computed stats.

post_init_log([args_dict])

Log additional arguments after initialization.

save_stats(stds, means)

Save stats to json files.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + + + + +

container_weights

Get weights used to sample from different containers based on relative sizes

data

Return underlying data.

features

Get all features contained in data.

shape

Get shape of underlying data.

+
+
+
+get_means(means)[source]#
+

Dictionary of means for each feature, computed across all data +handlers.

+
+ +
+
+get_stds(stds)[source]#
+

Dictionary of standard deviations for each feature, computed across +all data handlers.

+
+ +
+
+save_stats(stds, means)[source]#
+

Save stats to json files.

+
+ +
+
+normalize(containers)[source]#
+

Normalize container data with computed stats.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.collections.stats.html b/_autosummary/sup3r.preprocessing.collections.stats.html new file mode 100644 index 000000000..967b143e3 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.collections.stats.html @@ -0,0 +1,739 @@ + + + + + + + + + + + sup3r.preprocessing.collections.stats — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.collections.stats

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.collections.stats#

+

Collection object with methods to compute and save stats.

+

Classes

+
+ + + + + +

StatsCollection(containers[, means, stds])

Extended collection object with methods for computing means and stds and saving these to files.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.html b/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.html new file mode 100644 index 000000000..60dcf94cb --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.html @@ -0,0 +1,1033 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.exo.ExoData — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.data_handlers.exo.ExoData#

+
+
+class ExoData(steps)[source]#
+

Bases: dict

+

Special dictionary class for multiple exogenous_data steps

+

TODO: Can we simplify this by relying more on xr.Dataset meta data instead +of storing enhancement factors for each step? Seems like we could take the +highest res data and coarsen based on s/t enhance, also.

+

Combine multiple SingleExoDataStep objects

+
+
Parameters:
+

steps (dict) – Dictionary with feature keys each with entries describing whether features should be combined at input, a mid network layer, or with output. e.g.:

{
    'topography': {
        'steps': [
            {'combine_type': 'input', 'model': 0, 'data': …},
            {'combine_type': 'layer', 'model': 0, 'data': …}
        ]
    }
}

Each array in the 'data' key has 3D or 4D shape:
(spatial_1, spatial_2, 1)
(spatial_1, spatial_2, n_temporal, 1)

+
+
+
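A hedged construction sketch with a random placeholder array, assuming a plain dict matching the documented structure is accepted:

.. code-block:: python

    import numpy as np
    from sup3r.preprocessing.data_handlers.exo import ExoData

    topo = np.random.rand(100, 100, 1)  # (spatial_1, spatial_2, 1)
    exo = ExoData({'topography': {'steps': [
        {'combine_type': 'input', 'model': 0, 'data': topo},
        {'combine_type': 'layer', 'model': 0, 'data': topo},
    ]}})
    step_exo = exo.get_model_step_exo(0)  # steps used by the first model
    arr = exo.get_combine_type_data('topography', 'input')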

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

get_chunk(lr_slices)

Get the data for all model steps corresponding to the low res extent selected by lr_slices

get_combine_type_data(feature, combine_type)

Get exogenous data for given feature which is used according to the given combine_type (input/output/layer) for this model_step.

get_model_step_exo(model_step)

Get the exogenous data for the given model_step from the full list of steps

items()

keys()

pop(k[,d])

If the key is not found, return the default if given; otherwise, raise a KeyError.

popitem()

Remove and return a (key, value) pair as a 2-tuple.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

split(split_steps)

Split self into multiple ExoData objects based on split_steps.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+
+
+
+get_model_step_exo(model_step)[source]#
+

Get the exogenous data for the given model_step from the full list +of steps

+
+
Parameters:
+

model_step (int) – Index of the model to get exogenous data for.

+
+
Returns:
+

model_step_exo (dict) – Dictionary of features each with list of steps which match the +given model_step

+
+
+
+ +
+
+split(split_steps)[source]#
+

Split self into multiple ExoData objects based on +split_steps. The splits are done such that the steps in the ith +entry of the returned list all have a +model number < split_steps[i].

+
+

Note

+

This is used for multi-step models to correctly distribute the set of +all exo data steps to the appropriate models. For example, +MultiStepGan models with some +temporal (spatial) steps followed by some spatial (temporal) steps. The +temporal (spatial) models might take the first N exo data steps and +then the spatial (temporal) models will take the remaining exo data +steps.

+
+
+
Parameters:
+

split_steps (list) – Step index list to use for splitting. To split this into exo data for spatial models and temporal models split_steps should be [len(spatial_models)]. If this is for a MultiStepGan model with temporal steps first, split_steps should be [len(temporal_models)]. If this is for a multi-step model composed of more than two models (e.g. SolarMultiStepGan) split_steps should be [len(spatial_solar_models), len(spatial_solar_models) + len(spatial_wind_models)]

+
+
Returns:
+

split_list (List[ExoData]) – List of ExoData objects coming from the split of self, +according to split_steps

+
+
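Continuing the hedged sketch above, for a hypothetical multi-step model with two spatial steps followed by temporal steps (the two-way unpacking assumes one split index yields two ExoData objects):

.. code-block:: python

    # steps with model < 2 go to the spatial models, the rest to temporal
    spatial_exo, temporal_exo = exo.split(split_steps=[2])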
+
+ +
+
+get_combine_type_data(feature, combine_type, model_step=None)[source]#
+

Get exogenous data for given feature which is used according to the +given combine_type (input/output/layer) for this model_step.

+
+
Parameters:
+
    +
  • feature (str) – Name of exogenous feature to get data for

  • +
  • combine_type (str) – Usage type for requested data. e.g input/output/layer

  • +
  • model_step (int | None) – Model step the data will be used for. If this is not None then +only steps with self[feature][‘steps’][:][‘model’] == model_step +will be searched for data.

  • +
+
+
Returns:
+

data (tf.Tensor | np.ndarray) – Exogenous data for given parameters

+
+
+
+ +
+
+get_chunk(lr_slices)[source]#
+

Get the data for all model steps corresponding to the low res extent +selected by lr_slices

+
+
Parameters:
+
    +
  • lr_slices (list) – List of spatiotemporal slices which specify the extent of the low-resolution input data.

  • +
+
+
Returns:
+

exo_data (ExoData) – ExoData object composed of multiple +SingleExoDataStep objects. This is the sliced exo data for +the extent specified by lr_slices.

+
+
+
+ +
+
+clear() None.  Remove all items from D.#
+
+ +
+
+copy() a shallow copy of D#
+
+ +
+
+fromkeys(value=None, /)#
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)#
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items#
+
+ +
+
+keys() a set-like object providing a view on D's keys#
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.#
+

If the key is not found, return the default if given; otherwise, +raise a KeyError.

+
+ +
+
+popitem()#
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+setdefault(key, default=None, /)#
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.#
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values#
+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.html b/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.html new file mode 100644 index 000000000..2dc795d6c --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.html @@ -0,0 +1,926 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.exo.ExoDataHandler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.data_handlers.exo.ExoDataHandler#

+
+
+class ExoDataHandler(file_paths: str | list | Path, feature: str, model: Sup3rGan | MultiStepGan | None = None, steps: list | None = None, source_file: str | None = None, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, cache_dir: str = './exo_cache', chunks: str | dict | None = 'auto', distance_upper_bound: int | None = None)[source]#
+

Bases: object

+

Class to rasterize exogenous features for multistep forward passes. e.g. +Multiple topography arrays at different resolutions for multiple spatial +enhancement steps.

+

This takes a list of models and uses each model's feature sets to retrieve and rasterize exogenous data according to the requested target coordinate and grid shape, for each model step.

+
+
Parameters:
+
    +
  • file_paths (str | list) – A single source h5 file or netcdf file to extract raster data from. The string can be a unix-style file path which will be passed through glob.glob. This is typically low-res WRF output or GCM netcdf data that is the source low-resolution data intended to be sup3r resolved.

  • +
  • feature (str) – Exogenous feature to extract from file_paths

  • +
  • model (Sup3rGan | MultiStepGan) – Model used to get exogenous data. If a MultiStepGan, then lr_features, hr_exo_features, and hr_out_features will be checked for each model in model.models and exogenous data will be retrieved based on the resolution required for that type of feature. e.g. If a model has topography as a lr and hr_exo feature, and the model performs 5x spatial enhancement with an input resolution of 30km, then topography at 30km and at 6km will be retrieved. Either this or a list of steps needs to be provided.

  • +
  • steps (list) – List of dictionaries containing info on which models to use for a given step index and what type of exo data the step requires. e.g. [{'model': 0, 'combine_type': 'input'}, {'model': 0, 'combine_type': 'layer'}]. Each step entry can also contain enhancement factors. e.g. [{'model': 0, 'combine_type': 'input', 's_enhance': 1, 't_enhance': 1}, {'model': 0, 'combine_type': 'layer', 's_enhance': 3, 't_enhance': 1}]

    +
    +
  • +
  • source_file (str) – Filepath to source wtk, nsrdb, or netcdf file to get hi-res data from +which will be mapped to the enhanced grid of the file_paths input. +Pixels from this file will be mapped to their nearest low-res pixel in +the file_paths input. Accordingly, the input should be a significantly +higher resolution than file_paths. Warnings will be raised if the +low-resolution pixels in file_paths do not have unique nearest pixels +from this exo source data.

  • +
  • input_handler_name (str) – data handler class used by the exo handler. Provide a string name to +match a Rasterizer. If None +the correct handler will be guessed based on file type and time series +properties. This is passed directly to the exo handler, along with +input_handler_kwargs

  • +
  • input_handler_kwargs (dict | None) – Any kwargs for initializing the input_handler_name class used by +the exo handler.

  • +
  • cache_dir (str | None) – Directory for storing cache data. Default is ‘./exo_cache’. If None +then no data will be cached.

  • +
  • chunks (str | dict) – Dictionary of dimension chunk sizes for returned exo data. e.g. +{‘time’: 100, ‘south_north’: 100, ‘west_east’: 100}. This can also just +be “auto”. This is passed to .chunk() before returning exo data +through .data attribute

  • +
  • distance_upper_bound (float | None) – Maximum distance to map high-resolution data from source_file to the +low-resolution file_paths input. None (default) will calculate this +based on the median distance between points in source_file

  • +
+
+
+
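A hedged construction sketch with hypothetical file paths, passing explicit steps instead of a model:

.. code-block:: python

    from sup3r.preprocessing.data_handlers.exo import ExoDataHandler

    exo_handler = ExoDataHandler(
        file_paths='./gcm_lowres.nc',      # hypothetical low-res source
        feature='topography',
        source_file='./topo_highres.h5',   # hypothetical hi-res exo source
        steps=[
            {'model': 0, 'combine_type': 'input', 's_enhance': 1, 't_enhance': 1},
            {'model': 0, 'combine_type': 'layer', 's_enhance': 3, 't_enhance': 1},
        ],
        cache_dir='./exo_cache',
    )
    exo_data = exo_handler.data  # rasterized exo data, per the chunks note above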

Methods

+
+ + + + + + + + + + + + + + +

get_all_step_data()

Get exo data for each model step.

get_exo_rasterizer(s_enhance, t_enhance)

Get exo rasterizer instance for given enhancement factors

get_exo_steps(feature, models)

Get list of steps describing how to use exogenous data for the given feature in the list of given models.

get_single_step_data(s_enhance, t_enhance)

Get exo data for a single model step, with specific enhancement factors.

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

cache_dir

cache_files

Get exo data cache file for all enhancement factors

chunks

distance_upper_bound

input_handler_kwargs

input_handler_name

model

source_file

steps

file_paths

feature

+
+
+
+classmethod get_exo_steps(feature, models)[source]#
+

Get list of steps describing how to use exogenous data for the given +feature in the list of given models. This checks the input and +exo feature lists for each model step and adds that step if the +given feature is found in the list.

+
+ +
+
+get_exo_rasterizer(s_enhance, t_enhance)[source]#
+

Get exo rasterizer instance for given enhancement factors

+
+ +
+
+get_single_step_data(s_enhance, t_enhance)[source]#
+

Get exo data for a single model step, with specific enhancement +factors.

+
+ +
+
+property cache_files#
+

Get exo data cache file for all enhancement factors

+
+ +
+
+get_all_step_data()[source]#
+

Get exo data for each model step.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.html b/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.html new file mode 100644 index 000000000..3744cc49b --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.html @@ -0,0 +1,927 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.exo.SingleExoDataStep — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.data_handlers.exo.SingleExoDataStep#

+
+
+class SingleExoDataStep(feature, combine_type, model, data)[source]#
+

Bases: dict

+

Special dictionary class for exogenous_data step

+

exogenous_data step dictionary for a given model step

+
+
Parameters:
+
    +
  • feature (str) – Name of feature corresponding to data.

  • +
  • combine_type (str) – Specifies how the exogenous_data should be used for this step. e.g. “input”, “layer”, “output”. For example, if this equals “input” the data will be used as input to the forward pass for the model step given by model

  • +
  • model (int) – Specifies the model index which will use the data. For example, +if model == 1 then the data will be used according to +combine_type in the 2nd model step in a MultiStepGan.

  • +
  • data (Union[np.ndarray, da.core.Array]) – The data to be used for the given model step.

  • +
+
+
+
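A hedged sketch with a random placeholder array; the step behaves like a plain dict (the 'data' key is assumed from the shape property below):

.. code-block:: python

    import numpy as np
    from sup3r.preprocessing.data_handlers.exo import SingleExoDataStep

    step = SingleExoDataStep(
        'topography', 'input', model=0, data=np.random.rand(100, 100, 1))
    step.shape    # (100, 100, 1)
    step['data']  # the underlying array, via normal dict access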

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If the key is not found, return the default if given; otherwise, raise a KeyError.

popitem()

Remove and return a (key, value) pair as a 2-tuple.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+
+

Attributes

+
+ + + + + +

shape

Shape of data array for this model step.

+
+
+
+property shape#
+

Shape of data array for this model step.

+
+ +
+
+clear() None.  Remove all items from D.#
+
+ +
+
+copy() a shallow copy of D#
+
+ +
+
+fromkeys(value=None, /)#
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)#
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items#
+
+ +
+
+keys() a set-like object providing a view on D's keys#
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.#
+

If the key is not found, return the default if given; otherwise, +raise a KeyError.

+
+ +
+
+popitem()#
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+setdefault(key, default=None, /)#
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.#
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values#
+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.exo.html b/_autosummary/sup3r.preprocessing.data_handlers.exo.html new file mode 100644 index 000000000..8a6e3a461 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.exo.html @@ -0,0 +1,747 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.exo — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.data_handlers.exo

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.data_handlers.exo#

+

Exogenous data handler and related objects. The ExoDataHandler performs +exogenous data rasterization for one or more model steps for requested +features.

+

Classes

+
+ + + + + + + + + + + +

ExoData(steps)

Special dictionary class for multiple exogenous_data steps

ExoDataHandler(file_paths, feature[, model, ...])

Class to rasterize exogenous features for multistep forward passes.

SingleExoDataStep(feature, combine_type, ...)

Special dictionary class for exogenous_data step

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.html new file mode 100644 index 000000000..166c5c5c4 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.html @@ -0,0 +1,1033 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.factory.DailyDataHandler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.data_handlers.factory.DailyDataHandler#

+
+
+class DailyDataHandler(file_paths, *, features='all', res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, BaseLoader=None, FeatureRegistry=None, interp_kwargs=None, cache_kwargs=None, **kwargs)[source]#
+

Bases: DataHandler

+

General data handler class with daily data as an additional attribute. The xr.Dataset coarsen method is employed to compute averages / mins / maxes over daily windows. Special treatment of clearsky_ratio, which requires derivation from total clearsky_ghi and total ghi.

+

TODO: +(1) Not a fan of manually adding cs_ghi / ghi and then removing. Maybe +this could be handled through a derivation instead

+

(2) We assume daily and hourly data here but we could generalize this to +go from daily -> any time step. This would then enable the CC models to do +arbitrary temporal enhancement.

+
+
Parameters:
+
    +
  • file_paths (str | list | pathlib.Path) – file_paths input to LoaderClass

  • +
  • features (list | str) – Features to load and / or derive. If ‘all’ then all available raw +features will be loaded. Specify explicit feature names for +derivations.

  • +
  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. +BaseLoader is usually xr.open_mfdataset for NETCDF files and +MultiFileResourceX for H5 files.

  • +
  • chunks (dict | str) – Dictionary of chunk sizes to pass through to +dask.array.from_array() or xr.Dataset().chunk(). Will be +converted to a tuple when used in from_array(). These are the +methods for H5 and NETCDF data, respectively. This argument can +be “auto” or None in addition to a dictionary. None will not do +any chunking and load data into memory as np.array

  • +
  • target (tuple) – (lat, lon) lower left corner of raster. Either need target+shape +or raster_file.

  • +
  • shape (tuple) – (rows, cols) grid size. Either need target+shape or raster_file.

  • +
  • time_slice (slice | list) – Slice specifying extent and step of temporal extraction. e.g. +slice(start, stop, step). If equal to slice(None, None, 1) the full +time dimension is selected. Can be also be a list [start, stop, +step]

  • +
  • threshold (float) – Nearest neighbor euclidean distance threshold. If the coordinates +are more than this value away from the target lat/lon, an error is +raised.

  • +
  • time_roll (int) – Number of steps to roll along the time axis. Passed to +xr.Dataset.roll()

  • +
  • time_shift (int | None) – Number of minutes to shift time axis. This can be used, for +example, to shift the time index for daily data so that the time +stamp for a given day starts at the zeroth minute instead of at +noon, as is the case for most GCM data.

  • +
  • hr_spatial_coarsen (int) – Spatial coarsening factor. Passed to xr.Dataset.coarsen()

  • +
  • nan_method_kwargs (str | dict | None) – Keyword arguments for nan handling. If ‘mask’, time steps with nans +will be dropped. Otherwise this should be a dict of kwargs which +will be passed to +sup3r.preprocessing.accessor.Sup3rX.interpolate_na().

  • +
  • BaseLoader (Callable) – Base level file loader wrapped by +Loader. This is usually +xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 +files.

  • +
  • FeatureRegistry (dict) – Dictionary of +DerivedFeature +objects used for derivations

  • +
  • interp_kwargs (dict | None) – Dictionary of kwargs for level interpolation. Can include “method” +and “run_level_check” keys. Method specifies how to perform height +interpolation. e.g. Deriving u_20m from u_10m and u_100m. Options +are “linear” and “log”. See +sup3r.preprocessing.derivers.Deriver.do_level_interpolation()

  • +
  • cache_kwargs (dict | None) – Dictionary with kwargs for caching wrangled data. This should at +minimum include a cache_pattern key, value. This pattern must +have a {feature} format key and either a h5 or nc file extension, +based on desired output type. See class:Cacher for description +of more arguments.

  • +
  • kwargs (dict) – Dictionary of additional keyword args for +Rasterizer, used +specifically for rasterizing flattened data

  • +
+
+
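A hedged construction sketch with a hypothetical hourly source; daily aggregation happens internally, per the class description above:

.. code-block:: python

    from sup3r.preprocessing.data_handlers.factory import DailyDataHandler

    dh = DailyDataHandler(
        './nsrdb_2019.h5',       # hypothetical hourly source file
        features=['clearsky_ratio'],
        target=(39.0, -105.0),   # hypothetical lower-left (lat, lon)
        shape=(20, 20),
    )
    dh.data  # includes daily averages / mins / maxes over daily windows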
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_registry(feature)

Get compute method from the registry if available.

derive(feature)

Routine to derive requested features.

do_level_interpolation(feature[, interp_kwargs])

Interpolate over height or pressure to derive the given feature.

get_inputs(feature)

Get inputs for the given feature and inputs for those inputs.

get_multi_level_data(feature)

Get data stored in multi-level arrays, like u stored on pressure levels.

get_single_level_data(feature)

When doing level interpolation we should include the single level data available.

has_interp_variables(feature)

Check if the given feature can be interpolated from values at nearby heights or from pressure level data.

map_new_name(feature, pattern)

If the search for a derivation method first finds an alternative name for the feature we want to derive, by matching a wildcard pattern, we need to replace the wildcard with the specific height or pressure we want and continue the search for a derivation method with this new name.

no_overlap(feature)

Check if any of the nested inputs for 'feature' contain 'feature'

post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + +

FEATURE_REGISTRY

data

Return underlying data.

shape

Get shape of underlying data.

+
+
+
+check_registry(feature) ndarray | Array | str | None#
+

Get compute method from the registry if available. Will check for +pattern feature match in feature registry. e.g. if u_100m matches a +feature registry entry of u_(.*)m

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+derive(feature) ndarray | Array#
+

Routine to derive requested features. Employs a little recursion to locate differently named features with a name map in the feature registry. i.e. if FEATURE_REGISTRY contains a key, value pair like “windspeed”: “wind_speed” then requesting “windspeed” will ultimately return a compute method (or fetch from raw data) for “wind_speed”.

+
+

Note

+

Features are all saved as lower case names and __contains__ checks will +use feature.lower()

+
+
+ +
+
+do_level_interpolation(feature, interp_kwargs=None) DataArray#
+

Interpolate over height or pressure to derive the given feature.

+
+ +
+
+get_inputs(feature)#
+

Get inputs for the given feature and inputs for those inputs.

+
+ +
+
+get_multi_level_data(feature)#
+

Get data stored in multi-level arrays, like u stored on pressure +levels.

+
+ +
+
+get_single_level_data(feature)#
+

When doing level interpolation we should include the single level +data available. e.g. If we have u_100m already and want to interpolate +u_40m from multi-level data U we should add u_100m at height 100m +before doing interpolation, since 100 could be a closer level to 40m +than those available in U.

+
+ +
+
+has_interp_variables(feature)#
+

Check if the given feature can be interpolated from values at nearby +heights or from pressure level data. e.g. If u_10m and u_50m +exist then u_30m can be interpolated from these. If a pressure +level array u is available this can also be used, in conjunction +with height data.

+
+ +
+
+map_new_name(feature, pattern)#
+

If the search for a derivation method first finds an alternative +name for the feature we want to derive, by matching a wildcard pattern, +we need to replace the wildcard with the specific height or pressure we +want and continue the search for a derivation method with this new +name.

+
+ +
+
+no_overlap(feature)#
+

Check if any of the nested inputs for ‘feature’ contain ‘feature’

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.html new file mode 100644 index 000000000..970c1ba87 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.html @@ -0,0 +1,1028 @@ + + + + + + + + + + + sup3r.preprocessing.data_handlers.factory.DataHandler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.data_handlers.factory.DataHandler#

+
+
+class DataHandler(file_paths, features='all', res_kwargs: dict | None = None, chunks: str | Dict[str, int] = 'auto', target: tuple | None = None, shape: tuple | None = None, time_slice: slice | tuple | list | None = slice(None, None, None), threshold: float | None = None, time_roll: int = 0, time_shift: int | None = None, hr_spatial_coarsen: int = 1, nan_method_kwargs: dict | None = None, BaseLoader: Callable | None = None, FeatureRegistry: dict | None = None, interp_kwargs: dict | None = None, cache_kwargs: dict | None = None, **kwargs)[source]#
+

Bases: Deriver

+

Base DataHandler. Composes +Rasterizer, +Loader, +Deriver, and +Cacher classes.

+
+
Parameters:
• file_paths (str | list | pathlib.Path) – file_paths input to LoaderClass
• features (list | str) – Features to load and / or derive. If 'all' then all available raw features will be loaded. Specify explicit feature names for derivations.
• res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. BaseLoader is usually xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 files.
• chunks (dict | str) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be "auto" or None in addition to a dictionary. None will not do any chunking and will load data into memory as np.array.
• target (tuple) – (lat, lon) lower left corner of raster. Need either target+shape or raster_file.
• shape (tuple) – (rows, cols) grid size. Need either target+shape or raster_file.
• time_slice (slice | list) – Slice specifying extent and step of temporal extraction, e.g. slice(start, stop, step). If equal to slice(None, None, 1) the full time dimension is selected. Can also be a list [start, stop, step].
• threshold (float) – Nearest neighbor euclidean distance threshold. If the coordinates are more than this value away from the target lat/lon, an error is raised.
• time_roll (int) – Number of steps to roll along the time axis. Passed to xr.Dataset.roll().
• time_shift (int | None) – Number of minutes to shift the time axis. This can be used, for example, to shift the time index for daily data so that the time stamp for a given day starts at the zeroth minute instead of at noon, as is the case for most GCM data.
• hr_spatial_coarsen (int) – Spatial coarsening factor. Passed to xr.Dataset.coarsen().
• nan_method_kwargs (str | dict | None) – Keyword arguments for nan handling. If 'mask', time steps with nans will be dropped. Otherwise this should be a dict of kwargs which will be passed to sup3r.preprocessing.accessor.Sup3rX.interpolate_na().
• BaseLoader (Callable) – Base level file loader wrapped by Loader. This is usually xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 files.
• FeatureRegistry (dict) – Dictionary of DerivedFeature objects used for derivations.
• interp_kwargs (dict | None) – Dictionary of kwargs for level interpolation. Can include "method" and "run_level_check" keys. Method specifies how to perform height interpolation, e.g. deriving u_20m from u_10m and u_100m. Options are "linear" and "log". See sup3r.preprocessing.derivers.Deriver.do_level_interpolation().
• cache_kwargs (dict | None) – Dictionary with kwargs for caching wrangled data. This should at minimum include a cache_pattern key/value pair. The pattern must have a {feature} format key and either an h5 or nc file extension, based on the desired output type. See the Cacher class for a description of more arguments.
• kwargs (dict) – Dictionary of additional keyword args for Rasterizer, used specifically for rasterizing flattened data.
+
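A minimal usage sketch of the parameters above, assuming a hypothetical NETCDF file './era5_2020.nc' with u/v wind components at fixed heights::

    from sup3r.preprocessing.data_handlers.factory import DataHandler

    dh = DataHandler(
        file_paths='./era5_2020.nc',        # hypothetical input file
        features=['u_10m', 'v_10m', 'u_100m', 'v_100m'],
        target=(39.0, -105.0),              # (lat, lon) lower left corner
        shape=(20, 20),                     # (rows, cols) raster size
        time_slice=slice(None, None, 1),    # full time dimension
    )
    print(dh.shape)  # shape of the underlying wrapped dataset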

Methods


check_registry(feature)

Get compute method from the registry if available.

derive(feature)

Routine to derive requested features.

do_level_interpolation(feature[, interp_kwargs])

Interpolate over height or pressure to derive the given feature.

get_inputs(feature)

Get inputs for the given feature and inputs for those inputs.

get_multi_level_data(feature)

Get data stored in multi-level arrays, like u stored on pressure levels.

get_single_level_data(feature)

When doing level interpolation we should include the single level data available.

has_interp_variables(feature)

Check if the given feature can be interpolated from values at nearby heights or from pressure level data.

map_new_name(feature, pattern)

If the search for a derivation method first finds an alternative name for the feature we want to derive, by matching a wildcard pattern, we need to replace the wildcard with the specific height or pressure we want and continue the search for a derivation method with this new name.

no_overlap(feature)

Check if any of the nested inputs for 'feature' contain 'feature'

post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.


Attributes


FEATURE_REGISTRY

data

Return underlying data.

shape

Get shape of underlying data.

property data

Return underlying data.

Returns: Sup3rDataset

See also: wrap()
check_registry(feature) → ndarray | Array | str | None

Get compute method from the registry if available. Will check for a pattern feature match in the feature registry, e.g. u_100m matches a feature registry entry of u_(.*)m.
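The lookup can be pictured with plain regex matching over a toy registry (the entry name and value here are hypothetical)::

    import re

    registry = {'u_(.*)m': '<compute method for u at any height>'}
    request = 'u_100m'
    hits = [v for k, v in registry.items() if re.match(k, request)]
    print(hits)  # ['<compute method for u at any height>']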

derive(feature) → ndarray | Array

Routine to derive requested features. Employs a little recursion to locate differently named features with a name map in the feature registry, i.e. if FEATURE_REGISTRY contains a key, value pair like "windspeed": "wind_speed" then requesting "windspeed" will ultimately return a compute method (or fetch from raw data) for "wind_speed".

Note:
Features are all saved as lower case names and __contains__ checks will use feature.lower().
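A sketch of the alias-following recursion with a toy registry (the lambda stands in for a real DerivedFeature compute method; both entries are assumptions for illustration)::

    FEATURE_REGISTRY = {
        'windspeed': 'wind_speed',   # name map entry
        'wind_speed': lambda u, v: (u**2 + v**2) ** 0.5,
    }

    def resolve(name):
        # follow string aliases until a compute method is found
        entry = FEATURE_REGISTRY.get(name.lower())
        return resolve(entry) if isinstance(entry, str) else entry

    print(resolve('WindSpeed')(3.0, 4.0))  # 5.0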

The inherited methods do_level_interpolation, get_inputs, get_multi_level_data, get_single_level_data, has_interp_variables, map_new_name, no_overlap, post_init_log, the shape property, and wrap carry the same docstrings as documented above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.html
new file mode 100644
index 000000000..27ddd7a1b
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.html
@@ -0,0 +1,774 @@

sup3r.preprocessing.data_handlers.factory.DataHandlerFactory

DataHandlerFactory(cls, BaseLoader=None, FeatureRegistry=None, name=None)  [source]

Build composite objects that load from file_paths, rasterize a specified region, derive new features, and cache derived data.

Parameters:
• BaseLoader (Callable) – Optional base loader update. The default for H5 is MultiFileWindX and for NETCDF the default is xarray.
• FeatureRegistry (Dict[str, DerivedFeature]) – Dictionary of compute methods for features. This is used to look up how to derive features that are not contained in the raw loaded data.
• name (str) – Optional class name, used to resolve repr(Class) and distinguish partially initialized DataHandlers with different FeatureRegistries.
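A usage sketch, assuming DataHandler is importable alongside the factory (the registry entry is a hypothetical alias for illustration)::

    from sup3r.preprocessing.data_handlers.factory import (
        DataHandler, DataHandlerFactory)

    MY_REGISTRY = {'windspeed': 'wind_speed'}  # illustrative alias entry
    MyHandler = DataHandlerFactory(
        DataHandler, FeatureRegistry=MY_REGISTRY, name='MyHandler')
    # MyHandler is constructed like DataHandler, with the registry preset.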
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.html
new file mode 100644
index 000000000..c9909d4ca
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.html
@@ -0,0 +1,1026 @@

sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC

class DataHandlerH5SolarCC(file_paths, *, features='all', res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, interp_kwargs=None, cache_kwargs=None, **kwargs)

Bases: DailyDataHandler

+

FactoryDataHandler object: a partially initialized DataHandler with BaseLoader, FeatureRegistry, and name set.
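A minimal sketch, assuming a hypothetical NSRDB-style h5 file::

    from sup3r.preprocessing.data_handlers.factory import (
        DataHandlerH5SolarCC)

    dh = DataHandlerH5SolarCC(
        './nsrdb_2019.h5',                  # hypothetical source file
        features=['ghi', 'clearsky_ghi', 'clearsky_ratio'],
        target=(39.0, -105.0),
        shape=(20, 20),
    )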

Parameters are identical to DataHandler above (minus BaseLoader and FeatureRegistry, which this partially initialized class already carries), and the Methods and Attributes tables match DataHandler as well.
BASE_LOADER

alias of MultiFileNSRDBX

All remaining methods and properties (check_registry, data, derive, do_level_interpolation, get_inputs, get_multi_level_data, get_single_level_data, has_interp_variables, map_new_name, no_overlap, post_init_log, shape, wrap) carry the same docstrings as documented under DataHandler above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.html
new file mode 100644
index 000000000..e97c3a233
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.html
@@ -0,0 +1,1026 @@

sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC

class DataHandlerH5WindCC(file_paths, *, features='all', res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, interp_kwargs=None, cache_kwargs=None, **kwargs)

Bases: DailyDataHandler

+

FactoryDataHandler object: a partially initialized DataHandler with BaseLoader, FeatureRegistry, and name set.

Parameters and the Methods and Attributes tables are identical to DataHandlerH5SolarCC above.
BASE_LOADER

alias of MultiFileNSRDBX

All remaining methods and properties (check_registry, data, derive, do_level_interpolation, get_inputs, get_multi_level_data, get_single_level_data, has_interp_variables, map_new_name, no_overlap, post_init_log, shape, wrap) carry the same docstrings as documented under DataHandler above.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.factory.html b/_autosummary/sup3r.preprocessing.data_handlers.factory.html
new file mode 100644
index 000000000..87082b94d
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.factory.html
@@ -0,0 +1,764 @@

sup3r.preprocessing.data_handlers.factory

+

DataHandler objects, which are built through composition of Rasterizer, Loader, Deriver, and Cacher classes.

+

TODO: If .data is a Sup3rDataset with more than one member, only the high resolution data is cached. We could extend caching and loading to write / load both with groups.

+

Functions


DataHandlerFactory(cls[, BaseLoader, ...])

Build composite objects that load from file_paths, rasterize a specified region, derive new features, and cache derived data.

+
+

Classes


DailyDataHandler(file_paths, *[, features, ...])

General data handler class with daily data as an additional attribute.

DataHandler(file_paths[, features, ...])

Base DataHandler.

DataHandlerH5SolarCC(file_paths, *[, ...])

FactoryDataHandler object.

DataHandlerH5WindCC(file_paths, *[, ...])

FactoryDataHandler object.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.html b/_autosummary/sup3r.preprocessing.data_handlers.html
new file mode 100644
index 000000000..6eb7d1ca9
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.html
@@ -0,0 +1,744 @@

sup3r.preprocessing.data_handlers

+

Composite objects built from loaders, rasterizers, and derivers.


exo

Exogenous data handler and related objects.

factory

DataHandler objects, which are built through composition of Rasterizer, Loader, Deriver, and Cacher classes.

nc_cc

NETCDF DataHandler for climate change applications.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.html b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.html
new file mode 100644
index 000000000..8bb9c0c27
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.html
@@ -0,0 +1,1086 @@

sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC

class DataHandlerNCforCC(file_paths, *, features='all', nsrdb_source_fp=None, nsrdb_agg=1, nsrdb_smoothing=0, res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, interp_kwargs=None, cache_kwargs=None, **kwargs)  [source]

Bases: FactoryDataHandler

+

Extended NETCDF data handler. This implements a rasterizer hook to add "clearsky_ghi" to the rasterized data if "clearsky_ghi" is requested.
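A usage sketch with hypothetical file paths, pairing a coarse GCM netcdf with an NSRDB h5 source to derive clearsky_ratio::

    from sup3r.preprocessing.data_handlers.nc_cc import DataHandlerNCforCC

    dh = DataHandlerNCforCC(
        './gcm_rsds_daily.nc',              # hypothetical CC netcdf file
        features=['clearsky_ratio'],
        nsrdb_source_fp='./nsrdb_2019.h5',  # hypothetical NSRDB source
        nsrdb_agg=16,   # aggregate 16 NSRDB pixels per coarse CC pixel
    )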

Parameters:
• file_paths, features, res_kwargs, chunks, target, shape, time_slice, threshold, time_roll, time_shift, hr_spatial_coarsen, nan_method_kwargs, interp_kwargs, cache_kwargs, and kwargs – As documented for DataHandler above.
• nsrdb_source_fp (str | None) – Optional NSRDB source h5 file from which to retrieve clearsky_ghi, used to calculate the CC clearsky_ratio along with rsds (ghi) from the CC netcdf file.
• nsrdb_agg (int) – Optional number of NSRDB source pixels to aggregate clearsky_ghi from to a single climate change netcdf pixel. This can be used if the CC.nc data is at a much coarser resolution than the source nsrdb data.
• nsrdb_smoothing (float) – Optional gaussian filter smoothing factor to smooth out clearsky_ghi from high-resolution nsrdb source data. This is typically done because spatially aggregated nsrdb data is still usually rougher than CC irradiance data.
The Methods and Attributes tables match DataHandler above, with four additions documented below: run_input_checks, run_wrap_checks, get_time_slice, and get_clearsky_ghi.
run_input_checks()  [source]

Run checks on the files provided for extracting clearsky_ghi. Make sure the loaded data is daily data and the step size is one day.

run_wrap_checks(cs_ghi)  [source]

Run checks on rasterized data from the clearsky_ghi source.

get_time_slice(ti_nsrdb)  [source]

Get nsrdb data time slice consistent with self.time_index.

get_clearsky_ghi()  [source]

Get clearsky ghi from an exogenous NSRDB source h5 file at the target CC meta data and time index.

TODO: Replace some of this with a call to Regridder? Perform daily means with self.loader.coarsen?

Returns: cs_ghi (Union[np.ndarray, da.core.Array]) – Clearsky ghi (W/m2) from the nsrdb_source_fp h5 source file. Data shape is (lat, lon, time) where time is daily average values.
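The daily averaging behind that time axis can be pictured with plain numpy (illustrative only, not sup3r's implementation)::

    import numpy as np

    hourly = np.random.rand(365 * 24)             # one year of hourly cs_ghi
    daily = hourly.reshape(365, 24).mean(axis=1)  # daily average values
    print(daily.shape)                            # (365,)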
+
+check_registry(feature) ndarray | Array | str | None#
+

Get compute method from the registry if available. Will check for +pattern feature match in feature registry. e.g. if u_100m matches a +feature registry entry of u_(.*)m

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+derive(feature) ndarray | Array#
+

Routine to derive requested features. Employs a little recursion to +locate differently named features with a name map in the feature +registry. i.e. if FEATURE_REGISTRY contains a key, value pair like +“windspeed”: “wind_speed” then requesting “windspeed” will ultimately +return a compute method (or fetch from raw data) for “wind_speed

+
+

Note

+

Features are all saved as lower case names and __contains__ checks will +use feature.lower()

+
+
+ +
+
+do_level_interpolation(feature, interp_kwargs=None) DataArray#
+

Interpolate over height or pressure to derive the given feature.

+
+ +
+
+get_inputs(feature)#
+

Get inputs for the given feature and inputs for those inputs.

+
+ +
+
+get_multi_level_data(feature)#
+

Get data stored in multi-level arrays, like u stored on pressure +levels.

+
+ +
+
+get_single_level_data(feature)#
+

When doing level interpolation we should include the single level +data available. e.g. If we have u_100m already and want to interpolate +u_40m from multi-level data U we should add u_100m at height 100m +before doing interpolation, since 100 could be a closer level to 40m +than those available in U.

+
+ +
+
+has_interp_variables(feature)#
+

Check if the given feature can be interpolated from values at nearby +heights or from pressure level data. e.g. If u_10m and u_50m +exist then u_30m can be interpolated from these. If a pressure +level array u is available this can also be used, in conjunction +with height data.

+
+ +
+
+map_new_name(feature, pattern)#
+

If the search for a derivation method first finds an alternative +name for the feature we want to derive, by matching a wildcard pattern, +we need to replace the wildcard with the specific height or pressure we +want and continue the search for a derivation method with this new +name.

+
+ +
+
+no_overlap(feature)#
+

Check if any of the nested inputs for ‘feature’ contain ‘feature’

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a +tuple when the .data attribute belongs to a +Collection object like +BatchHandler. Otherwise this is +Sup3rDataset object, which is either a wrapped 2-tuple or +1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a +2-tuple when .data belongs to a dual container object like +DualSampler and a 1-tuple otherwise.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.html b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.html
new file mode 100644
index 000000000..73d5aab5b
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.html
@@ -0,0 +1,1085 @@

sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw

class DataHandlerNCforCCwithPowerLaw(file_paths, *, features='all', nsrdb_source_fp=None, nsrdb_agg=1, nsrdb_smoothing=0, res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, interp_kwargs=None, cache_kwargs=None, **kwargs)  [source]

Bases: DataHandlerNCforCC

+

Add power law wind methods to feature registry.
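The underlying relation is the standard power-law wind profile; a sketch follows (the shear exponent alpha = 0.2 is an assumed typical value for illustration, not a sup3r constant)::

    def powerlaw_u(u_ref, z_ref, z, alpha=0.2):
        # u(z) = u(z_ref) * (z / z_ref) ** alpha
        return u_ref * (z / z_ref) ** alpha

    print(round(powerlaw_u(8.0, 10.0, 100.0), 2))  # 12.68 m/s at 100m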

Parameters, the Methods and Attributes tables, and all method docstrings are identical to DataHandlerNCforCC above.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.html b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.html
new file mode 100644
index 000000000..8814211e8
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.html
@@ -0,0 +1,742 @@

sup3r.preprocessing.data_handlers.nc_cc

+

NETCDF DataHandler for climate change applications.

+

Classes


DataHandlerNCforCC(file_paths, *[, ...])

Extended NETCDF data handler.

DataHandlerNCforCCwithPowerLaw(file_paths, *)

Add power law wind methods to feature registry.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.html b/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.html
new file mode 100644
index 000000000..d4f34bd52
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.html
@@ -0,0 +1,986 @@

sup3r.preprocessing.derivers.base.BaseDeriver

class BaseDeriver(data: Sup3rX | Sup3rDataset, features, FeatureRegistry=None, interp_kwargs=None)  [source]

Bases: Container

+

Container subclass with additional methods for transforming / deriving data exposed through a Rasterizer object.

+
+
Parameters:
• data (Union[Sup3rX, Sup3rDataset]) – Data to use for derivations. Usually comes from the .data attribute of a Rasterizer object.
• features (list) – List of feature names to derive from the Rasterizer data. The Rasterizer object contains the features available to use in the derivation, e.g. rasterizer.features = ['windspeed', 'winddirection'] with self.features = ['U', 'V'].
• FeatureRegistry (Dict) – Optional FeatureRegistry dictionary to use for derivation method lookups. When the Deriver is asked to derive a feature that is not found in the Rasterizer data, it will look for a method to derive the feature in the registry.
• interp_kwargs (dict | None) – Dictionary of kwargs for level interpolation. Can include "method" and "run_level_check" keys. Method specifies how to perform height interpolation, e.g. deriving u_20m from u_10m and u_100m. Options are "linear" and "log". See sup3r.preprocessing.derivers.Deriver.do_level_interpolation().
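The kind of transform a registry entry performs can be illustrated with the standard meteorological u/v decomposition (an assumption for illustration, not a quote of sup3r's code)::

    import numpy as np

    ws, wd = 10.0, 270.0                # windspeed (m/s), direction (deg)
    u = -ws * np.sin(np.radians(wd))    # zonal component -> 10.0
    v = -ws * np.cos(np.radians(wd))    # meridional component -> ~0.0
    print(round(u, 2), round(v, 2))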
+
+

The Methods and Attributes tables match those shown for DataHandler above.
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+get_inputs(feature)[source]#
+

Get inputs for the given feature and inputs for those inputs.

+
+ +
+
+no_overlap(feature)[source]#
+

Check if any of the nested inputs for ‘feature’ contain ‘feature’

+
+ +
+
+check_registry(feature) ndarray | Array | str | None[source]#
+

Get compute method from the registry if available. Will check for +pattern feature match in feature registry. e.g. if u_100m matches a +feature registry entry of u_(.*)m

+
+ +
+
+map_new_name(feature, pattern)[source]#
+

If the search for a derivation method first finds an alternative +name for the feature we want to derive, by matching a wildcard pattern, +we need to replace the wildcard with the specific height or pressure we +want and continue the search for a derivation method with this new +name.

+
+ +
+
+has_interp_variables(feature)[source]#
+

Check if the given feature can be interpolated from values at nearby +heights or from pressure level data. e.g. If u_10m and u_50m +exist then u_30m can be interpolated from these. If a pressure +level array u is available this can also be used, in conjunction +with height data.

+
+ +
+
+derive(feature) ndarray | Array[source]#
+

Routine to derive requested features. Employs a little recursion to +locate differently named features with a name map in the feature +registry. i.e. if FEATURE_REGISTRY contains a key, value pair like +“windspeed”: “wind_speed” then requesting “windspeed” will ultimately +return a compute method (or fetch from raw data) for “wind_speed

+
+

Note

+

Features are all saved as lower case names and __contains__ checks will +use feature.lower()

+
+
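
As a hedged illustration, a registry combining a plain name alias with a wildcard pattern entry might look like this (the entries themselves are hypothetical; UWind is the derivation class documented later in this patch):

from sup3r.preprocessing.derivers.methods import UWind

# hypothetical registry: plain aliases are resolved recursively, wildcard
# patterns like 'u_(.*)m' match any height-suffixed request such as 'u_100m'
FEATURE_REGISTRY = {
    'windspeed': 'wind_speed',  # requesting 'windspeed' resolves to 'wind_speed'
    'u_(.*)m': UWind,           # compute method for any u_<height>m request
}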
+ +
+
+get_single_level_data(feature)[source]#
+

When doing level interpolation we should include the single level data available. e.g. If we have u_100m already and want to interpolate u_40m from multi-level data U we should add u_100m at height 100m before doing interpolation, since 100 could be a closer level to 40m than those available in U.

+
+ +
+
+get_multi_level_data(feature)[source]#
+

Get data stored in multi-level arrays, like u stored on pressure levels.

+
+ +
+
+do_level_interpolation(feature, interp_kwargs=None) DataArray[source]#
+

Interpolate over height or pressure to derive the given feature.

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.


diff --git a/_autosummary/sup3r.preprocessing.derivers.base.Deriver.html b/_autosummary/sup3r.preprocessing.derivers.base.Deriver.html
new file mode 100644
index 000000000..b977c69ef

sup3r.preprocessing.derivers.base.Deriver#

+
+
+class Deriver(data: Sup3rX | Sup3rDataset, features, time_roll=0, time_shift=None, hr_spatial_coarsen=1, nan_method_kwargs=None, FeatureRegistry=None, interp_kwargs=None)[source]#
+

Bases: BaseDeriver

+

Extends base BaseDeriver class with time_roll and hr_spatial_coarsen args.

+
+
Parameters:
+
    +
  • data (Union[Sup3rX, Sup3rDataset]) – Data used for derivations

  • features (list) – List of features to derive

  • time_roll (int) – Number of steps to roll along the time axis. Passed to xr.Dataset.roll()

  • time_shift (int | None) – Number of minutes to shift time axis. This can be used, for example, to shift the time index for daily data so that the time stamp for a given day starts at the zeroth minute instead of at noon, as is the case for most GCM data.

  • hr_spatial_coarsen (int) – Spatial coarsening factor. Passed to xr.Dataset.coarsen()

  • nan_method_kwargs (str | dict | None) – Keyword arguments for nan handling. If ‘mask’, time steps with nans will be dropped. Otherwise this should be a dict of kwargs which will be passed to Sup3rX.interpolate_na().

  • FeatureRegistry (dict) – Dictionary of DerivedFeature objects used for derivations

  • interp_kwargs (dict | None) – Dictionary of kwargs for level interpolation. Can include “method” and “run_level_check” keys. “method” specifies how to perform height interpolation, e.g. deriving u_20m from u_10m and u_100m. Options are “linear” and “log”. See sup3r.preprocessing.derivers.Deriver.do_level_interpolation()
+
+
+
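
A hedged construction sketch (the input file, feature names, and kwargs are placeholders, not canonical usage):

from sup3r.preprocessing.derivers import Deriver
from sup3r.preprocessing.loaders import Loader

loader = Loader('./era5_2015.nc')      # hypothetical NETCDF source
deriver = Deriver(
    loader.data,
    features=['windspeed_100m', 'winddirection_100m'],
    hr_spatial_coarsen=2,              # 2x spatial coarsening
    interp_kwargs={'method': 'linear'},
)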

Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_registry(feature)

Get compute method from the registry if available.

derive(feature)

Routine to derive requested features.

do_level_interpolation(feature[, interp_kwargs])

Interpolate over height or pressure to derive the given feature.

get_inputs(feature)

Get inputs for the given feature and inputs for those inputs.

get_multi_level_data(feature)

Get data stored in multi-level arrays, like u stored on pressure levels.

get_single_level_data(feature)

When doing level interpolation we should include the single level data available.

has_interp_variables(feature)

Check if the given feature can be interpolated from values at nearby heights or from pressure level data.

map_new_name(feature, pattern)

If the search for a derivation method first finds an alternative name for the feature we want to derive, by matching a wildcard pattern, we need to replace the wildcard with the specific height or pressure we want and continue the search for a derivation method with this new name.

no_overlap(feature)

Check if any of the nested inputs for 'feature' contain 'feature'

post_init_log([args_dict])

Log additional arguments after initialization.

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + +

FEATURE_REGISTRY

data

Return underlying data.

shape

Get shape of underlying data.

+
+
+
+check_registry(feature) ndarray | Array | str | None#
+

Get compute method from the registry if available. Will check for a pattern feature match in the feature registry, e.g. u_100m matches a feature registry entry of u_(.*)m.

+
+ +
+
+derive(feature) ndarray | Array#
+

Routine to derive requested features. Employs a little recursion to locate differently named features with a name map in the feature registry. i.e. if FEATURE_REGISTRY contains a key, value pair like “windspeed”: “wind_speed” then requesting “windspeed” will ultimately return a compute method (or fetch from raw data) for “wind_speed”.

+
+

Note

+

Features are all saved as lower case names and __contains__ checks will +use feature.lower()

+
+
+ +
+
+do_level_interpolation(feature, interp_kwargs=None) DataArray#
+

Interpolate over height or pressure to derive the given feature.

+
+ +
+
+get_inputs(feature)#
+

Get inputs for the given feature and inputs for those inputs.

+
+ +
+
+get_multi_level_data(feature)#
+

Get data stored in multi-level arrays, like u stored on pressure +levels.

+
+ +
+
+get_single_level_data(feature)#
+

When doing level interpolation we should include the single level +data available. e.g. If we have u_100m already and want to interpolate +u_40m from multi-level data U we should add u_100m at height 100m +before doing interpolation, since 100 could be a closer level to 40m +than those available in U.

+
+ +
+
+has_interp_variables(feature)#
+

Check if the given feature can be interpolated from values at nearby +heights or from pressure level data. e.g. If u_10m and u_50m +exist then u_30m can be interpolated from these. If a pressure +level array u is available this can also be used, in conjunction +with height data.

+
+ +
+
+map_new_name(feature, pattern)#
+

If the search for a derivation method first finds an alternative +name for the feature we want to derive, by matching a wildcard pattern, +we need to replace the wildcard with the specific height or pressure we +want and continue the search for a derivation method with this new +name.

+
+ +
+
+no_overlap(feature)#
+

Check if any of the nested inputs for ‘feature’ contain ‘feature’

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a +tuple when the .data attribute belongs to a +Collection object like +BatchHandler. Otherwise this is +Sup3rDataset object, which is either a wrapped 2-tuple or +1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a +2-tuple when .data belongs to a dual container object like +DualSampler and a 1-tuple otherwise.

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()


diff --git a/_autosummary/sup3r.preprocessing.derivers.base.html b/_autosummary/sup3r.preprocessing.derivers.base.html
new file mode 100644
index 000000000..ee89ba52b

sup3r.preprocessing.derivers.base#

+

Basic objects that can perform derivations of new features from loaded / +rasterized features.

+

Classes

+
+ + + + + + + + +

BaseDeriver(data, features[, ...])

Container subclass with additional methods for transforming / deriving data exposed through a Rasterizer object.

Deriver(data, features[, time_roll, ...])

Extends base BaseDeriver class with time_roll and hr_spatial_coarsen args.


diff --git a/_autosummary/sup3r.preprocessing.derivers.html b/_autosummary/sup3r.preprocessing.derivers.html
new file mode 100644
index 000000000..c4023817d

sup3r.preprocessing.derivers#

+

Loader subclass with methods for extracting and processing the contained +data.

+
+ + + + + + + + + + + +

base

Basic objects that can perform derivations of new features from loaded / rasterized features.

methods

Derivation methods for deriving features from raw data.

utilities

Miscellaneous utilities shared across the derivers module


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.html b/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.html
new file mode 100644
index 000000000..2e4237cb5

sup3r.preprocessing.derivers.methods.ClearSkyRatio#

+
+
+class ClearSkyRatio[source]#
+

Bases: DerivedFeature

+

Clear Sky Ratio feature class. Inputs here are typically found in H5 +data like the NSRDB

+

Methods

+
+ + + + + +

compute(data)

Compute the clearsky ratio

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Compute the clearsky ratio

+
+
Returns:
+

cs_ratio (ndarray) – Clearsky ratio, e.g. the all-sky ghi / the clearsky ghi. NaN where +nighttime.
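
The underlying arithmetic is a simple ratio; a sketch with made-up values (masking night via zero clearsky GHI here is illustrative, not the exact implementation):

import numpy as np

ghi = np.array([0.0, 120.0, 480.0])           # all-sky irradiance
clearsky_ghi = np.array([0.0, 200.0, 600.0])  # clear-sky irradiance

night = clearsky_ghi == 0
cs_ratio = np.where(night, np.nan, ghi / np.where(night, 1.0, clearsky_ghi))
print(cs_ratio)  # [nan 0.6 0.8]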


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.html b/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.html
new file mode 100644
index 000000000..c76410166

sup3r.preprocessing.derivers.methods.ClearSkyRatioCC#

+
+
+class ClearSkyRatioCC[source]#
+

Bases: DerivedFeature

+

Clear Sky Ratio feature class for computing from climate change netcdf +data

+

Methods

+
+ + + + + +

compute(data)

Compute the daily average climate change clearsky ratio

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Compute the daily average climate change clearsky ratio

+
+
Parameters:
+

data (Union[Sup3rX, Sup3rDataset]) – xarray dataset used for this computation, must include clearsky_ghi and rsds (rsds == ghi for cc datasets)

+
+
Returns:
+

cs_ratio (ndarray) – Clearsky ratio, e.g. the all-sky ghi / the clearsky ghi. This is +assumed to be daily average data for climate change source data.


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.html b/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.html
new file mode 100644
index 000000000..47e5f24f2

sup3r.preprocessing.derivers.methods.CloudMask#

+
+
+class CloudMask[source]#
+

Bases: DerivedFeature

+

Cloud Mask feature class. Inputs here are typically found in H5 data +like the NSRDB.

+

Methods

+
+ + + + + +

compute(data)

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+
+
Returns:
+

cloud_mask (ndarray) – Cloud mask, e.g. 1 where cloudy, 0 where clear. NaN where +nighttime. Data is float32 so it can be normalized without any +integer weirdness.


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.html b/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.html
new file mode 100644
index 000000000..9c8f9bf1b

sup3r.preprocessing.derivers.methods.DerivedFeature#

+
+
+class DerivedFeature[source]#
+

Bases: ABC

+

Abstract class for special features which need to be derived from raw +features

+
+

Note

+

inputs list will be used to search already derived / loaded data so this +should include all features required for a successful .compute call.

+
+

Methods

+
+ + + + + +

compute(data, **kwargs)

Compute method for derived feature.

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+abstract classmethod compute(data: Sup3rX | Sup3rDataset, **kwargs)[source]#
+

Compute method for derived feature. This can use any of the features +contained in the xr.Dataset data and the attributes (e.g. +.lat_lon, .time_index accessed through Sup3rX accessor).

+
+
Parameters:
+
    +
  • data (Union[Sup3rX, Sup3rDataset]) – Initialized and standardized through a Loader with a +specific spatiotemporal extent rasterized for the features +contained using a Rasterizer.

  • +
  • kwargs (dict) – Optional keyword arguments used in derivation. height is a typical +example. Could also be pressure.
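
A minimal sketch of a custom derived feature following this interface (the class, its inputs, and the gust-factor formula are illustrative assumptions, not part of sup3r):

from sup3r.preprocessing.derivers.methods import DerivedFeature

class GustProxy(DerivedFeature):
    """Hypothetical gust proxy: wind vector magnitude times a fixed factor."""

    inputs = ('u_(.*)', 'v_(.*)')  # features that must already be loaded / derived

    @classmethod
    def compute(cls, data, height):
        u = data[f'u_{height}m']
        v = data[f'v_{height}m']
        return 1.4 * (u**2 + v**2) ** 0.5  # assumed gust factor of 1.4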


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.html b/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.html
new file mode 100644
index 000000000..8b1ccf4c5

sup3r.preprocessing.derivers.methods.PressureWRF#

+
+
+class PressureWRF[source]#
+

Bases: DerivedFeature

+

Pressure feature class for WRF data. Needed since P is perturbation +pressure.

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute pressure from NETCDF data

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute pressure from NETCDF data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.html b/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.html
new file mode 100644
index 000000000..1f8d99788

sup3r.preprocessing.derivers.methods.SurfaceRH#

+
+
+class SurfaceRH[source]#
+

Bases: DerivedFeature

+

Surface Relative humidity feature for computing rh from dewpoint +temperature and ambient temperature. This is in a 0 - 100 scale to match +the ERA5 pressure level relative humidity scale.

+

https://earthscience.stackexchange.com/questions/24156/era5-single-level-calculate-relative-humidity

+

https://journals.ametsoc.org/view/journals/bams/86/2/bams-86-2-225.xml?tab_body=pdf

+

Methods

+
+ + + + + +

compute(data)

Compute surface relative humidity.

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Compute surface relative humidity.


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.Sza.html b/_autosummary/sup3r.preprocessing.derivers.methods.Sza.html
new file mode 100644
index 000000000..e94235be2

sup3r.preprocessing.derivers.methods.Sza#

+
+
+class Sza[source]#
+

Bases: DerivedFeature

+

Solar zenith angle derived feature.

+

Methods

+
+ + + + + +

compute(data)

Compute method for sza.

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Compute method for sza.


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.Tas.html b/_autosummary/sup3r.preprocessing.derivers.methods.Tas.html
new file mode 100644
index 000000000..88d3f1edc

sup3r.preprocessing.derivers.methods.Tas#

+
+
+class Tas[source]#
+

Bases: DerivedFeature

+

Air temperature near surface variable from climate change nc files

+

Methods

+
+ + + + + +

compute(data)

Method to compute tas in Celsius from tas source in Kelvin

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Method to compute tas in Celsius from tas source in Kelvin


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.html b/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.html
new file mode 100644
index 000000000..a67d3cd6c

sup3r.preprocessing.derivers.methods.TasMax#

+
+
+class TasMax[source]#
+

Bases: Tas

+

Daily max air temperature near surface variable from climate change nc +files

+

Methods

+
+ + + + + +

compute(data)

Method to compute tas in Celsius from tas source in Kelvin

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)#
+

Method to compute tas in Celsius from tas source in Kelvin


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.html b/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.html
new file mode 100644
index 000000000..34876ddf5

sup3r.preprocessing.derivers.methods.TasMin#

+
+
+class TasMin[source]#
+

Bases: Tas

+

Daily min air temperature near surface variable from climate change nc +files

+

Methods

+
+ + + + + +

compute(data)

Method to compute tas in Celsius from tas source in Kelvin

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)#
+

Method to compute tas in Celsius from tas source in Kelvin


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.html b/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.html
new file mode 100644
index 000000000..a14218d6a

sup3r.preprocessing.derivers.methods.TempNCforCC#

+
+
+class TempNCforCC[source]#
+

Bases: DerivedFeature

+

Air temperature variable from climate change nc files

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute ta in Celsius from ta source in Kelvin

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute ta in Celsius from ta source in Kelvin


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.USolar.html b/_autosummary/sup3r.preprocessing.derivers.methods.USolar.html
new file mode 100644
index 000000000..9e8145b62

sup3r.preprocessing.derivers.methods.USolar#

+
+
+class USolar[source]#
+

Bases: DerivedFeature

+

U wind component feature class with needed inputs method and compute +method for NSRDB data (which has just a single windspeed hub height)

+

Methods

+
+ + + + + +

compute(data)

Method to compute U wind component from data

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Method to compute U wind component from data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.UWind.html b/_autosummary/sup3r.preprocessing.derivers.methods.UWind.html
new file mode 100644
index 000000000..5819f4a41

sup3r.preprocessing.derivers.methods.UWind#

+
+
+class UWind[source]#
+

Bases: DerivedFeature

+

U wind component feature class with needed inputs method and compute +method

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute U wind component from data

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute U wind component from data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.html b/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.html
new file mode 100644
index 000000000..b3c91bb3c

sup3r.preprocessing.derivers.methods.UWindPowerLaw#

+
+
+class UWindPowerLaw[source]#
+

Bases: DerivedFeature

+

U wind component feature class with needed inputs method and compute +method. Uses power law extrapolation to get values above surface

+

https://csl.noaa.gov/projects/lamar/windshearformula.html +https://www.tandfonline.com/doi/epdf/10.1080/00022470.1977.10470503

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute U wind component from data

+
+

Attributes

+
+ + + + + + + + + + + +

ALPHA

NEAR_SFC_HEIGHT

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute U wind component from data

+
+
Parameters:
+
    +
  • data (Union[Sup3rX, Sup3rDataset]) – Initialized and standardized through a Loader with a +specific spatiotemporal extent rasterized for the features +contained using a Rasterizer.

  • +
  • height (str | int) – Height at which to compute the derived feature

  • +
+
+
Returns:
+

ndarray – Derived feature array
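
The power-law extrapolation itself is one line; a sketch with assumed values for the ALPHA and NEAR_SFC_HEIGHT class attributes (the real values are set on the class and may differ):

import numpy as np

ALPHA = 0.2           # assumed shear exponent, for illustration only
NEAR_SFC_HEIGHT = 10  # assumed near-surface reference height in meters

ws_sfc = np.array([5.0, 6.5])                        # near-surface windspeed
ws_100m = ws_sfc * (100 / NEAR_SFC_HEIGHT) ** ALPHA  # power-law profile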


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.html b/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.html
new file mode 100644
index 000000000..260ad8ef5

sup3r.preprocessing.derivers.methods.VSolar#

+
+
+class VSolar[source]#
+

Bases: DerivedFeature

+

V wind component feature class with needed inputs method and compute +method for NSRDB data (which has just a single windspeed hub height)

+

Methods

+
+ + + + + +

compute(data)

Method to compute V wind component from data

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data)[source]#
+

Method to compute V wind component from data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.VWind.html b/_autosummary/sup3r.preprocessing.derivers.methods.VWind.html
new file mode 100644
index 000000000..efa1e4dc7

sup3r.preprocessing.derivers.methods.VWind#

+
+
+class VWind[source]#
+

Bases: DerivedFeature

+

V wind component feature class with needed inputs method and compute +method

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute V wind component from data

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute V wind component from data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.html b/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.html
new file mode 100644
index 000000000..c84c6987b

sup3r.preprocessing.derivers.methods.VWindPowerLaw#

+
+
+class VWindPowerLaw[source]#
+

Bases: DerivedFeature

+

V wind component feature class with needed inputs method and compute +method. Uses power law extrapolation to get values above surface

+

https://csl.noaa.gov/projects/lamar/windshearformula.html +https://www.tandfonline.com/doi/epdf/10.1080/00022470.1977.10470503

+

Methods

+
+ + + + + +

compute(data, height)

Method to compute V wind component from data

+
+

Attributes

+
+ + + + + + + + + + + +

ALPHA

NEAR_SFC_HEIGHT

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Method to compute V wind component from data


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.html b/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.html
new file mode 100644
index 000000000..d88afa0b3

sup3r.preprocessing.derivers.methods.Winddirection#

+
+
+class Winddirection[source]#
+

Bases: DerivedFeature

+

Winddirection feature from rasterized data

+

Methods

+
+ + + + + +

compute(data, height)

Compute winddirection

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Compute winddirection


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.html b/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.html
new file mode 100644
index 000000000..b947fa51c

sup3r.preprocessing.derivers.methods.Windspeed#

+
+
+class Windspeed[source]#
+

Bases: DerivedFeature

+

Windspeed feature from rasterized data

+

Methods

+
+ + + + + +

compute(data, height)

Compute windspeed

+
+

Attributes

+
+ + + + + +

inputs

+
+
+
+classmethod compute(data, height)[source]#
+

Compute windspeed


diff --git a/_autosummary/sup3r.preprocessing.derivers.methods.html b/_autosummary/sup3r.preprocessing.derivers.methods.html
new file mode 100644
index 000000000..9a880abd5

sup3r.preprocessing.derivers.methods#

+

Derivation methods for deriving features from raw data.

+

Classes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ClearSkyRatio()

Clear Sky Ratio feature class.

ClearSkyRatioCC()

Clear Sky Ratio feature class for computing from climate change netcdf data

CloudMask()

Cloud Mask feature class.

DerivedFeature()

Abstract class for special features which need to be derived from raw features

PressureWRF()

Pressure feature class for WRF data.

SurfaceRH()

Surface Relative humidity feature for computing rh from dewpoint temperature and ambient temperature.

Sza()

Solar zenith angle derived feature.

Tas()

Air temperature near surface variable from climate change nc files

TasMax()

Daily max air temperature near surface variable from climate change nc files

TasMin()

Daily min air temperature near surface variable from climate change nc files

TempNCforCC()

Air temperature variable from climate change nc files

USolar()

U wind component feature class with needed inputs method and compute method for NSRDB data (which has just a single windspeed hub height)

UWind()

U wind component feature class with needed inputs method and compute method

UWindPowerLaw()

U wind component feature class with needed inputs method and compute method.

VSolar()

V wind component feature class with needed inputs method and compute method for NSRDB data (which has just a single windspeed hub height)

VWind()

V wind component feature class with needed inputs method and compute method

VWindPowerLaw()

V wind component feature class with needed inputs method and compute method.

Winddirection()

Winddirection feature from rasterized data

Windspeed()

Windspeed feature from rasterized data


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.html b/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.html
new file mode 100644
index 000000000..218d0d207

sup3r.preprocessing.derivers.utilities.SolarZenith#

+
+
+class SolarZenith[source]#
+

Bases: object

+

Class to compute solar zenith angle. Uses SPA from rex and wraps some of those methods in dask.array.map_blocks so this can be computed in parallel across chunks.

+

Methods

+
+ + + + + +

get_zenith(time_index, lat_lon[, ll_chunks])

Compute solar zenith angle from time_index and location

+
+
+
+static get_zenith(time_index, lat_lon, ll_chunks=(10, 10, 1))[source]#
+

Compute solar zenith angle from time_index and location

+
+
Parameters:
+
    +
  • time_index (ndarray | pandas.DatetimeIndex | str) – Datetime stamps of interest

  • +
  • lat_lon (ndarray, da.core.Array) – (latitude, longitude, 2) for site(s) of interest

  • +
  • ll_chunks (tuple) – Chunks for lat_lon array. To run this on a large domain, even with +delayed computations through dask, we need to use small chunks for +the lat lon array.

  • +
+
+
Returns:
+

zenith (da.core.Array) – Solar zenith angle in degrees
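
A hedged usage sketch (the grid coordinates and time range are made up; the output layout is assumed to follow the lat_lon grid plus time):

import numpy as np
import pandas as pd
from sup3r.preprocessing.derivers.utilities import SolarZenith

lats, lons = np.meshgrid([39.7, 39.8], [-105.2, -105.1], indexing='ij')
lat_lon = np.dstack([lats, lons])                # (2, 2, 2) grid of lat / lon
times = pd.date_range('2023-06-01', periods=24, freq='h')

zenith = SolarZenith.get_zenith(times, lat_lon)  # dask array, degrees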


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.html b/_autosummary/sup3r.preprocessing.derivers.utilities.html
new file mode 100644
index 000000000..dc486a31a

sup3r.preprocessing.derivers.utilities#

+

Miscellaneous utilities shared across the derivers module

+

Functions

+
+ + + + + + + + + + + + + + +

invert_uv(u, v, lat_lon)

Transform u and v back to windspeed and winddirection

parse_feature(feature)

Parse feature name to get the "basename" (e.g. u for u_100m), the height (100 for u_100m), and the pressure if available (1000 for u_1000pa).

transform_rotate_wind(ws, wd, lat_lon)

Transform windspeed/direction to u and v and align u and v with grid

windspeed_log_law(z, a, b, c)

Windspeed log profile.

+
+

Classes

+
+ + + + + +

SolarZenith()

Class to compute solar zenith angle.


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.html b/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.html
new file mode 100644
index 000000000..e55b361b9

sup3r.preprocessing.derivers.utilities.invert_uv#

+
+
+invert_uv(u, v, lat_lon)[source]#
+

Transform u and v back to windspeed and winddirection

+
+
Parameters:
+
    +
  • u (Union[np.ndarray, da.core.Array]) – 3D array of high res U data +(spatial_1, spatial_2, temporal)

  • +
  • v (Union[np.ndarray, da.core.Array]) – 3D array of high res V data +(spatial_1, spatial_2, temporal)

  • +
  • lat_lon (Union[np.ndarray, da.core.Array]) – 3D array of lat lon +(spatial_1, spatial_2, 2) +Last dimension has lat / lon in that order

  • +
+
+
Returns:
+

    +
  • ws (Union[np.ndarray, da.core.Array]) – 3D array of high res windspeed data +(spatial_1, spatial_2, temporal)

  • +
  • wd (Union[np.ndarray, da.core.Array]) – 3D array of high res winddirection data. Angle is in degrees and +measured relative to the south_north direction. +(spatial_1, spatial_2, temporal)
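
Ignoring the grid rotation handled through lat_lon, the core conversion is standard trigonometry; a sketch assuming the meteorological convention, for illustration only:

import numpy as np

u, v = 3.0, -4.0
ws = np.hypot(u, v)                                  # magnitude: 5.0
wd = (270.0 - np.degrees(np.arctan2(v, u))) % 360.0  # ~323.13 degrees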


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.html b/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.html
new file mode 100644
index 000000000..c303e5103

sup3r.preprocessing.derivers.utilities.parse_feature#

+
+
+parse_feature(feature)[source]#
+

Parse feature name to get the “basename” (e.g. u for u_100m), the height (100 for u_100m), and the pressure if available (1000 for u_1000pa).
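
A hedged example of the expected behavior (the attribute names are inferred from the description above, not confirmed from source):

from sup3r.preprocessing.derivers.utilities import parse_feature

fstruct = parse_feature('u_100m')
# expected, per the description: fstruct.basename == 'u',
# fstruct.height == 100, fstruct.pressure is None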


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.html b/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.html
new file mode 100644
index 000000000..ba1b400f2

sup3r.preprocessing.derivers.utilities.transform_rotate_wind#

+
+
+transform_rotate_wind(ws, wd, lat_lon)[source]#
+

Transform windspeed/direction to u and v and align u and v with grid

+
+
Parameters:
+
    +
  • ws (Union[np.ndarray, da.core.Array]) – 3D array of high res windspeed data +(spatial_1, spatial_2, temporal)

  • +
  • wd (Union[np.ndarray, da.core.Array]) – 3D array of high res winddirection data. Angle is in degrees and +measured relative to the south_north direction. +(spatial_1, spatial_2, temporal)

  • +
  • lat_lon (Union[np.ndarray, da.core.Array]) – 3D array of lat lon +(spatial_1, spatial_2, 2) +Last dimension has lat / lon in that order

  • +
+
+
Returns:
+

    +
  • u (Union[np.ndarray, da.core.Array]) – 3D array of high res U data +(spatial_1, spatial_2, temporal)

  • +
  • v (Union[np.ndarray, da.core.Array]) – 3D array of high res V data +(spatial_1, spatial_2, temporal)
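
This is the inverse of invert_uv(); a sketch of the ws/wd to u/v step, again assuming the meteorological convention and ignoring the grid alignment applied via lat_lon:

import numpy as np

ws, wd = 5.0, 323.13         # windspeed (m/s), direction (degrees)
theta = np.radians(270.0 - wd)
u = ws * np.cos(theta)       # eastward component, ~3.0
v = ws * np.sin(theta)       # northward component, ~-4.0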


diff --git a/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.html b/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.html
new file mode 100644
index 000000000..b79e6f628

sup3r.preprocessing.derivers.utilities.windspeed_log_law#

+
+
+windspeed_log_law(z, a, b, c)[source]#
+

Windspeed log profile.

+
+
Parameters:
+
    +
  • z (float) – Height above ground in meters

  • +
  • a (float) – Proportional to friction velocity

  • +
  • b (float) – Related to zero-plane displacement in meters (height above the ground +at which zero mean wind speed is achieved as a result of flow obstacles +such as trees or buildings)

  • +
  • c (float) – Proportional to stability term.

  • +
+
+
Returns:
+

ws (float) – Value of windspeed at a given height.
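
A sketch of a profile consistent with these parameter descriptions; the exact functional form shown here, ws = a * ln(z + b) + c, is an assumption for illustration:

import numpy as np

def windspeed_log_law_sketch(z, a, b, c):
    # assumed form: friction-velocity scale * log height term + stability offset
    return a * np.log(z + b) + c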


diff --git a/_autosummary/sup3r.preprocessing.html b/_autosummary/sup3r.preprocessing.html
new file mode 100644
index 000000000..da20e66a1

sup3r.preprocessing#

+

Sup3r preprocessing module. Here you will find things that have access to data, which we call Containers. Loaders, Rasterizers, Samplers, Derivers, Handlers, Batchers, etc. are all subclasses of Containers. Rather than having a single object that does everything - extract data, compute features, sample the data for batching, split into train and val, etc. - we have fundamental objects that each do one of these things, and we build multi-purpose objects with class factories. These factory-generated objects are DataHandlers and BatchHandlers.

If you want to extract a specific spatiotemporal extent from a data file then use Rasterizer. If you want to split into a test and validation set then use Rasterizer to extract different temporal extents separately. If you’ve already rasterized data and written that to a file and then want to sample that data for batches, then use a Loader (or a DataHandler), and give that object to a BatchHandler. If you want to have training and validation batches then load those separate data sets and provide them to BatchHandler. If you want a BatchQueue containing pairs of low / high res data, rather than coarsening high-res to get low res, then load lr and hr data with separate Loaders or DataHandlers, use DualRasterizer to match the lr and hr grids, and provide this to DualBatchHandler.
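
A hedged end-to-end sketch of that workflow (class names are from this package; the paths, features, and enhancement factors are placeholders):

from sup3r.preprocessing import BatchHandler, DataHandler

train = DataHandler('./era5_2012_2014.nc', features=['u_100m', 'v_100m'])
val = DataHandler('./era5_2015.nc', features=['u_100m', 'v_100m'])

batcher = BatchHandler(
    train_containers=[train],
    val_containers=[val],
    batch_size=16,
    n_batches=32,
    s_enhance=5,    # spatial enhancement factor
    t_enhance=24,   # temporal enhancement factor
)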

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

accessor

Accessor for xarray.

base

Base classes - fundamental dataset objects and the base Container object, which just contains dataset objects.

batch_handlers

Composite objects built from batch queues and samplers.

batch_queues

Container collection objects used to build batches for training.

cachers

Basic Cacher container.

collections

Classes consisting of collections of containers.

data_handlers

Composite objects built from loaders, rasterizers, and derivers.

derivers

Loader subclass with methods for extracting and processing the contained data.

loaders

Container subclass with additional methods for loading the contained data.

names

Mappings from coord / dim / feature names to standard names and Dimension class for standardizing dimension orders and names.

rasterizers

Container subclass with methods for extracting specific spatiotemporal extents from data.

samplers

Container subclass with methods for sampling contained data.

utilities

Methods used across container objects.


diff --git a/_autosummary/sup3r.preprocessing.loaders.Loader.html b/_autosummary/sup3r.preprocessing.loaders.Loader.html
new file mode 100644
index 000000000..1886e8a89

sup3r.preprocessing.loaders.Loader#

+
+
+class Loader(file_paths, *, features='all', res_kwargs=None, chunks='auto', BaseLoader=None)[source]#
+

Bases: object

+
+
Parameters:
+
    +
  • file_paths (str | pathlib.Path | list) – Location(s) of files to load

  • +
  • features (list | str) – Features to return in loaded dataset. If ‘all’ then all available +features will be returned.

  • +
  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. +BaseLoader is usually xr.open_mfdataset for NETCDF files and +MultiFileResourceX for H5 files.

  • +
  • chunks (dict | str | None) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be “auto” in addition to a dictionary. If this is None then the data will not be chunked and instead loaded directly into memory.

  • +
  • BaseLoader (Callable) – Optional base loader update. The default for H5 files is +MultiFileResourceX and for NETCDF is xarray.open_mfdataset

  • +
+
+
+

Override parent class to return type specific class based on +source_file

+

Methods

+
+ + +
+
+

Attributes

+
+ + + + + +

TypeSpecificClasses
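
A hedged usage sketch (file paths are placeholders; H5 and NETCDF inputs dispatch to type-specific subclasses as described above):

from sup3r.preprocessing.loaders import Loader

wtk = Loader('./wtk_2013.h5', features=['windspeed_100m'])
era = Loader('./era5_*.nc', chunks={'time': 100})  # lazy, dask-backed
print(era.data.shape)  # (lats, lons, time, features)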


diff --git a/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.html b/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.html
new file mode 100644
index 000000000..de20ad7e7

sup3r.preprocessing.loaders.base.BaseLoader#

+
+
+class BaseLoader(file_paths, features='all', res_kwargs=None, chunks='auto', BaseLoader=None)[source]#
+

Bases: Container, ABC

+

Base loader. “Loads” files so that a .data attribute provides access to the data in the files as a dask array with shape (lats, lons, time, features). This object provides a __getitem__ method that can be used by Sampler objects to build batches or by Rasterizer objects to derive / extract specific features / regions / time_periods.

+
+
Parameters:
+
    +
  • file_paths (str | pathlib.Path | list) – Location(s) of files to load

  • +
  • features (list | str) – Features to return in loaded dataset. If ‘all’ then all available +features will be returned.

  • +
  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. +BaseLoader is usually xr.open_mfdataset for NETCDF files and +MultiFileResourceX for H5 files.

  • +
  • chunks (dict | str | None) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be “auto” in addition to a dictionary. If this is None then the data will not be chunked and instead loaded directly into memory.

  • +
  • BaseLoader (Callable) – Optional base loader update. The default for H5 files is +MultiFileResourceX and for NETCDF is xarray.open_mfdataset

  • +
+
+
+

Methods

BASE_LOADER(**kwargs)
Wrapper for xr.open_mfdataset with default opening options.

post_init_log([args_dict])
Log additional arguments after initialization.

wrap(data)
Return a Sup3rDataset object or tuple of such.

Attributes

data
Return underlying data.

file_paths
Get file paths for input data

shape
Get shape of underlying data.
BASE_LOADER(**kwargs)#

Wrapper for xr.open_mfdataset with default opening options.

property data#

Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

property file_paths#

Get file paths for input data

post_init_log(args_dict=None)#

Log additional arguments after initialization.

property shape#

Get shape of underlying data.

wrap(data)#

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
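To make the lazy-access contract above concrete, here is a hedged sketch; the file path and feature name are hypothetical, and the slicing semantics are assumed from the (lats, lons, time, features) layout described in the docstring::

    from sup3r.preprocessing.loaders import Loader

    # Nothing is read eagerly: .data is dask-backed.
    loader = Loader('./era5_2015.nc', features=['u_100m'])

    # __getitem__-style selection is how Sampler / Rasterizer objects
    # pull specific features / regions / time periods from a loader.
    subset = loader.data['u_100m'][:10, :10, :100]  # still lazy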
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.base.html b/_autosummary/sup3r.preprocessing.loaders.base.html
new file mode 100644
index 000000000..d63580023
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.base.html

sup3r.preprocessing.loaders.base — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.base#

Abstract Loader class merely for loading data from file paths. This data is always loaded lazily.

Classes

BaseLoader(file_paths[, features, ...])
Base loader.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.html b/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.html
new file mode 100644
index 000000000..5257272ac
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.html

sup3r.preprocessing.loaders.h5.LoaderH5 — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.h5.LoaderH5#

class LoaderH5(file_paths, features='all', res_kwargs=None, chunks='auto', BaseLoader=None)[source]#

Bases: BaseLoader

Base H5 loader. "Loads" h5 files so that a .data attribute provides access to the data in the files. This object provides a __getitem__ method that can be used by Sampler objects to build batches or by Rasterizer objects to derive / extract specific features / regions / time_periods.

Parameters:

  • file_paths (str | pathlib.Path | list) – Location(s) of files to load

  • features (list | str) – Features to return in loaded dataset. If 'all' then all available features will be returned.

  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. BaseLoader is usually xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 files.

  • chunks (dict | str | None) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be "auto" in addition to a dictionary. If this is None then the data will not be chunked and instead loaded directly into memory.

  • BaseLoader (Callable) – Optional base loader update. The default for H5 files is MultiFileResourceX and for NETCDF is xarray.open_mfdataset

Methods

post_init_log([args_dict])
Log additional arguments after initialization.

scale_factor(feature)
Get scale factor for given feature.

wrap(data)
Return a Sup3rDataset object or tuple of such.

Attributes

data
Return underlying data.

file_paths
Get file paths for input data

shape
Get shape of underlying data.
BASE_LOADER#

alias of MultiFileWindX

scale_factor(feature)[source]#

Get scale factor for given feature. Data is stored in scaled form to reduce memory.

property data#

Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

property file_paths#

Get file paths for input data

post_init_log(args_dict=None)#

Log additional arguments after initialization.

property shape#

Get shape of underlying data.

wrap(data)#

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
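A short sketch of the H5 path specifically (file path and feature name are hypothetical)::

    from sup3r.preprocessing.loaders.h5 import LoaderH5

    # BASE_LOADER defaults to MultiFileWindX for H5 inputs.
    loader = LoaderH5('./wtk_2015.h5', features=['windspeed_100m'])

    # H5 data is stored in scaled form to reduce memory; scale_factor
    # recovers the multiplier for converting back to physical units.
    sf = loader.scale_factor('windspeed_100m')
    print(loader.shape, sf)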
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.h5.html b/_autosummary/sup3r.preprocessing.loaders.h5.html
new file mode 100644
index 000000000..3421da35f
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.h5.html

sup3r.preprocessing.loaders.h5 — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.h5#

Base loading class for H5 files.

TODO: Explore replacing rex handlers with xarray. xarray should be able to load H5 files fine. We would still need the get_raster_index method in Rasterizers though.

Classes

LoaderH5(file_paths[, features, res_kwargs, ...])
Base H5 loader.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.html b/_autosummary/sup3r.preprocessing.loaders.html
new file mode 100644
index 000000000..54c836337
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.html

sup3r.preprocessing.loaders — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders#

Container subclass with additional methods for loading the contained data.

Classes

Loader(file_paths, *[, features, ...])

Submodules

base
Abstract Loader class merely for loading data from file paths.

h5
Base loading class for H5 files.

nc
Base loading classes.

utilities
Utilities used by Loaders.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.html b/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.html
new file mode 100644
index 000000000..0ae202e6f
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.html

sup3r.preprocessing.loaders.nc.LoaderNC — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.nc.LoaderNC#

class LoaderNC(file_paths, features='all', res_kwargs=None, chunks='auto', BaseLoader=None)[source]#

Bases: BaseLoader

Base NETCDF loader. "Loads" netcdf files so that a .data attribute provides access to the data in the files. This object provides a __getitem__ method that can be used by Sampler objects to build batches or by other objects to derive / extract specific features / regions / time_periods.

Parameters:

  • file_paths (str | pathlib.Path | list) – Location(s) of files to load

  • features (list | str) – Features to return in loaded dataset. If 'all' then all available features will be returned.

  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. BaseLoader is usually xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 files.

  • chunks (dict | str | None) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be "auto" in addition to a dictionary. If this is None then the data will not be chunked and instead loaded directly into memory.

  • BaseLoader (Callable) – Optional base loader update. The default for H5 files is MultiFileResourceX and for NETCDF is xarray.open_mfdataset

Methods

BASE_LOADER(file_paths, **kwargs)
Lowest level interface to data.

get_coords(res)
Get coordinate dictionary to use in xr.Dataset().assign_coords().

get_dims(res)
Get dimension name map using our standard mapping and the names used for coordinate dimensions.

post_init_log([args_dict])
Log additional arguments after initialization.

wrap(data)
Return a Sup3rDataset object or tuple of such.

Attributes

data
Return underlying data.

file_paths
Get file paths for input data

shape
Get shape of underlying data.
BASE_LOADER(file_paths, **kwargs)[source]#

Lowest level interface to data.

static get_coords(res)[source]#

Get coordinate dictionary to use in xr.Dataset().assign_coords().

static get_dims(res)[source]#

Get dimension name map using our standard mapping and the names used for coordinate dimensions.

property data#

Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

property file_paths#

Get file paths for input data

post_init_log(args_dict=None)#

Log additional arguments after initialization.

property shape#

Get shape of underlying data.

wrap(data)#

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
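A corresponding sketch for NETCDF inputs (the file glob and feature names are hypothetical); res_kwargs is forwarded to xr.open_mfdataset, the default BASE_LOADER for NETCDF data::

    from sup3r.preprocessing.loaders.nc import LoaderNC

    loader = LoaderNC(
        './era5_2015_*.nc',
        features=['u_100m', 'v_100m'],
        res_kwargs={'parallel': True},  # passed to xr.open_mfdataset
    )
    print(loader.shape)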
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.nc.html b/_autosummary/sup3r.preprocessing.loaders.nc.html
new file mode 100644
index 000000000..49612ff38
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.nc.html

sup3r.preprocessing.loaders.nc — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.nc#

Base loading classes. These are containers which also load data from file_paths and include some sampling ability to interface with batcher classes.

Classes

LoaderNC(file_paths[, features, res_kwargs, ...])
Base NETCDF loader.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.utilities.html b/_autosummary/sup3r.preprocessing.loaders.utilities.html
new file mode 100644
index 000000000..f2f6d0523
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.utilities.html

sup3r.preprocessing.loaders.utilities — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.utilities#

Utilities used by Loaders.

Functions

standardize_names(data, standard_names)
Standardize fields in the dataset using the standard_names dictionary.

standardize_values(data)
Standardize units and coordinate values.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.html b/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.html
new file mode 100644
index 000000000..9ebdd3911
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.html

sup3r.preprocessing.loaders.utilities.standardize_names — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.utilities.standardize_names#

standardize_names(data, standard_names)[source]#

Standardize fields in the dataset using the standard_names dictionary.
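A hedged sketch of the call pattern; the raw variable name and mapping below are illustrative, since sup3r ships its own standard-name mapping::

    import xarray as xr
    from sup3r.preprocessing.loaders.utilities import standardize_names

    # Hypothetical raw dataset with a nonstandard variable name.
    ds = xr.Dataset({'u100': ('time', [1.0, 2.0, 3.0])})

    # Rename fields according to the provided mapping.
    ds = standardize_names(ds, {'u100': 'u_100m'})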
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.html b/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.html
new file mode 100644
index 000000000..2c0a72de2
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.html

sup3r.preprocessing.loaders.utilities.standardize_values — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.loaders.utilities.standardize_values#

standardize_values(data)[source]#

Standardize units and coordinate values. e.g. All temperatures in celsius, all longitudes between -180 and 180, etc.

data : xr.Dataset
xarray dataset to be updated with standardized values.
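A minimal sketch of where this is applied; ds is assumed to be an xr.Dataset freshly loaded from a raw source file::

    from sup3r.preprocessing.loaders.utilities import standardize_values

    # e.g. Kelvin temperatures become Celsius and 0-360 longitudes are
    # shifted into [-180, 180].
    ds = standardize_values(ds)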
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.names.Dimension.html b/_autosummary/sup3r.preprocessing.names.Dimension.html
new file mode 100644
index 000000000..0995ad2cd
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.names.Dimension.html

sup3r.preprocessing.names.Dimension — sup3r 0.2.1.dev62 documentation
+

sup3r.preprocessing.names.Dimension#

class Dimension(value)[source]#

Bases: str, Enum

Dimension names used for Sup3rX accessor.

Methods

encode([encoding, errors])

Encode the string using the codec registered for encoding.

replace(old, new[, count])

Return a copy with all occurrences of substring old replaced by new.

split([sep, maxsplit])

Return a list of the substrings in the string, using sep as the separator string.

rsplit([sep, maxsplit])

Return a list of the substrings in the string, using sep as the separator string.

join(iterable, /)

Concatenate any number of strings.

capitalize()

Return a capitalized version of the string.

casefold()

Return a version of the string suitable for caseless comparisons.

title()

Return a version of the string where each word is titlecased.

center(width[, fillchar])

Return a centered string of length width.

count(sub[, start[, end]])

Return the number of non-overlapping occurrences of substring sub in string S[start:end].

expandtabs([tabsize])

Return a copy where all tab characters are expanded using spaces.

find(sub[, start[, end]])

Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end].

partition(sep, /)

Partition the string into three parts using the given separator.

index(sub[, start[, end]])

Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end].

ljust(width[, fillchar])

Return a left-justified string of length width.

lower()

Return a copy of the string converted to lowercase.

lstrip([chars])

Return a copy of the string with leading whitespace removed.

rfind(sub[, start[, end]])

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end].

rindex(sub[, start[, end]])

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end].

rjust(width[, fillchar])

Return a right-justified string of length width.

rstrip([chars])

Return a copy of the string with trailing whitespace removed.

rpartition(sep, /)

Partition the string into three parts using the given separator.

splitlines([keepends])

Return a list of the lines in the string, breaking at line boundaries.

strip([chars])

Return a copy of the string with leading and trailing whitespace removed.

swapcase()

Convert uppercase characters to lowercase and lowercase characters to uppercase.

translate(table, /)

Replace each character in the string using the given translation table.

upper()

Return a copy of the string converted to uppercase.

startswith(prefix[, start[, end]])

Return True if S starts with the specified prefix, False otherwise.

endswith(suffix[, start[, end]])

Return True if S ends with the specified suffix, False otherwise.

removeprefix(prefix, /)

Return a str with the given prefix string removed if present.

removesuffix(suffix, /)

Return a str with the given suffix string removed if present.

isascii()

Return True if all characters in the string are ASCII, False otherwise.

islower()

Return True if the string is a lowercase string, False otherwise.

isupper()

Return True if the string is an uppercase string, False otherwise.

istitle()

Return True if the string is a title-cased string, False otherwise.

isspace()

Return True if the string is a whitespace string, False otherwise.

isdecimal()

Return True if the string is a decimal string, False otherwise.

isdigit()

Return True if the string is a digit string, False otherwise.

isnumeric()

Return True if the string is a numeric string, False otherwise.

isalpha()

Return True if the string is an alphabetic string, False otherwise.

isalnum()

Return True if the string is an alpha-numeric string, False otherwise.

isidentifier()

Return True if the string is a valid Python identifier, False otherwise.

isprintable()

Return True if the string is printable, False otherwise.

zfill(width, /)

Pad a numeric string with zeros on the left, to fill a field of the given width.

format(*args, **kwargs)

Return a formatted version of S, using substitutions from args and kwargs.

format_map(mapping)

Return a formatted version of S, using substitutions from mapping.

maketrans

Return a translation table usable for str.translate().

order()

Return standard dimension order.

flat_2d()

Return ordered tuple for 2d flattened data.

dims_2d()

Return ordered tuple for 2d spatial dimensions.

coords_2d()

Return ordered tuple for 2d spatial coordinates.

coords_3d()

Return ordered tuple for 3d spatiotemporal coordinates.

dims_3d()

Return ordered tuple for 3d spatiotemporal dimensions.

dims_4d()

Return ordered tuple for 4d spatiotemporal dimensions.

coords_4d_pres()

Return ordered tuple for 4d coordinates, with a pressure level.

coords_4d()

Return ordered tuple for 4d coordinates, with a height level.

dims_4d_pres()

Return ordered tuple for 4d spatiotemporal dimensions with vertical pressure levels

dims_3d_bc()

Return ordered tuple for 3d spatiotemporal dimensions.

dims_4d_bc()

Return ordered tuple for 4d spatiotemporal dimensions specifically for bias correction factor files.

Attributes

FLATTENED_SPATIAL

SOUTH_NORTH

WEST_EAST

TIME

PRESSURE_LEVEL

HEIGHT

VARIABLE

LATITUDE

LONGITUDE

QUANTILE

GLOBAL_TIME

classmethod order()[source]#

Return standard dimension order.

classmethod flat_2d()[source]#

Return ordered tuple for 2d flattened data.

classmethod dims_2d()[source]#

Return ordered tuple for 2d spatial dimensions. Usually (south_north, west_east)

classmethod coords_2d()[source]#

Return ordered tuple for 2d spatial coordinates.

classmethod coords_3d()[source]#

Return ordered tuple for 3d spatiotemporal coordinates.

classmethod dims_3d()[source]#

Return ordered tuple for 3d spatiotemporal dimensions.

classmethod dims_4d()[source]#

Return ordered tuple for 4d spatiotemporal dimensions.

classmethod coords_4d_pres()[source]#

Return ordered tuple for 4d coordinates, with a pressure level.

classmethod coords_4d()[source]#

Return ordered tuple for 4d coordinates, with a height level.

classmethod dims_4d_pres()[source]#

Return ordered tuple for 4d spatiotemporal dimensions with vertical pressure levels

classmethod dims_3d_bc()[source]#

Return ordered tuple for 3d spatiotemporal dimensions.

classmethod dims_4d_bc()[source]#

Return ordered tuple for 4d spatiotemporal dimensions specifically for bias correction factor files.
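A hedged sketch of typical usage; the exact tuples returned are assumptions based on the docstrings above, not verified output::

    from sup3r.preprocessing.names import Dimension

    # Dimension subclasses str, so members can be used directly as
    # xarray dimension names.
    dims = Dimension.dims_3d()   # e.g. (south_north, west_east, time)
    order = Dimension.order()    # full standard dimension ordering
    print(dims, order)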
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+encode(encoding='utf-8', errors='strict')#
+

Encode the string using the codec registered for encoding.

+
+
encoding

The encoding in which to encode the string.

+
+
errors

The error handling scheme to use for encoding errors. +The default is ‘strict’ meaning that encoding errors raise a +UnicodeEncodeError. Other possible values are ‘ignore’, ‘replace’ and +‘xmlcharrefreplace’ as well as any other name registered with +codecs.register_error that can handle UnicodeEncodeErrors.

+
+
+
+ +
+
+replace(old, new, count=-1, /)#
+

Return a copy with all occurrences of substring old replaced by new.

+
+
+
count

Maximum number of occurrences to replace. +-1 (the default value) means replace all occurrences.

+
+
+
+

If the optional argument count is given, only the first count occurrences are +replaced.

+
+ +
+
+split(sep=None, maxsplit=-1)#
+

Return a list of the substrings in the string, using sep as the separator string.

+
+
+
sep

The separator used to split the string.

+

When set to None (the default value), will split on any whitespace +character (including \n \r \t \f and spaces) and will discard +empty strings from the result.

+
+
maxsplit

Maximum number of splits (starting from the left). +-1 (the default value) means no limit.

+
+
+
+

Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.

+
+ +
+
+rsplit(sep=None, maxsplit=-1)#
+

Return a list of the substrings in the string, using sep as the separator string.

+
+
+
sep

The separator used to split the string.

+

When set to None (the default value), will split on any whitespace +character (including \n \r \t \f and spaces) and will discard +empty strings from the result.

+
+
maxsplit

Maximum number of splits (starting from the left). +-1 (the default value) means no limit.

+
+
+
+

Splitting starts at the end of the string and works to the front.

+
+ +
+
+join(iterable, /)#
+

Concatenate any number of strings.

+

The string whose method is called is inserted in between each given string. +The result is returned as a new string.

+

Example: ‘.’.join([‘ab’, ‘pq’, ‘rs’]) -> ‘ab.pq.rs’

+
+ +
+
+capitalize()#
+

Return a capitalized version of the string.

+

More specifically, make the first character have upper case and the rest lower +case.

+
+ +
+
+casefold()#
+

Return a version of the string suitable for caseless comparisons.

+
+ +
+
+title()#
+

Return a version of the string where each word is titlecased.

+

More specifically, words start with uppercased characters and all remaining +cased characters have lower case.

+
+ +
+
+center(width, fillchar=' ', /)#
+

Return a centered string of length width.

+

Padding is done using the specified fill character (default is a space).

+
+ +
+
+count(sub[, start[, end]]) int#
+

Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.

+
+ +
+
+expandtabs(tabsize=8)#
+

Return a copy where all tab characters are expanded using spaces.

+

If tabsize is not given, a tab size of 8 characters is assumed.

+
+ +
+
+find(sub[, start[, end]]) int#
+

Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.

+

Return -1 on failure.

+
+ +
+
+partition(sep, /)#
+

Partition the string into three parts using the given separator.

+

This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.

+

If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.

+
+ +
+
+index(sub[, start[, end]]) int#
+

Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.

+

Raises ValueError when the substring is not found.

+
+ +
+
+ljust(width, fillchar=' ', /)#
+

Return a left-justified string of length width.

+

Padding is done using the specified fill character (default is a space).

+
+ +
+
+lower()#
+

Return a copy of the string converted to lowercase.

+
+ +
+
+lstrip(chars=None, /)#
+

Return a copy of the string with leading whitespace removed.

+

If chars is given and not None, remove characters in chars instead.

+
+ +
+
+rfind(sub[, start[, end]]) int#
+

Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.

+

Return -1 on failure.

+
+ +
+
+rindex(sub[, start[, end]]) int#
+

Return the highest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.

+

Raises ValueError when the substring is not found.

+
+ +
+
+rjust(width, fillchar=' ', /)#
+

Return a right-justified string of length width.

+

Padding is done using the specified fill character (default is a space).

+
+ +
+
+rstrip(chars=None, /)#
+

Return a copy of the string with trailing whitespace removed.

+

If chars is given and not None, remove characters in chars instead.

+
+ +
+
+rpartition(sep, /)#
+

Partition the string into three parts using the given separator.

+

This will search for the separator in the string, starting at the end. If +the separator is found, returns a 3-tuple containing the part before the +separator, the separator itself, and the part after it.

+

If the separator is not found, returns a 3-tuple containing two empty strings +and the original string.

+
+ +
+
+splitlines(keepends=False)#
+

Return a list of the lines in the string, breaking at line boundaries.

+

Line breaks are not included in the resulting list unless keepends is given and +true.

+
+ +
+
+strip(chars=None, /)#
+

Return a copy of the string with leading and trailing whitespace removed.

+

If chars is given and not None, remove characters in chars instead.

+
+ +
+
+swapcase()#
+

Convert uppercase characters to lowercase and lowercase characters to uppercase.

+
+ +
+
+translate(table, /)#
+

Replace each character in the string using the given translation table.

+
+
+
table

Translation table, which must be a mapping of Unicode ordinals to +Unicode ordinals, strings, or None.

+
+
+
+

The table must implement lookup/indexing via __getitem__, for instance a +dictionary or list. If this operation raises LookupError, the character is +left untouched. Characters mapped to None are deleted.

+
+ +
+
+upper()#
+

Return a copy of the string converted to uppercase.

+
+ +
+
+startswith(prefix[, start[, end]]) bool#
+

Return True if S starts with the specified prefix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +prefix can also be a tuple of strings to try.

+
+ +
+
+endswith(suffix[, start[, end]]) bool#
+

Return True if S ends with the specified suffix, False otherwise. +With optional start, test S beginning at that position. +With optional end, stop comparing S at that position. +suffix can also be a tuple of strings to try.

+
+ +
+
+removeprefix(prefix, /)#
+

Return a str with the given prefix string removed if present.

+

If the string starts with the prefix string, return string[len(prefix):]. +Otherwise, return a copy of the original string.

+
+ +
+
+removesuffix(suffix, /)#
+

Return a str with the given suffix string removed if present.

+

If the string ends with the suffix string and that suffix is not empty, +return string[:-len(suffix)]. Otherwise, return a copy of the original +string.

+
+ +
+
+isascii()#
+

Return True if all characters in the string are ASCII, False otherwise.

+

ASCII characters have code points in the range U+0000-U+007F. +Empty string is ASCII too.

+
+ +
+
+islower()#
+

Return True if the string is a lowercase string, False otherwise.

+

A string is lowercase if all cased characters in the string are lowercase and +there is at least one cased character in the string.

+
+ +
+
+isupper()#
+

Return True if the string is an uppercase string, False otherwise.

+

A string is uppercase if all cased characters in the string are uppercase and +there is at least one cased character in the string.

+
+ +
+
+istitle()#
+

Return True if the string is a title-cased string, False otherwise.

+

In a title-cased string, upper- and title-case characters may only +follow uncased characters and lowercase characters only cased ones.

+
+ +
+
+isspace()#
+

Return True if the string is a whitespace string, False otherwise.

+

A string is whitespace if all characters in the string are whitespace and there +is at least one character in the string.

+
+ +
+
+isdecimal()#
+

Return True if the string is a decimal string, False otherwise.

+

A string is a decimal string if all characters in the string are decimal and +there is at least one character in the string.

+
+ +
+
+isdigit()#
+

Return True if the string is a digit string, False otherwise.

+

A string is a digit string if all characters in the string are digits and there +is at least one character in the string.

+
+ +
+
+isnumeric()#
+

Return True if the string is a numeric string, False otherwise.

+

A string is numeric if all characters in the string are numeric and there is at +least one character in the string.

+
+ +
+
+isalpha()#
+

Return True if the string is an alphabetic string, False otherwise.

+

A string is alphabetic if all characters in the string are alphabetic and there +is at least one character in the string.

+
+ +
+
+isalnum()#
+

Return True if the string is an alpha-numeric string, False otherwise.

+

A string is alpha-numeric if all characters in the string are alpha-numeric and +there is at least one character in the string.

+
+ +
+
+isidentifier()#
+

Return True if the string is a valid Python identifier, False otherwise.

+

Call keyword.iskeyword(s) to test whether string s is a reserved identifier, +such as “def” or “class”.

+
+ +
+
+isprintable()#
+

Return True if the string is printable, False otherwise.

+

A string is printable if all of its characters are considered printable in +repr() or if it is empty.

+
+ +
+
+zfill(width, /)#
+

Pad a numeric string with zeros on the left, to fill a field of the given width.

+

The string is never truncated.

+
+ +
+
+format(*args, **kwargs) str#
+

Return a formatted version of S, using substitutions from args and kwargs. +The substitutions are identified by braces (‘{’ and ‘}’).

+
+ +
+
+format_map(mapping) str#
+

Return a formatted version of S, using substitutions from mapping. +The substitutions are identified by braces (‘{’ and ‘}’).

+
+ +
+
+static maketrans()#
+

Return a translation table usable for str.translate().

+

If there is only one argument, it must be a dictionary mapping Unicode +ordinals (integers) or characters to Unicode ordinals, strings or None. +Character keys will be then converted to ordinals. +If there are two arguments, they must be strings of equal length, and +in the resulting dictionary, each character in x will be mapped to the +character at the same position in y. If there is a third argument, it +must be a string, whose characters will be mapped to None in the result.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.names.html b/_autosummary/sup3r.preprocessing.names.html
new file mode 100644
index 000000000..5e1992ea6
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.names.html

sup3r.preprocessing.names — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.names#

Mappings from coord / dim / feature names to standard names and a Dimension class for standardizing dimension orders and names.

Classes

Dimension(value)
Dimension names used for Sup3rX accessor.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.html
new file mode 100644
index 000000000..9944c0285
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.html

sup3r.preprocessing.rasterizers.base.BaseRasterizer — sup3r 0.2.1.dev62 documentation
+

sup3r.preprocessing.rasterizers.base.BaseRasterizer#

class BaseRasterizer(loader, features='all', target=None, shape=None, time_slice=slice(None, None, None), threshold=None)[source]#

Bases: Container

Container subclass with additional methods for extracting a spatiotemporal extent from contained data.

Note

This Rasterizer base class is for 3D rasterized data. This usually comes from NETCDF files but can also be cached H5 files saved from previously rasterized data. For 3D data, whether H5 or NETCDF, the full domain will be rasterized automatically if no target / shape are provided.

Parameters:

  • loader (Loader) – Loader type container with a .data attribute exposing data to extract.

  • features (list | str) – Features to return in loaded dataset. If 'all' then all available features will be returned.

  • target (tuple) – (lat, lon) lower left corner of raster. Either need target+shape or raster_file.

  • shape (tuple) – (rows, cols) grid size. Either need target+shape or raster_file.

  • time_slice (slice | list) – Slice specifying extent and step of temporal extraction. e.g. slice(start, stop, step). If equal to slice(None, None, 1) the full time dimension is selected. Can also be a list [start, stop, step].

  • threshold (float) – Nearest neighbor euclidean distance threshold. If the coordinates are more than this value away from the target lat/lon, an error is raised.
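A construction sketch under hypothetical inputs (the file path and coordinates are illustrative)::

    from sup3r.preprocessing.loaders import Loader
    from sup3r.preprocessing.rasterizers.base import BaseRasterizer

    # Rasterize a 20x20 grid anchored at the lower-left corner
    # (39.7, -105.2), keeping every other time step.
    loader = Loader('./era5_2015.nc')
    rast = BaseRasterizer(
        loader,
        target=(39.7, -105.2),
        shape=(20, 20),
        time_slice=slice(None, None, 2),
    )
    print(rast.lat_lon.shape)  # (20, 20, 2) per the lat_lon docstring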

Methods

check_target_and_shape(full_lat_lon)
The data is assumed to use a regular grid so if either target or shape is not given we can easily find the values that give the maximum extent.

get_closest_row_col(lat_lon, target)
Get closest indices to target lat lon

get_lat_lon()
Get the 2D array of coordinates corresponding to the requested target and shape.

get_raster_index()
Get set of slices or indices selecting the requested region from the contained data.

post_init_log([args_dict])
Log additional arguments after initialization.

rasterize_data()
Get rasterized data.

wrap(data)
Return a Sup3rDataset object or tuple of such.

Attributes

data
Return underlying data.

grid_shape
Return the grid_shape based on the raster_index, since self._grid_shape does not need to be provided as an input if the raster_file is.

lat_lon
Get 2D grid of coordinates with target as the lower left coordinate.

shape
Get shape of underlying data.

target
Return the true value based on the closest lat lon instead of the user provided value self._target, which is used to find the closest lat lon.

time_slice
Return time slice for rasterized time period.
property data#

Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

property time_slice#

Return time slice for rasterized time period.

property target#

Return the true value based on the closest lat lon instead of the user provided value self._target, which is used to find the closest lat lon.

property grid_shape#

Return the grid_shape based on the raster_index, since self._grid_shape does not need to be provided as an input if the raster_file is.

property lat_lon#

Get 2D grid of coordinates with target as the lower left coordinate. (lats, lons, 2)

post_init_log(args_dict=None)#

Log additional arguments after initialization.

property shape#

Get shape of underlying data.

wrap(data)#

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

rasterize_data()[source]#

Get rasterized data.

check_target_and_shape(full_lat_lon)[source]#

The data is assumed to use a regular grid so if either target or shape is not given we can easily find the values that give the maximum extent.

get_raster_index()[source]#

Get set of slices or indices selecting the requested region from the contained data.

get_closest_row_col(lat_lon, target)[source]#

Get closest indices to target lat lon

Parameters:

  • lat_lon (ndarray) – Array of lat/lon with shape (spatial_1, spatial_2, 2), with the last dimension in order of (lat, lon)

  • target (tuple) – (lat, lon) for target coordinate

Returns:

  • row (int) – row index for closest lat/lon to target lat/lon

  • col (int) – col index for closest lat/lon to target lat/lon

get_lat_lon()[source]#

Get the 2D array of coordinates corresponding to the requested target and shape.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.base.html b/_autosummary/sup3r.preprocessing.rasterizers.base.html
new file mode 100644
index 000000000..c1f6a4621
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.base.html

sup3r.preprocessing.rasterizers.base — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.base#

Objects that can rasterize 3D spatiotemporal data. Examples include WRF, ERA5, and GCM data. Can also work with 3D H5 data, just not flattened H5 data like WTK and NSRDB.

Classes

BaseRasterizer(loader[, features, target, ...])
Container subclass with additional methods for extracting a spatiotemporal extent from contained data.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.html
new file mode 100644
index 000000000..49cca9bec
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.html

sup3r.preprocessing.rasterizers.dual.DualRasterizer — sup3r 0.2.1.dev62 documentation
+

sup3r.preprocessing.rasterizers.dual.DualRasterizer#

class DualRasterizer(data: Sup3rDataset | Tuple[Dataset, Dataset], regrid_workers=1, regrid_lr=True, s_enhance=1, t_enhance=1, lr_cache_kwargs=None, hr_cache_kwargs=None)[source]#

Bases: Container

Object containing xr.Dataset instances for low and high-res data (usually ERA5 and WTK, respectively). This essentially just regrids the low-res data to the coarsened high-res grid. This is useful for caching and prepping data which can then go directly to a DualSampler or a DualBatchQueue.

Note

When first extracting the low_res data make sure to extract a region that completely overlaps the high_res region. It is easiest to load the full low_res domain and let DualRasterizer select the appropriate region through regridding.

Initialize the data container with lr and hr Data instances. Typically lr = ERA5 data and hr = WTK data.

Parameters:

  • data (Sup3rDataset | Tuple[xr.Dataset, xr.Dataset]) – A tuple of xr.Dataset instances. The first must be low-res and the second must be high-res data

  • regrid_workers (int | None) – Number of workers to use for regridding routine.

  • regrid_lr (bool) – Flag to regrid the low-res data to the high-res grid. This will take care of any minor inconsistencies in different projections. Disable this if the grids are known to be the same.

  • s_enhance (int) – Spatial enhancement factor

  • t_enhance (int) – Temporal enhancement factor

  • lr_cache_kwargs (dict) – Cache kwargs for the call to lr_data.cache_data(cache_kwargs). Must include 'cache_pattern' key if not None, and can also include a dictionary of chunk tuples with feature keys

  • hr_cache_kwargs (dict) – Cache kwargs for the call to hr_data.cache_data(cache_kwargs). Must include 'cache_pattern' key if not None, and can also include a dictionary of chunk tuples with feature keys
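A hedged sketch of the pairing described above; lr_ds and hr_ds are assumed to be already-opened xr.Dataset instances (e.g. ERA5 and WTK)::

    from sup3r.preprocessing.rasterizers.dual import DualRasterizer

    # Regrid the low-res data onto the coarsened high-res grid for
    # 3x spatial / 4x temporal enhancement.
    dual = DualRasterizer((lr_ds, hr_ds), s_enhance=3, t_enhance=4)

    # Per wrap(), a dual container's .data is a 2-tuple Sup3rDataset.
    lr_data, hr_data = dual.data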

Methods

check_regridded_lr_data()
Check for NaNs after regridding and do NN fill if needed.

get_regridder()
Get regridder object

post_init_log([args_dict])
Log additional arguments after initialization.

update_hr_data()
Set the high resolution data attribute and check if hr_data.shape is divisible by s_enhance.

update_lr_data()
Regrid low_res data for all requested noncached features.

wrap(data)
Return a Sup3rDataset object or tuple of such.

Attributes

data
Return underlying data.

shape
Get shape of underlying data.
update_hr_data()[source]#

Set the high resolution data attribute and check if hr_data.shape is divisible by s_enhance. If not, take the largest shape that is.

get_regridder()[source]#

Get regridder object

update_lr_data()[source]#

Regrid low_res data for all requested noncached features. Load cached features if available and overwrite=False.

check_regridded_lr_data()[source]#

Check for NaNs after regridding and do NN fill if needed.

property data#

Return underlying data.

Returns:
Sup3rDataset

See also
wrap()

post_init_log(args_dict=None)#

Log additional arguments after initialization.

property shape#

Get shape of underlying data.

wrap(data)#

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.dual.html b/_autosummary/sup3r.preprocessing.rasterizers.dual.html
new file mode 100644
index 000000000..0abc6b728
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.dual.html

sup3r.preprocessing.rasterizers.dual — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.dual#

Paired rasterizer class for matching separate low_res and high_res datasets.

Classes

DualRasterizer(data[, regrid_workers, ...])
Object containing xr.Dataset instances for low and high-res data.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.html
new file mode 100644
index 000000000..148875eb6
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.html

sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer — sup3r 0.2.1.dev62 documentation
+

sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer#

class BaseExoRasterizer(file_paths: str | None = None, source_file: str | None = None, feature: str | None = None, s_enhance: int = 1, t_enhance: int = 1, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, cache_dir: str = './exo_cache/', chunks: str | dict | None = 'auto', distance_upper_bound: int | None = None, max_workers: int = 1, verbose: bool = False)[source]#

Bases: ABC

Class to extract high-res (4km+) data rasters for new spatially-enhanced datasets (e.g. GCM files after spatial enhancement) using nearest neighbor mapping and aggregation from NREL datasets (e.g. WTK or NSRDB)

Parameters:

  • file_paths (str | list) – A single source h5 file to extract raster data from or a list of netcdf files with identical grid. The string can be a unix-style file path which will be passed through glob.glob. This is typically low-res WRF output or GCM netcdf data files that are the source low-resolution data intended to be sup3r resolved.

  • source_file (str) – Filepath to source data file to get hi-res exogenous data from which will be mapped to the enhanced grid of the file_paths input. Pixels from this source_file will be mapped to their nearest low-res pixel in the file_paths input. Accordingly, source_file should be a significantly higher resolution than file_paths. Warnings will be raised if the low-resolution pixels in file_paths do not have unique nearest pixels from source_file. File format can be .h5 for ExoRasterizerH5 or .nc for ExoRasterizerNC

  • feature (str) – Name of exogenous feature to rasterize.

  • s_enhance (int) – Factor by which the Sup3rGan model will enhance the spatial dimensions of low resolution data from file_paths input. For example, if getting topography data, file_paths has 100km data, and s_enhance is 4, this class will output a topography raster corresponding to the file_paths grid enhanced 4x to ~25km

  • t_enhance (int) – Factor by which the Sup3rGan model will enhance the temporal dimension of low resolution data from file_paths input. For example, if getting "sza" data, file_paths has hourly data, and t_enhance is 4, this class will output an "sza" raster corresponding to file_paths, temporally enhanced 4x to 15 min

  • input_handler_name (str) – data handler class to use for input data. Provide a string name to match a Rasterizer. If None the correct handler will be guessed based on file type and time series properties.

  • input_handler_kwargs (dict | None) – Any kwargs for initializing the input_handler_name class.

  • cache_dir (str) – Directory to use for caching rasterized data. Default is './exo_cache'.

  • chunks (str | dict) – Dictionary of dimension chunk sizes for returned exo data. e.g. {'time': 100, 'south_north': 100, 'west_east': 100}. This can also just be "auto". This is passed to .chunk() before returning exo data through the .data attribute

  • distance_upper_bound (float | None) – Maximum distance to map high-resolution data from source_file to the low-resolution file_paths input. None (default) will calculate this based on the median distance between points in source_file

  • max_workers (int) – Number of workers used for writing data to cache files. Gets passed to Cacher.write_netcdf.

  • verbose (bool) – Whether to log output as each chunk is written to cache file.

Methods

get_data()
Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

get_distance_upper_bound()
Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

Attributes

cache_dir

cache_file
Get cache file name

chunks

coords
Get coords dictionary for initializing xr.Dataset.

data
Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

distance_upper_bound

feature

file_paths

hr_lat_lon
Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension.

hr_shape
Get the high-resolution spatiotemporal shape

hr_time_index
Get the full time index for aggregated source data

input_handler_kwargs

input_handler_name

lr_shape
Get the low-resolution spatiotemporal shape

max_workers

nn
Get the nearest neighbor indices.

s_enhance

source_data
Get the 1D array of source data from the source_file_h5

source_file

source_handler
Get the Loader object that handles the exogenous data file.

source_lat_lon
Get the 2D array (n, 2) of lat, lon data from the source_file_h5

t_enhance

tree
Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

verbose
abstract property source_data#

Get the 1D array of source data from the source_file_h5

property source_handler#

Get the Loader object that handles the exogenous data file.

property cache_file#

Get cache file name

Returns:
cache_fp (str) – Name of cache file. This is a netcdf file which will be saved with Cacher and loaded with Loader

property coords#

Get coords dictionary for initializing xr.Dataset.

property source_lat_lon#

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

property lr_shape#

Get the low-resolution spatiotemporal shape

property hr_shape#

Get the high-resolution spatiotemporal shape

property hr_lat_lon#

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension. This corresponds to the enhanced meta data from the file_paths input * s_enhance.

Returns:
ndarray

property hr_time_index#

Get the full time index for aggregated source data

get_distance_upper_bound()[source]#

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

property tree#

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

property nn#

Get the nearest neighbor indices. This uses a single neighbor by default.

property data#

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal, 1)

get_data()[source]#

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, 1)
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.html
new file mode 100644
index 000000000..0d83276ff
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.html
@@ -0,0 +1,1043 @@

sup3r.preprocessing.rasterizers.exo.ExoRasterizer — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.exo.ExoRasterizer#

class ExoRasterizer(*, file_paths=None, source_file=None, feature=None, s_enhance=1, t_enhance=1, input_handler_name=None, input_handler_kwargs=None, cache_dir='./exo_cache/', chunks='auto', distance_upper_bound=None, verbose=False) [source]

Bases: BaseExoRasterizer

Class to extract high-res (4km+) data rasters for new spatially-enhanced datasets (e.g. GCM files after spatial enhancement) using nearest neighbor mapping and aggregation from NREL datasets (e.g. WTK or NSRDB)

Parameters:

  • file_paths (str | list) – A single source h5 file to extract raster data from, or a list of netcdf files with an identical grid. The string can be a unix-style file path which will be passed through glob.glob. This is typically low-res WRF output or GCM netcdf data that are the source low-resolution data intended to be sup3r resolved.

  • source_file (str) – Filepath to the source data file to get hi-res exogenous data from, which will be mapped to the enhanced grid of the file_paths input. Pixels from this source_file will be mapped to their nearest low-res pixel in the file_paths input. Accordingly, source_file should be of significantly higher resolution than file_paths. Warnings will be raised if the low-resolution pixels in file_paths do not have unique nearest pixels from source_file. File format can be .h5 for ExoRasterizerH5 or .nc for ExoRasterizerNC.

  • feature (str) – Name of exogenous feature to rasterize.

  • s_enhance (int) – Factor by which the Sup3rGan model will enhance the spatial dimensions of low resolution data from the file_paths input. For example, if getting topography data, file_paths has 100km data, and s_enhance is 4, this class will output a topography raster corresponding to the file_paths grid enhanced 4x to ~25km.

  • t_enhance (int) – Factor by which the Sup3rGan model will enhance the temporal dimension of low resolution data from the file_paths input. For example, if getting "sza" data, file_paths has hourly data, and t_enhance is 4, this class will output an "sza" raster corresponding to file_paths, temporally enhanced 4x to 15 min.

  • input_handler_name (str) – Data handler class to use for input data. Provide a string name to match a Rasterizer. If None, the correct handler will be guessed based on file type and time series properties.

  • input_handler_kwargs (dict | None) – Any kwargs for initializing the input_handler_name class.

  • cache_dir (str) – Directory to use for caching rasterized data. Default is './exo_cache/'.

  • chunks (str | dict) – Dictionary of dimension chunk sizes for returned exo data, e.g. {'time': 100, 'south_north': 100, 'west_east': 100}. This can also just be "auto". This is passed to .chunk() before returning exo data through the .data attribute.

  • distance_upper_bound (float | None) – Maximum distance to map high-resolution data from source_file to the low-resolution file_paths input. None (default) will calculate this based on the median distance between points in source_file.

  • max_workers (int) – Number of workers used for writing data to cache files. Gets passed to Cacher.write_netcdf.

  • verbose (bool) – Whether to log output as each chunk is written to a cache file.

Override parent class to return the type-specific class (ExoRasterizerH5 or ExoRasterizerNC) based on source_file.
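A minimal usage sketch follows; the file paths are hypothetical and the keyword arguments come from the parameter list above, so treat this as an illustration rather than a verified end-to-end recipe.

    from sup3r.preprocessing.rasterizers.exo import ExoRasterizer

    # Hypothetical inputs: low-res GCM netcdf files plus a high-res topography h5.
    # Because source_file ends in .h5, this dispatches to ExoRasterizerH5.
    rasterizer = ExoRasterizer(
        file_paths='./gcm_output_*.nc',   # hypothetical low-res input files
        source_file='./nrel_topo.h5',     # hypothetical high-res source data
        feature='topography',
        s_enhance=4,                      # 100km -> ~25km, per the example above
        t_enhance=1,
    )
    topo = rasterizer.data                # raster shaped (lats, lons, temporal, 1)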


Methods


get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.


Attributes


TypeSpecificClasses

cache_dir

cache_file

Get cache file name

chunks

coords

Get coords dictionary for initializing xr.Dataset.

data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

distance_upper_bound

feature

file_paths

hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension.

hr_shape

Get the high-resolution spatiotemporal shape

hr_time_index

Get the full time index for aggregated source data

input_handler_kwargs

input_handler_name

lr_shape

Get the low-resolution spatiotemporal shape

max_workers

nn

Get the nearest neighbor indices.

s_enhance

source_data

Get the 1D array of source data from the source_file_h5

source_file

source_handler

Get the Loader object that handles the exogenous data file.

source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

t_enhance

tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

verbose

property cache_file

Get cache file name

Returns:

cache_fp (str) – Name of cache file. This is a netcdf file which will be saved with Cacher and loaded with Loader

property coords

Get coords dictionary for initializing xr.Dataset.

property data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal, 1)

get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, 1)

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

property hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension. This corresponds to the enhanced meta data from the file_paths input * s_enhance.

Returns:

ndarray

property hr_shape

Get the high-resolution spatiotemporal shape

property hr_time_index

Get the full time index for aggregated source data

property lr_shape

Get the low-resolution spatiotemporal shape

property nn

Get the nearest neighbor indices. This uses a single neighbor by default

abstract property source_data

Get the 1D array of source data from the source_file_h5

property source_handler

Get the Loader object that handles the exogenous data file.

property source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

property tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.html
new file mode 100644
index 000000000..f24083f61
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.html
@@ -0,0 +1,989 @@

sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5 — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5#

class ExoRasterizerH5(file_paths: str | None = None, source_file: str | None = None, feature: str | None = None, s_enhance: int = 1, t_enhance: int = 1, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, cache_dir: str = './exo_cache/', chunks: str | dict | None = 'auto', distance_upper_bound: int | None = None, max_workers: int = 1, verbose: bool = False) [source]

Bases: BaseExoRasterizer

ExoRasterizer for H5 files

Methods


get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.


Attributes


cache_dir

cache_file

Get cache file name

chunks

coords

Get coords dictionary for initializing xr.Dataset.

data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

distance_upper_bound

feature

file_paths

hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension.

hr_shape

Get the high-resolution spatiotemporal shape

hr_time_index

Get the full time index for aggregated source data

input_handler_kwargs

input_handler_name

lr_shape

Get the low-resolution spatiotemporal shape

max_workers

nn

Get the nearest neighbor indices.

s_enhance

source_data

Get the 1D array of exogenous data from the source_file_h5

source_file

source_handler

Get the Loader object that handles the exogenous data file.

source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

t_enhance

tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

verbose

property source_data

Get the 1D array of exogenous data from the source_file_h5

property cache_file

Get cache file name

Returns:

cache_fp (str) – Name of cache file. This is a netcdf file which will be saved with Cacher and loaded with Loader

property coords

Get coords dictionary for initializing xr.Dataset.

property data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal, 1)

get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, 1)

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

property hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension. This corresponds to the enhanced meta data from the file_paths input * s_enhance.

Returns:

ndarray

property hr_shape

Get the high-resolution spatiotemporal shape

property hr_time_index

Get the full time index for aggregated source data

property lr_shape

Get the low-resolution spatiotemporal shape

property nn

Get the nearest neighbor indices. This uses a single neighbor by default

property source_handler

Get the Loader object that handles the exogenous data file.

property source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

property tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.html
new file mode 100644
index 000000000..dfb049226
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.html
@@ -0,0 +1,989 @@

sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC#

class ExoRasterizerNC(file_paths: str | None = None, source_file: str | None = None, feature: str | None = None, s_enhance: int = 1, t_enhance: int = 1, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, cache_dir: str = './exo_cache/', chunks: str | dict | None = 'auto', distance_upper_bound: int | None = None, max_workers: int = 1, verbose: bool = False) [source]

Bases: BaseExoRasterizer

ExoRasterizer for netCDF files

Methods


get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.


Attributes


cache_dir

cache_file

Get cache file name

chunks

coords

Get coords dictionary for initializing xr.Dataset.

data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

distance_upper_bound

feature

file_paths

hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension.

hr_shape

Get the high-resolution spatiotemporal shape

hr_time_index

Get the full time index for aggregated source data

input_handler_kwargs

input_handler_name

lr_shape

Get the low-resolution spatiotemporal shape

max_workers

nn

Get the nearest neighbor indices.

s_enhance

source_data

Get the 1D array of exogenous data from the source_file_nc

source_file

source_handler

Get the Loader object that handles the exogenous data file.

source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file

t_enhance

tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

verbose

property source_data

Get the 1D array of exogenous data from the source_file_nc

property source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file

property cache_file

Get cache file name

Returns:

cache_fp (str) – Name of cache file. This is a netcdf file which will be saved with Cacher and loaded with Loader

property coords

Get coords dictionary for initializing xr.Dataset.

property data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal, 1)

get_data()

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, 1)

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

property hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension. This corresponds to the enhanced meta data from the file_paths input * s_enhance.

Returns:

ndarray

property hr_shape

Get the high-resolution spatiotemporal shape

property hr_time_index

Get the full time index for aggregated source data

property lr_shape

Get the low-resolution spatiotemporal shape

property nn

Get the nearest neighbor indices. This uses a single neighbor by default

property source_handler

Get the Loader object that handles the exogenous data file.

property tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.html
new file mode 100644
index 000000000..88807bc03
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.html
@@ -0,0 +1,989 @@

sup3r.preprocessing.rasterizers.exo.SzaRasterizer — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.exo.SzaRasterizer#

class SzaRasterizer(file_paths: str | None = None, source_file: str | None = None, feature: str | None = None, s_enhance: int = 1, t_enhance: int = 1, input_handler_name: str | None = None, input_handler_kwargs: dict | None = None, cache_dir: str = './exo_cache/', chunks: str | dict | None = 'auto', distance_upper_bound: int | None = None, max_workers: int = 1, verbose: bool = False) [source]

Bases: BaseExoRasterizer

SzaRasterizer for H5 files
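A hedged usage sketch, mirroring the ExoRasterizer example with hypothetical file paths; for "sza" the interesting knob is t_enhance, which temporally enhances the output per the parameter docs.

    from sup3r.preprocessing.rasterizers.exo import SzaRasterizer

    # Hypothetical: hourly low-res input enhanced 4x in time -> 15 min sza raster
    sza = SzaRasterizer(
        file_paths='./hourly_gcm_*.nc',   # hypothetical low-res input files
        source_file='./nsrdb_2019.h5',    # hypothetical NSRDB source file
        feature='sza',
        s_enhance=1,
        t_enhance=4,
    ).get_data()                          # shape (lats, lons, temporal)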

Methods


get_data()

Get a raster of source values corresponding to the high-res grid (the file_paths input grid * s_enhance * t_enhance).

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.


Attributes


cache_dir

cache_file

Get cache file name

chunks

coords

Get coords dictionary for initializing xr.Dataset.

data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance).

distance_upper_bound

feature

file_paths

hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension.

hr_shape

Get the high-resolution spatiotemporal shape

hr_time_index

Get the full time index for aggregated source data

input_handler_kwargs

input_handler_name

lr_shape

Get the low-resolution spatiotemporal shape

max_workers

nn

Get the nearest neighbor indices.

s_enhance

source_data

Get the 1D array of sza data from the source_file_h5

source_file

source_handler

Get the Loader object that handles the exogenous data file.

source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

t_enhance

tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance

verbose

property source_data

Get the 1D array of sza data from the source_file_h5

get_data() [source]

Get a raster of source values corresponding to the high-res grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal)

property cache_file

Get cache file name

Returns:

cache_fp (str) – Name of cache file. This is a netcdf file which will be saved with Cacher and loaded with Loader

property coords

Get coords dictionary for initializing xr.Dataset.

property data

Get a raster of source values corresponding to the high-resolution grid (the file_paths input grid * s_enhance * t_enhance). The shape is (lats, lons, temporal, 1)

get_distance_upper_bound()

Maximum distance (float) to map high-resolution data from source_file to the low-resolution file_paths input.

property hr_lat_lon

Lat lon grid for data in format (spatial_1, spatial_2, 2) Lat/Lon array with same ordering in last dimension. This corresponds to the enhanced meta data from the file_paths input * s_enhance.

Returns:

ndarray

property hr_shape

Get the high-resolution spatiotemporal shape

property hr_time_index

Get the full time index for aggregated source data

property lr_shape

Get the low-resolution spatiotemporal shape

property nn

Get the nearest neighbor indices. This uses a single neighbor by default

property source_handler

Get the Loader object that handles the exogenous data file.

property source_lat_lon

Get the 2D array (n, 2) of lat, lon data from the source_file_h5

property tree

Get the KDTree built on the target lat lon data from the file_paths input with s_enhance
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.exo.html b/_autosummary/sup3r.preprocessing.rasterizers.exo.html
new file mode 100644
index 000000000..16ef28487
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.exo.html
@@ -0,0 +1,753 @@

sup3r.preprocessing.rasterizers.exo — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.exo#

Exo data rasterizers for topography and sza

TODO: ExoDataHandler is pretty similar to ExoRasterizer. Maybe a mixin or subclass refactor here.
Classes


BaseExoRasterizer([file_paths, source_file, ...])

Class to extract high-res (4km+) data rasters for new spatially-enhanced datasets (e.g. GCM files after spatial enhancement) using nearest neighbor mapping and aggregation from NREL datasets (e.g. WTK or NSRDB).

ExoRasterizer(*[, file_paths, source_file, ...])

Class to extract high-res (4km+) data rasters for new spatially-enhanced datasets (e.g. GCM files after spatial enhancement) using nearest neighbor mapping and aggregation from NREL datasets (e.g. WTK or NSRDB).

ExoRasterizerH5([file_paths, source_file, ...])

ExoRasterizer for H5 files

ExoRasterizerNC([file_paths, source_file, ...])

ExoRasterizer for netCDF files

SzaRasterizer([file_paths, source_file, ...])

SzaRasterizer for H5 files

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.html b/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.html
new file mode 100644
index 000000000..00c5db9fe
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.html
@@ -0,0 +1,1021 @@

sup3r.preprocessing.rasterizers.extended.Rasterizer — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.extended.Rasterizer#

class Rasterizer(file_paths, features='all', res_kwargs=None, chunks='auto', target=None, shape=None, time_slice=slice(None, None, None), threshold=None, raster_file=None, max_delta=20, BaseLoader=None) [source]

Bases: BaseRasterizer

Extended Rasterizer class which also handles the flattened data format used for some H5 files (e.g. Wind Toolkit or NSRDB data), and rasterizes directly from file paths rather than taking a Loader as input

Parameters:

  • file_paths (str | list | pathlib.Path) – file_paths input to LoaderClass

  • features (list | str) – Features to return in loaded dataset. If 'all' then all available features will be returned.

  • res_kwargs (dict) – Additional keyword arguments passed through to the BaseLoader. BaseLoader is usually xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 files.

  • chunks (dict | str | None) – Dictionary of chunk sizes to pass through to dask.array.from_array() or xr.Dataset().chunk(). Will be converted to a tuple when used in from_array(). These are the methods for H5 and NETCDF data, respectively. This argument can be "auto" in addition to a dictionary. If this is None then the data will not be chunked and instead loaded directly into memory.

  • target (tuple) – (lat, lon) lower left corner of raster. Either need target+shape or raster_file.

  • shape (tuple) – (rows, cols) grid size. Either need target+shape or raster_file.

  • time_slice (slice | list) – Slice specifying extent and step of temporal extraction, e.g. slice(start, stop, step). If equal to slice(None, None, 1) the full time dimension is selected. Can also be a list [start, stop, step].

  • threshold (float) – Nearest neighbor euclidean distance threshold. If the coordinates are more than this value away from the target lat/lon, an error is raised.

  • raster_file (str | None) – File for raster_index array for the corresponding target and shape. If specified the raster_index will be loaded from the file if it exists or written to the file if it does not yet exist. If None and raster_index is not provided, raster_index will be calculated directly. Either need target+shape, raster_file, or raster_index input.

  • max_delta (int) – Optional maximum limit on the raster shape that is retrieved at once. If shape is (20, 20) and max_delta=10, the full raster will be retrieved in four chunks of (10, 10). This helps adapt to non-regular grids that curve over large distances.

  • BaseLoader (Callable) – Optional base loader method update. This is a function which takes file_paths and **kwargs and returns an initialized base loader with those arguments. The default for h5 is a method which returns MultiFileWindX(file_paths, **kwargs) and for nc the default is xarray.open_mfdataset(file_paths, **kwargs).
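A short usage sketch under assumed inputs; the file path and extent here are hypothetical, chosen only to exercise the parameters described above.

    from sup3r.preprocessing.rasterizers.extended import Rasterizer

    # Hypothetical flattened-format H5 file (e.g. WTK); target+shape select the raster
    rast = Rasterizer(
        './wtk_conus_2012.h5',            # hypothetical file path
        features=['windspeed_100m'],
        target=(39.0, -105.0),            # (lat, lon) lower left corner
        shape=(20, 20),                   # (rows, cols) grid size
        time_slice=slice(0, 8760),
    )
    print(rast.shape)                     # shape of the rasterized data
    print(rast.lat_lon[0, 0])             # should be near the requested target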

Methods


check_target_and_shape(full_lat_lon)

The data is assumed to use a regular grid so if either target or shape is not given we can easily find the values that give the maximum extent.

get_closest_row_col(lat_lon, target)

Get closest indices to target lat lon

get_lat_lon()

Get the 2D array of coordinates corresponding to the requested target and shape.

get_raster_index()

Get set of slices or indices selecting the requested region from the contained data.

post_init_log([args_dict])

Log additional arguments after initialization.

rasterize_data()

Get rasterized data.

save_raster_index()

Save raster index to cache file.

wrap(data)

Return a Sup3rDataset object or tuple of such.


Attributes


data

Return underlying data.

grid_shape

Return the grid_shape based on the raster_index, since self._grid_shape does not need to be provided as an input if the raster_file is.

lat_lon

Get 2D grid of coordinates with target as the lower left coordinate.

shape

Get shape of underlying data.

target

Return the true value based on the closest lat lon instead of the user provided value self._target, which is used to find the closest lat lon.

time_slice

Return time slice for rasterized time period.

+
rasterize_data() [source]

Get rasterized data.

save_raster_index() [source]

Save raster index to cache file.

get_raster_index() [source]

Get set of slices or indices selecting the requested region from the contained data.

get_lat_lon() [source]

Get the 2D array of coordinates corresponding to the requested target and shape.

check_target_and_shape(full_lat_lon)

The data is assumed to use a regular grid, so if either target or shape is not given we can easily find the values that give the maximum extent.

property data

Return underlying data.

Returns:

Sup3rDataset

See also

wrap()

get_closest_row_col(lat_lon, target)

Get closest indices to target lat lon

Parameters:

  • lat_lon (ndarray) – Array of lat/lon (spatial_1, spatial_2, 2) with last dimension in order of (lat, lon)

  • target (tuple) – (lat, lon) for target coordinate

Returns:

  • row (int) – row index for closest lat/lon to target lat/lon

  • col (int) – col index for closest lat/lon to target lat/lon
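A minimal numpy sketch of what such a lookup can do on a regular grid; this is illustrative only, not the library's implementation.

    import numpy as np

    def closest_row_col(lat_lon, target):
        """Illustrative: index of the grid point nearest to target (lat, lon)."""
        dist = np.hypot(lat_lon[..., 0] - target[0], lat_lon[..., 1] - target[1])
        row, col = np.unravel_index(np.argmin(dist), dist.shape)
        return row, col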

property grid_shape

Return the grid_shape based on the raster_index, since self._grid_shape does not need to be provided as an input if the raster_file is.

property lat_lon

Get 2D grid of coordinates with target as the lower left coordinate. (lats, lons, 2)

post_init_log(args_dict=None)

Log additional arguments after initialization.

property shape

Get shape of underlying data.

property target

Return the true value based on the closest lat lon instead of the user provided value self._target, which is used to find the closest lat lon.

property time_slice

Return time slice for rasterized time period.

wrap(data)

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.extended.html b/_autosummary/sup3r.preprocessing.rasterizers.extended.html
new file mode 100644
index 000000000..c296de07c
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.extended.html
@@ -0,0 +1,739 @@

sup3r.preprocessing.rasterizers.extended — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers.extended#

Extended Rasterizer that can rasterize flattened data.

Classes


Rasterizer(file_paths[, features, ...])

Extended Rasterizer class which also handles the flattened data format used for some H5 files (e.g. Wind Toolkit or NSRDB data), and rasterizes directly from file paths rather than taking a Loader as input.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.rasterizers.html b/_autosummary/sup3r.preprocessing.rasterizers.html
new file mode 100644
index 000000000..4062dc780
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.rasterizers.html
@@ -0,0 +1,753 @@

sup3r.preprocessing.rasterizers — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.rasterizers#

Container subclass with methods for extracting specific spatiotemporal extents from data. Rasterizer objects mostly operate on Loader objects, which just load data from files but do not do anything else to the data. Rasterizer objects are mostly operated on by Deriver objects, which derive new features from the data contained in Rasterizer objects.
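The Loader -> Rasterizer -> Deriver relationship described here can be sketched roughly as follows. The constructor signatures and the import path are assumptions based on the class names in these docs, not verified calls.

    # Rough pipeline sketch; everything here is an assumption from the description above
    from sup3r.preprocessing import Loader, Rasterizer, Deriver  # assumed import path

    loader = Loader('./input_*.nc')          # just loads data from files
    rast = Rasterizer('./input_*.nc',        # selects a spatiotemporal extent
                      target=(39.0, -105.0), shape=(20, 20))
    derived = Deriver(rast.data, features=['windspeed_100m'])  # derives new features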


base

Objects that can rasterize 3D spatiotemporal data.

dual

Paired rasterizer class for matching separate low_res and high_res datasets

exo

Exo data rasterizers for topography and sza

extended

Extended Rasterizer that can rasterize flattened data.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.samplers.base.Sampler.html b/_autosummary/sup3r.preprocessing.samplers.base.Sampler.html
new file mode 100644
index 000000000..b4f2325e1
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.samplers.base.Sampler.html
@@ -0,0 +1,998 @@

sup3r.preprocessing.samplers.base.Sampler — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.samplers.base.Sampler#

class Sampler(data, sample_shape: tuple | None = None, batch_size: int = 16, feature_sets: Dict | None = None) [source]

Bases: Container

Basic Sampler class for iterating through batches of samples from the contained data.

Parameters:

  • data (Union[Sup3rX, Sup3rDataset]) – Object with data that will be sampled from. Usually the .data attribute of various Container objects, i.e. Loader, Rasterizer, Deriver, as long as the spatial dimensions are not flattened.

  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • batch_size (int) – Number of samples to get to build a single batch. A sample of (sample_shape[0], sample_shape[1], batch_size * sample_shape[2]) is first selected from the underlying dataset and then reshaped into (batch_size, *sample_shape) to get a single batch. This is more efficient than getting N = batch_size samples and then stacking.

  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.

      features : list | tuple
        List of full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.

      lr_only_features : list | tuple
        List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

      hr_exo_features : list | tuple
        List of feature names or patt*erns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
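A hedged usage sketch follows. The container, file path, and feature names are hypothetical; the point is only how the parameters above fit together.

    from sup3r.preprocessing.rasterizers.extended import Rasterizer
    from sup3r.preprocessing.samplers.base import Sampler

    # Hypothetical container providing .data (file path and extent are made up)
    container = Rasterizer('./wtk_conus_2012.h5', target=(39.0, -105.0), shape=(20, 20))

    sampler = Sampler(
        container.data,
        sample_shape=(10, 10, 12),        # (spatial_1, spatial_2, temporal)
        batch_size=16,
        feature_sets={'lr_only_features': ['pressure_0m']},  # hypothetical feature
    )
    idx = sampler.get_sample_index()      # (lat slice, lon slice, time slice, features)
    sample = sampler.data[idx]            # one observation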

Methods


get_sample_index([n_obs])

Randomly gets spatiotemporal sample index.

post_init_log([args_dict])

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

wrap(data)

Return a Sup3rDataset object or tuple of such.


Attributes


data

Return underlying data.

hr_exo_features

Get a list of exogenous high-resolution features that are only used for training e.g., mid-network high-res topo injection.

hr_features

Get the high-resolution features corresponding to hr_features_ind

hr_features_ind

Get the high-resolution feature channel indices that should be included for training.

hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN.

hr_sample_shape

Shape of the data sample to select when __next__() is called.

lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

sample_shape

Shape of the data sample to select when __next__() is called.

shape

Get shape of underlying data.

get_sample_index(n_obs=None) [source]

Randomly gets spatiotemporal sample index.

Notes

If n_obs > 1 this will get a time slice with n_obs * self.sample_shape[2] time steps, which will then be reshaped into n_obs samples each with self.sample_shape[2] time steps. This is a much more efficient way of getting batches of samples, but it only works if there are enough continuous time steps to sample.

Returns:

sample_index (tuple) – Tuple of latitude slice, longitude slice, time slice, and features. Used to get a single observation like self.data[sample_index].
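The n_obs reshaping trick in the note above amounts to the following numpy sketch (shapes are illustrative):

    import numpy as np

    # Illustrative shapes only: 4 samples of 12 time steps, 3 features
    n_obs, sample_t, n_feats = 4, 12, 3
    chunk = np.random.rand(10, 10, n_obs * sample_t, n_feats)  # one contiguous read
    # Split time into (n_obs, sample_t) blocks and move n_obs to the front
    batch = chunk.reshape(10, 10, n_obs, sample_t, n_feats)
    batch = np.moveaxis(batch, 2, 0)          # -> (n_obs, 10, 10, 12, 3)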

preflight() [source]

Check if the sample_shape is larger than the requested raster size

property sample_shape: Tuple

Shape of the data sample to select when __next__() is called.

property hr_sample_shape: Tuple

Shape of the data sample to select when __next__() is called. Same as sample_shape.

property lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

property hr_exo_features

Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection. These must come at the end of the high-res feature set. These can also be input to the model as low-res features.

property data

Return underlying data.

Returns:

Sup3rDataset

See also

wrap()

property hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN. Does not include high-resolution exogenous features.

post_init_log(args_dict=None)

Log additional arguments after initialization.

property shape

Get shape of underlying data.

wrap(data)

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.

property hr_features_ind

Get the high-resolution feature channel indices that should be included for training. Any high-resolution features that are only included in the data handler to be coarsened for the low-res input are removed.

property hr_features

Get the high-resolution features corresponding to hr_features_ind
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.samplers.base.html b/_autosummary/sup3r.preprocessing.samplers.base.html
new file mode 100644
index 000000000..92214ac03
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.samplers.base.html
@@ -0,0 +1,741 @@

sup3r.preprocessing.samplers.base — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.samplers.base#

Basic Sampler objects. These are containers which can also sample from the underlying data. These interface with BatchQueues, so they also have additional information about how different features are used by models.

Classes


Sampler(data[, sample_shape, batch_size, ...])

Basic Sampler class for iterating through batches of samples from the contained data.

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.html b/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.html
new file mode 100644
index 000000000..ce4b22a33
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.html
@@ -0,0 +1,1068 @@

sup3r.preprocessing.samplers.cc.DualSamplerCC — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.samplers.cc.DualSamplerCC#

class DualSamplerCC(data: Sup3rDataset, sample_shape: tuple | None = None, batch_size: int = 16, s_enhance: int = 1, t_enhance: int = 24, feature_sets: Dict | None = None) [source]

Bases: DualSampler


Special sampling of WTK or NSRDB data for climate change applications

Note

This will always give daily / hourly data if t_enhance != 1. The number of days / hours in the samples is determined by t_enhance. For example, if t_enhance = 8 and sample_shape = (..., 24) there will be 3 days in the low-res sample: lr_sample_shape = (..., 3). If 1 < t_enhance != 24, reduce_high_res_sub_daily() will be used to reduce a high-res sample shape from (..., sample_shape[2] * 24 // t_enhance) to (..., sample_shape[2]).
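The arithmetic in this note, spelled out with the example values above:

    t_enhance = 8
    hr_sample_t = 24                            # sample_shape[2]: hourly steps wanted
    lr_sample_t = hr_sample_t // t_enhance      # 3 daily steps in the low-res sample
    raw_hr_t = hr_sample_t * 24 // t_enhance    # 72 hourly steps sampled initially
    # reduce_high_res_sub_daily() then trims raw_hr_t (72) down to hr_sample_t (24)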

+
Parameters:

  • data (Sup3rDataset) – A Sup3rDataset instance with low-res and high-res data members

  • sample_shape (tuple) – Size of arrays to sample from the high-res data. The sample shape for the low-res sampler will be determined from the enhancement factors.

  • s_enhance (int) – Spatial enhancement factor

  • t_enhance (int) – Temporal enhancement factor

  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.

      lr_only_features : list | tuple
        List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

      hr_exo_features : list | tuple
        List of feature names or patt*erns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.

See also


DualSampler


Methods


check_for_consistent_shapes()

Make sure container shapes and sample shapes are compatible with enhancement factors.

get_features(feature_sets)

Return default set of features composed from data vars in low res and high res data objects or the value provided through the feature_sets dictionary.

get_middle_days(high_res, sample_shape)

Get middle chunk of high_res data that will then be reduced to day time steps.

get_sample_index([n_obs])

Get sample index for expanded hourly chunk which will be reduced to the given sample shape.

post_init_log([args_dict])

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

reduce_high_res_sub_daily(high_res[, csr_ind])

Take an hourly high-res observation and reduce the temporal axis down to lr_sample_shape[2] * t_enhance time steps, using only daylight hours on the middle part of the high res data.

wrap(data)

Return a Sup3rDataset object or tuple of such.


Attributes


data

Return underlying data.

hr_exo_features

Get a list of exogenous high-resolution features that are only used for training e.g., mid-network high-res topo injection.

hr_features

Get the high-resolution features corresponding to hr_features_ind

hr_features_ind

Get the high-resolution feature channel indices that should be included for training.

hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN.

hr_sample_shape

Shape of the data sample to select when __next__() is called.

lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

sample_shape

Shape of the data sample to select when __next__() is called.

shape

Get shape of underlying data.

check_for_consistent_shapes() [source]

Make sure container shapes and sample shapes are compatible with enhancement factors.

reduce_high_res_sub_daily(high_res, csr_ind=0) [source]

Take an hourly high-res observation and reduce the temporal axis down to lr_sample_shape[2] * t_enhance time steps, using only daylight hours on the middle part of the high res data.

Parameters:

  • high_res (Union[np.ndarray, da.core.Array]) – 5D array with dimensions (n_obs, spatial_1, spatial_2, temporal, n_features) where temporal >= 24 (set by the data handler).

  • csr_ind (int) – Feature index of clearsky_ratio, e.g. self.data[..., csr_ind] -> cs_ratio

Returns:

high_res (Union[np.ndarray, da.core.Array]) – 5D array with dimensions (n_obs, spatial_1, spatial_2, temporal, n_features) where temporal has been reduced down to the integer lr_sample_shape[2] * t_enhance. For example, if hr_sample_shape[2] is 9 and t_enhance = 8, 72 hourly time steps will be reduced to 9 using the center daylight 9 hours from the second day.

Note

This only does something when 1 < t_enhance < 24. If t_enhance = 24 there is no need for reduction since every daily time step will have 24 hourly time steps in the high_res batch data. Of course, if t_enhance = 1, we are running a spatial-only model so this routine is unnecessary.

*Needs review from @grantbuster

static get_middle_days(high_res, sample_shape) [source]

Get middle chunk of high_res data that will then be reduced to day time steps. This has n_time_steps = 24 if sample_shape[-1] <= 24, otherwise n_time_steps = sample_shape[-1].

get_sample_index(n_obs=None) [source]

Get sample index for expanded hourly chunk which will be reduced to the given sample shape.

property data

Return underlying data.

Returns:

Sup3rDataset

See also

wrap()

get_features(feature_sets)

Return default set of features composed from data vars in low res and high res data objects or the value provided through the feature_sets dictionary.

property hr_exo_features

Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection. These must come at the end of the high-res feature set. These can also be input to the model as low-res features.

property hr_features

Get the high-resolution features corresponding to hr_features_ind

property hr_features_ind

Get the high-resolution feature channel indices that should be included for training. Any high-resolution features that are only included in the data handler to be coarsened for the low-res input are removed.

property hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN. Does not include high-resolution exogenous features.

property hr_sample_shape: Tuple

Shape of the data sample to select when __next__() is called. Same as sample_shape.

property lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

post_init_log(args_dict=None)

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

property sample_shape: Tuple

Shape of the data sample to select when __next__() is called.

property shape

Get shape of underlying data.

wrap(data)

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.samplers.cc.html b/_autosummary/sup3r.preprocessing.samplers.cc.html
new file mode 100644
index 000000000..d27ba3fb6
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.samplers.cc.html
@@ -0,0 +1,739 @@

sup3r.preprocessing.samplers.cc — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.samplers.cc#

Sampler for climate change applications.

Classes


DualSamplerCC(data[, sample_shape, ...])

Special sampling of WTK or NSRDB data for climate change applications

\ No newline at end of file
diff --git a/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.html b/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.html
new file mode 100644
index 000000000..84a7e337d
--- /dev/null
+++ b/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.html
@@ -0,0 +1,995 @@

sup3r.preprocessing.samplers.dc.SamplerDC — sup3r 0.2.1.dev62 documentation

sup3r.preprocessing.samplers.dc.SamplerDC#

class SamplerDC(data: Sup3rX | Sup3rDataset, sample_shape: tuple | None = None, batch_size: int = 16, feature_sets: Dict | None = None, spatial_weights: ndarray | Array | List | None = None, temporal_weights: ndarray | Array | List | None = None) [source]

Bases: Sampler

DataCentric Sampler class used for sampling based on weights which can be updated during training.

Parameters:

  • data (Union[Sup3rX, Sup3rDataset]) – Object with data that will be sampled from. Usually the .data attribute of various Container objects, i.e. Loader, Rasterizer, Deriver, as long as the spatial dimensions are not flattened.

  • sample_shape (tuple) – Size of arrays to sample from the contained data.

  • batch_size (int) – Number of samples to get to build a single batch. A sample of (sample_shape[0], sample_shape[1], batch_size * sample_shape[2]) is first selected from the underlying dataset and then reshaped into (batch_size, *sample_shape) to get a single batch. This is more efficient than getting N = batch_size samples and then stacking.

  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features. See Sampler.

  • spatial_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the spatial sampling, e.g. if we want to start off sampling across 2 spatial bins evenly this should be [0.5, 0.5]. During training these weights will be updated based on performance across the bins associated with these weights.

  • temporal_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the temporal sampling, e.g. if we want to start off sampling only the first season of the year this should be [1, 0, 0, 0]. During training these weights will be updated based on performance across the bins associated with these weights.
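A hedged usage sketch; the container and file path are hypothetical and the weights simply mirror the examples in the parameter list above.

    from sup3r.preprocessing.rasterizers.extended import Rasterizer
    from sup3r.preprocessing.samplers.dc import SamplerDC

    # Hypothetical container providing .data (file path and extent are made up)
    container = Rasterizer('./wtk_conus_2012.h5', target=(39.0, -105.0), shape=(20, 20))

    # 2 spatial bins sampled evenly; only the first season sampled at first
    sampler = SamplerDC(
        container.data,
        sample_shape=(10, 10, 12),
        spatial_weights=[0.5, 0.5],
        temporal_weights=[1, 0, 0, 0],
    )
    # During training, shift weight toward poorly performing bins:
    sampler.update_weights([0.2, 0.8], [0.25, 0.25, 0.25, 0.25])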

Methods


get_sample_index([n_obs])

Randomly gets weighted spatial sample and time sample indices

post_init_log([args_dict])

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

update_weights(spatial_weights, temporal_weights)

Update spatial and temporal sampling weights.

wrap(data)

Return a Sup3rDataset object or tuple of such.


Attributes


data

Return underlying data.

hr_exo_features

Get a list of exogenous high-resolution features that are only used for training e.g., mid-network high-res topo injection.

hr_features

Get the high-resolution features corresponding to hr_features_ind

hr_features_ind

Get the high-resolution feature channel indices that should be included for training.

hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN.

hr_sample_shape

Shape of the data sample to select when __next__() is called.

lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

sample_shape

Shape of the data sample to select when __next__() is called.

shape

Get shape of underlying data.

update_weights(spatial_weights, temporal_weights) [source]

Update spatial and temporal sampling weights.

get_sample_index(n_obs=None) [source]

Randomly gets weighted spatial sample and time sample indices

Returns:

observation_index (tuple) – Tuple of sampled spatial grid, time slice, and features indices. Used to get a single observation like self.data[observation_index].

property data

Return underlying data.

Returns:

Sup3rDataset

See also

wrap()

property hr_exo_features

Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection. These must come at the end of the high-res feature set. These can also be input to the model as low-res features.

property hr_features

Get the high-resolution features corresponding to hr_features_ind

property hr_features_ind

Get the high-resolution feature channel indices that should be included for training. Any high-resolution features that are only included in the data handler to be coarsened for the low-res input are removed.

property hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN. Does not include high-resolution exogenous features.

property hr_sample_shape: Tuple

Shape of the data sample to select when __next__() is called. Same as sample_shape.

property lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

post_init_log(args_dict=None)

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

property sample_shape: Tuple

Shape of the data sample to select when __next__() is called.

property shape

Get shape of underlying data.

wrap(data)

Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.dc.html b/_autosummary/sup3r.preprocessing.samplers.dc.html new file mode 100644 index 000000000..6ddb70fd3 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.dc.html @@ -0,0 +1,740 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.dc — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.dc

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.dc#

+

Data centric sampler. This samples container data according to weights +which are updated during training based on performance of the model.

+

Classes

+
+ + + + + +

SamplerDC(data[, sample_shape, batch_size, ...])

DataCentric Sampler class used for sampling based on weights which can be updated during training.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.html b/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.html new file mode 100644 index 000000000..0144c1c29 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.html @@ -0,0 +1,1004 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.dual.DualSampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.preprocessing.samplers.dual.DualSampler#

+
+
+class DualSampler(data: Sup3rDataset, sample_shape: tuple | None = None, batch_size: int = 16, s_enhance: int = 1, t_enhance: int = 1, feature_sets: Dict | None = None)[source]#
+

Bases: Sampler

+

Sampler for sampling from paired (or dual) datasets. Pairs consist of +low and high resolution data, which are contained by a Sup3rDataset.

+
+
Parameters:
+
    +
  • data (Sup3rDataset) – A Sup3rDataset instance with +low-res and high-res data members

  • +
  • sample_shape (tuple) – Size of arrays to sample from the high-res data. The sample shape +for the low-res sampler will be determined from the enhancement +factors.

  • +
  • s_enhance (int) – Spatial enhancement factor

  • +
  • t_enhance (int) – Temporal enhancement factor

  • +
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is +split between lr_only_features and hr_exo_features.

    +
    +
    lr_only_featureslist | tuple

    List of feature names or patt*erns that should only be +included in the low-res training set and not the high-res +observations.

    +
    +
    hr_exo_featureslist | tuple

    List of feature names or patt*erns that should be included +in the high-resolution observation but not expected to be +output from the generative model. An example is high-res +topography that is to be injected mid-network.

    +
    +
    +
  • +
+
+
+

Methods

+
+ + + + + + + + + + + + + + + + + + + + +

check_for_consistent_shapes()

Make sure container shapes are compatible with enhancement factors.

get_features(feature_sets)

Return default set of features composed from data vars in low res and high res data objects or the value provided through the feature_sets dictionary.

get_sample_index([n_obs])

Get paired sample index, consisting of index for the low res sample and the index for the high res sample with the same spatiotemporal extent.

post_init_log([args_dict])

Log additional arguments after initialization.

preflight()

Check if the sample_shape is larger than the requested raster size

wrap(data)

Return a Sup3rDataset object or tuple of such.

+
+

Attributes

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

data

Return underlying data.

hr_exo_features

Get a list of exogenous high-resolution features that are only used for training e.g., mid-network high-res topo injection.

hr_features

Get the high-resolution features corresponding to hr_features_ind

hr_features_ind

Get the high-resolution feature channel indices that should be included for training.

hr_out_features

Get a list of high-resolution features that are intended to be output by the GAN.

hr_sample_shape

Shape of the data sample to select when __next__() is called.

lr_only_features

List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

sample_shape

Shape of the data sample to select when __next__() is called.

shape

Get shape of underlying data.

+
+
+
+property hr_sample_shape: Tuple#
+

Shape of the data sample to select when __next__() is called. Same +as sample_shape

+
+ +
+
+get_features(feature_sets)[source]#
+

Return default set of features composed from data vars in low res +and high res data objects or the value provided through the +feature_sets dictionary.

+
+ +
+
+check_for_consistent_shapes()[source]#
+

Make sure container shapes are compatible with enhancement +factors.

+
+ +
+
+get_sample_index(n_obs=None)[source]#
+

Get paired sample index, consisting of index for the low res sample +and the index for the high res sample with the same spatiotemporal +extent.

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+property hr_exo_features#
+

Get a list of exogenous high-resolution features that are only used +for training e.g., mid-network high-res topo injection. These must come +at the end of the high-res feature set. These can also be input to the +model as low-res features.

+
+ +
+
+property hr_features#
+

Get the high-resolution features corresponding to +hr_features_ind

+
+ +
+
+property hr_features_ind#
+

Get the high-resolution feature channel indices that should be +included for training. Any high-resolution features that are only +included in the data handler to be coarsened for the low-res input are +removed

+
+ +
+
+property hr_out_features#
+

Get a list of high-resolution features that are intended to be +output by the GAN. Does not include high-resolution exogenous +features

+
+ +
+
+property lr_only_features#
+

List of feature names or patt*erns that should only be included in +the low-res training set and not the high-res observations.

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+preflight()#
+

Check if the sample_shape is larger than the requested raster +size

+
+ +
+
+property sample_shape: Tuple#
+

Shape of the data sample to select when __next__() is called.

+
+ +
+
+property shape#
+

Get shape of underlying data.

+
+ +
+
+wrap(data)#
+

Return a Sup3rDataset object or tuple of such. This is a +tuple when the .data attribute belongs to a +Collection object like +BatchHandler. Otherwise this is +Sup3rDataset object, which is either a wrapped 2-tuple or +1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a +2-tuple when .data belongs to a dual container object like +DualSampler and a 1-tuple otherwise.

+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.dual.html b/_autosummary/sup3r.preprocessing.samplers.dual.html new file mode 100644 index 000000000..f874095c5 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.dual.html @@ -0,0 +1,741 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.dual — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.dual

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.dual#

+

Dual Sampler objects. These are used to sample from paired datasets with +low and high resolution data. These paired datasets are contained in a +Sup3rDataset object.

+

Classes

+
+ + + + + +

DualSampler(data[, sample_shape, ...])

Sampler for sampling from paired (or dual) datasets.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.html b/_autosummary/sup3r.preprocessing.samplers.html new file mode 100644 index 000000000..93ae2d3fe --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.html @@ -0,0 +1,753 @@ + + + + + + + + + + + sup3r.preprocessing.samplers — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers#

+

Container subclass with methods for sampling contained data.

+

TODO: With lazy loading / delayed calculations we could coarsen data prior to +sampling. This would allow us to use dual samplers for all cases, instead of +just for special paired datasets. This would be a nice unification.

+
+ + + + + + + + + + + + + + + + + +

base

Basic Sampler objects.

cc

Sampler for climate change applications.

dc

Data centric sampler.

dual

Dual Sampler objects.

utilities

Miscellaneous utilities for sampling

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.html new file mode 100644 index 000000000..45d9ed39a --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.html @@ -0,0 +1,775 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.daily_time_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.daily_time_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.daily_time_sampler#

+
+
+daily_time_sampler(data, shape, time_index)[source]#
+

Finds a random temporal slice from data starting at midnight

+
+
Parameters:
+
    +
  • data (Union[np.ndarray, da.core.Array]) – Data array with dimensions +(spatial_1, spatial_2, temporal, features)

  • +
  • shape (int) – (time_steps) Size of time slice to sample from data, must be an integer +less than or equal to 24.

  • +
  • time_index (pd.DatetimeIndex) – Time index that matches the data axis=2

  • +
+
+
Returns:
+

slice (slice) – time slice with size shape of data starting at the beginning of the day

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.html b/_autosummary/sup3r.preprocessing.samplers.utilities.html new file mode 100644 index 000000000..0c3161c87 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.html @@ -0,0 +1,757 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities#

+

Miscellaneous utilities for sampling

+

Functions

+
+ + + + + + + + + + + + + + + + + + + + + + + +

daily_time_sampler(data, shape, time_index)

Finds a random temporal slice from data starting at midnight

nsrdb_reduce_daily_data(data, shape[, csr_ind])

Takes a 5D array and reduces the axis=3 temporal dim to daylight hours.

nsrdb_sub_daily_sampler(data, shape[, ...])

Finds a random sample during daylight hours of a day.

uniform_box_sampler(data_shape, sample_shape)

Returns a 2D spatial slice used to extract a sample from a data array.

uniform_time_sampler(data_shape, sample_shape)

Returns temporal slice used to extract temporal chunk from data.

weighted_box_sampler(data_shape, ...)

Extracts a temporal slice from data with selection weighted based on provided weights

weighted_time_sampler(data_shape, ...)

Returns a temporal slice with selection weighted based on provided weights used to extract temporal chunk from data

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.html b/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.html new file mode 100644 index 000000000..ac70c7e2f --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.html @@ -0,0 +1,778 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data#

+
+
+nsrdb_reduce_daily_data(data, shape, csr_ind=0)[source]#
+

Takes a 5D array and reduces the axis=3 temporal dim to daylight hours.

+
+
Parameters:
+
    +
  • data (Union[np.ndarray, da.core.Array]) – 5D data array, where […, csr_ind] is assumed to be clearsky ratio +with NaN at night. +(n_obs, spatial_1, spatial_2, temporal, features)

  • +
  • shape (int) – (time_steps) Size of time slice to sample from data. If this is +greater than data.shape[-2] data won’t be reduced.

  • +
  • csr_ind (int) – Index of the feature axis where clearsky ratio is located and NaN’s can +be found at night.

  • +
+
+
Returns:
+

data (Union[np.ndarray, da.core.Array]) – Same as input but with axis=3 reduced to dailylight hours with +requested shape.

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.html new file mode 100644 index 000000000..d7fbbb84b --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.html @@ -0,0 +1,777 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler#

+
+
+nsrdb_sub_daily_sampler(data, shape, time_index=None)[source]#
+

Finds a random sample during daylight hours of a day. Nightime is +assumed to be marked as NaN in feature axis == csr_ind in the data input.

+
+
Parameters:
+
    +
  • data (Union[Sup3rX, Sup3rDataset]) – Dataset object with ‘clearsky_ratio’ accessible as +data[‘clearsky_ratio’] (spatial_1, spatial_2, temporal, features)

  • +
  • shape (int) – (time_steps) Size of time slice to sample from data, must be an integer +less than or equal to 24.

  • +
  • time_index (pd.DatetimeIndex) – Time index corresponding the the time axis of data. If None then +data.time_index will be used.

  • +
+
+
Returns:
+

tslice (slice) – time slice with size shape of data starting at the beginning of the day

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.html new file mode 100644 index 000000000..abb4b2a4a --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.html @@ -0,0 +1,772 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.uniform_box_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.uniform_box_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.uniform_box_sampler#

+
+
+uniform_box_sampler(data_shape, sample_shape)[source]#
+

Returns a 2D spatial slice used to extract a sample from a data array.

+
+
Parameters:
+
    +
  • data_shape (tuple) – (rows, cols) Size of full grid available for sampling

  • +
  • sample_shape (tuple) – (rows, cols) Size of grid to sample from data

  • +
+
+
Returns:
+

slices (list) – List of slices corresponding to row and col extent of arr sample

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.html new file mode 100644 index 000000000..28287b47d --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.html @@ -0,0 +1,774 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.uniform_time_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.uniform_time_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.uniform_time_sampler#

+
+
+uniform_time_sampler(data_shape, sample_shape, crop_slice=slice(None, None, None))[source]#
+

Returns temporal slice used to extract temporal chunk from data.

+
+
Parameters:
+
    +
  • data_shape (tuple) – (rows, cols, n_steps) Size of full spatiotemporal data grid available +for sampling

  • +
  • sample_shape (int) – (time_steps) Size of time slice to sample from data grid

  • +
  • crop_slice (slice) – Optional slice used to restrict the sampling window.

  • +
+
+
Returns:
+

slice (slice) – time slice with size shape

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.html new file mode 100644 index 000000000..c0467308e --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.html @@ -0,0 +1,777 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.weighted_box_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.weighted_box_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.weighted_box_sampler#

+
+
+weighted_box_sampler(data_shape, sample_shape, weights)[source]#
+

Extracts a temporal slice from data with selection weighted based on +provided weights

+
+
Parameters:
+
    +
  • data_shape (tuple) – (rows, cols) Size of full spatial grid available for sampling

  • +
  • sample_shape (tuple) – (rows, cols) Size of grid to sample from data

  • +
  • weights (ndarray) – Array of weights used to specify selection strategy. e.g. If weights is +[0.2, 0.4, 0.1, 0.3] then the upper left quadrant of the spatial +domain will be sampled 20 percent of the time, the upper right quadrant +will be sampled 40 percent of the time, etc.

  • +
+
+
Returns:
+

slices (list) – List of spatial slices [spatial_1, spatial_2]

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.html b/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.html new file mode 100644 index 000000000..646d96d11 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.html @@ -0,0 +1,778 @@ + + + + + + + + + + + sup3r.preprocessing.samplers.utilities.weighted_time_sampler — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.samplers.utilities.weighted_time_sampler

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.samplers.utilities.weighted_time_sampler#

+
+
+weighted_time_sampler(data_shape, sample_shape, weights)[source]#
+

Returns a temporal slice with selection weighted based on +provided weights used to extract temporal chunk from data

+
+
Parameters:
+
    +
  • data_shape (tuple) – (rows, cols, n_steps) Size of full spatiotemporal data grid available +for sampling

  • +
  • shape (tuple) – (time_steps) Size of time slice to sample from data

  • +
  • weights (list) – List of weights used to specify selection strategy. e.g. If weights +is [0.2, 0.8] then the start of the temporal slice will be selected +from the first half of the temporal extent with 0.8 probability and +0.2 probability for the second half.

  • +
+
+
Returns:
+

slice (slice) – time slice with size shape

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.check_signatures.html b/_autosummary/sup3r.preprocessing.utilities.check_signatures.html new file mode 100644 index 000000000..a8f6e137c --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.check_signatures.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.check_signatures — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.check_signatures

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.check_signatures#

+
+
+check_signatures(objs, skip_params=None)[source]#
+

Make sure signatures of objects can be parsed for required arguments.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.composite_info.html b/_autosummary/sup3r.preprocessing.utilities.composite_info.html new file mode 100644 index 000000000..f997c92de --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.composite_info.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.composite_info — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.composite_info

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.composite_info#

+
+
+composite_info(objs, skip_params=None)[source]#
+

Get composite signature and doc string for given set of objects.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.composite_sig.html b/_autosummary/sup3r.preprocessing.utilities.composite_sig.html new file mode 100644 index 000000000..3662c438d --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.composite_sig.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.composite_sig — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.composite_sig

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.composite_sig#

+
+
+composite_sig(docs: CommandDocumentation)[source]#
+

Get composite signature from command documentation instance.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.html b/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.html new file mode 100644 index 000000000..06469158b --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.html @@ -0,0 +1,762 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.compute_if_dask — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.compute_if_dask

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.compute_if_dask#

+
+
+compute_if_dask(arr)[source]#
+

Apply compute method to input if it consists of a dask array or slice +with dask elements.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.html b/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.html new file mode 100644 index 000000000..c2bc23f41 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.html @@ -0,0 +1,762 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.contains_ellipsis — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.contains_ellipsis

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.contains_ellipsis#

+
+
+contains_ellipsis(vals)[source]#
+

Check if vals contain an ellipse. This is used to correctly parse keys +for Sup3rX.__getitem__

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.html b/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.html new file mode 100644 index 000000000..bd6912782 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.html @@ -0,0 +1,763 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.dims_array_tuple — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.dims_array_tuple

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.dims_array_tuple#

+
+
+dims_array_tuple(arr)[source]#
+

Return a tuple of (dims, array) with dims equal to the ordered slice +of Dimension.order() with the same len as arr.shape. This is used to set +xr.Dataset entries. e.g. dset[var] = (dims, array)

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.expand_paths.html b/_autosummary/sup3r.preprocessing.utilities.expand_paths.html new file mode 100644 index 000000000..d61b3891c --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.expand_paths.html @@ -0,0 +1,780 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.expand_paths — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.expand_paths

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.expand_paths#

+
+
+expand_paths(fps)[source]#
+

Expand path(s)

+
+

Parameter#

+
+
fpsstr or pathlib.Path or any Sequence of those

One or multiple paths to file

+
+
+
+
returns:
+

list[str] – A list of expanded unique and sorted paths as str

+
+
+

Examples

+
>>> expand_paths("myfile.h5")
+
+
+
>>> expand_paths(["myfile.h5", "*.hdf"])
+
+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.html b/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.html new file mode 100644 index 000000000..400c52cd3 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.get_class_kwargs — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.get_class_kwargs

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.get_class_kwargs#

+
+
+get_class_kwargs(obj, kwargs)[source]#
+

Get kwargs which match obj signature.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.html b/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.html new file mode 100644 index 000000000..c82cf5af8 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.html @@ -0,0 +1,772 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.get_date_range_kwargs — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.get_date_range_kwargs

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.get_date_range_kwargs#

+
+
+get_date_range_kwargs(time_index)[source]#
+

Get kwargs for pd.date_range from a DatetimeIndex. This is used to +provide a concise time_index representation which can be passed through +the cli and avoid logging lengthly time indices.

+
+
Parameters:
+

time_index (pd.DatetimeIndex) – Output time index.

+
+
Returns:
+

kwargs (dict) – Dictionary to pass to pd.date_range(). Can also include kwarg +drop_leap

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.html b/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.html new file mode 100644 index 000000000..ac915cc6a --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.html @@ -0,0 +1,774 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.get_input_handler_class — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.get_input_handler_class

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.get_input_handler_class#

+
+
+get_input_handler_class(input_handler_name: str | None = None)[source]#
+

Get the DataHandler or +Rasterizer object.

+
+
Parameters:
+

input_handler_name (str) – Class to use for input data. Provide a string name to match a class in +sup3r.preprocessing. If None this will return +Rasterizer, which uses +LoaderNC or LoaderH5 depending on file type. This is a simple +handler object which does not derive new features from raw data.

+
+
Returns:
+

HandlerClass (Rasterizer | DataHandler) – DataHandler or Rasterizer class from sup3r.preprocessing.

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.get_obj_params.html b/_autosummary/sup3r.preprocessing.utilities.get_obj_params.html new file mode 100644 index 000000000..db6441897 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.get_obj_params.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.get_obj_params — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.get_obj_params

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.get_obj_params#

+
+
+get_obj_params(obj)[source]#
+

Get available signature parameters for obj and obj bases

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.get_source_type.html b/_autosummary/sup3r.preprocessing.utilities.get_source_type.html new file mode 100644 index 000000000..50d01ecee --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.get_source_type.html @@ -0,0 +1,769 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.get_source_type — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.get_source_type

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.get_source_type#

+
+
+get_source_type(file_paths)[source]#
+

Get data source type

+
+
Parameters:
+

file_paths (list | str) – One or more paths to data files, can include a unix-style pat*tern

+
+
Returns:
+

source_type (str) – Either “h5” or “nc”

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.html b/_autosummary/sup3r.preprocessing.utilities.html new file mode 100644 index 000000000..7254cc043 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.html @@ -0,0 +1,808 @@ + + + + + + + + + + + sup3r.preprocessing.utilities — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities#

+

Methods used across container objects.

+

Functions

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_signatures(objs[, skip_params])

Make sure signatures of objects can be parsed for required arguments.

composite_info(objs[, skip_params])

Get composite signature and doc string for given set of objects.

composite_sig(docs)

Get composite signature from command documentation instance.

compute_if_dask(arr)

Apply compute method to input if it consists of a dask array or slice with dask elements.

contains_ellipsis(vals)

Check if vals contain an ellipse.

dims_array_tuple(arr)

Return a tuple of (dims, array) with dims equal to the ordered slice of Dimension.order() with the same len as arr.shape.

expand_paths(fps)

Expand path(s)

get_class_kwargs(obj, kwargs)

Get kwargs which match obj signature.

get_date_range_kwargs(time_index)

Get kwargs for pd.date_range from a DatetimeIndex.

get_input_handler_class([input_handler_name])

Get the DataHandler or Rasterizer object.

get_obj_params(obj)

Get available signature parameters for obj and obj bases

get_source_type(file_paths)

Get data source type

is_type_of(vals, vtype)

Check if vals is an instance of type or group of that type.

log_args(func)

Decorator to log annotations and args.

lower_names(data)

Set all fields / coords / dims to lower case.

lowered(features)

Return a lower case version of the given str or list of strings.

make_time_index_from_kws(date_range_kwargs)

Function to make a pandas DatetimeIndex from the get_date_range_kwargs outputs

numpy_if_tensor(arr)

Cast array to numpy array if it is a tensor.

ordered_array(data)

Transpose arrays so they have a (space, time, ...) or (space, time, ..., feature) ordering.

ordered_dims(dims)

Return the order of dims that follows the ordering of Dimension.order() for the common dim names.

parse_ellipsis(vals, dim_num)

Replace ellipsis with N slices where N is dim_num - len(vals) + 1

parse_features([features, data])

Parse possible inputs for features (list, str, None, 'all').

parse_keys(keys[, default_coords, ...])

Return set of features and slices for all dimensions contained in dataset that can be passed to isel and transposed to standard dimension order.

parse_to_list([features, data])

Parse features and return as a list, even if features is a string.

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.is_type_of.html b/_autosummary/sup3r.preprocessing.utilities.is_type_of.html new file mode 100644 index 000000000..c4792f004 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.is_type_of.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.is_type_of — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.is_type_of

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.is_type_of#

+
+
+is_type_of(vals, vtype)[source]#
+

Check if vals is an instance of type or group of that type.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.log_args.html b/_autosummary/sup3r.preprocessing.utilities.log_args.html new file mode 100644 index 000000000..a69c07043 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.log_args.html @@ -0,0 +1,762 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.log_args — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.log_args

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.log_args#

+
+
+log_args(func)[source]#
+

Decorator to log annotations and args. This can be used to wrap __init__ +methods so we need to pass through the signature and docs

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.lower_names.html b/_autosummary/sup3r.preprocessing.utilities.lower_names.html new file mode 100644 index 000000000..d2362b360 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.lower_names.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.lower_names — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.lower_names

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.lower_names#

+
+
+lower_names(data)[source]#
+

Set all fields / coords / dims to lower case.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.lowered.html b/_autosummary/sup3r.preprocessing.utilities.lowered.html new file mode 100644 index 000000000..bc9a3fc31 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.lowered.html @@ -0,0 +1,762 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.lowered — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.lowered

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.lowered#

+
+
+lowered(features)[source]#
+

Return a lower case version of the given str or list of strings. Used to +standardize storage and lookup of features.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.html b/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.html new file mode 100644 index 000000000..ae21eab29 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.html @@ -0,0 +1,771 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.make_time_index_from_kws — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.make_time_index_from_kws

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.make_time_index_from_kws#

+
+
+make_time_index_from_kws(date_range_kwargs)[source]#
+

Function to make a pandas DatetimeIndex from the +get_date_range_kwargs outputs

+
+
Parameters:
+

date_range_kwargs (dict) – Dictionary to pass to pd.date_range(), typically produced from +get_date_range_kwargs(). Can also include kwarg drop_leap

+
+
Returns:
+

time_index (pd.DatetimeIndex) – Output time index.

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.html b/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.html new file mode 100644 index 000000000..b8c66766b --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.numpy_if_tensor — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.numpy_if_tensor

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.numpy_if_tensor#

+
+
+numpy_if_tensor(arr)[source]#
+

Cast array to numpy array if it is a tensor.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.ordered_array.html b/_autosummary/sup3r.preprocessing.utilities.ordered_array.html new file mode 100644 index 000000000..1286606fc --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.ordered_array.html @@ -0,0 +1,767 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.ordered_array — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.ordered_array

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.ordered_array#

+
+
+ordered_array(data: DataArray)[source]#
+

Transpose arrays so they have a (space, time, …) or (space, time, +…, feature) ordering.

+
+
Parameters:
+

data (xr.DataArray) – xr.DataArray with .dims attribute listing all contained dimensions

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.ordered_dims.html b/_autosummary/sup3r.preprocessing.utilities.ordered_dims.html new file mode 100644 index 000000000..89b343e28 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.ordered_dims.html @@ -0,0 +1,764 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.ordered_dims — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.ordered_dims

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.ordered_dims#

+
+
+ordered_dims(dims: Tuple)[source]#
+

Return the order of dims that follows the ordering of Dimension.order() +for the common dim names. e.g dims = (‘time’, ‘south_north’, ‘dummy’, +‘west_east’) will return (‘south_north’, ‘west_east’, ‘time’, +‘dummy’).

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.html b/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.html new file mode 100644 index 000000000..fb811bd7f --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.html @@ -0,0 +1,769 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.parse_ellipsis — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.parse_ellipsis

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.parse_ellipsis#

+
+
+parse_ellipsis(vals, dim_num)[source]#
+

Replace ellipsis with N slices where N is dim_num - len(vals) + 1

+
+
Parameters:
+
    +
  • vals (list | tuple) – Entries that will be used to index an array with dim_num dimensions.

  • +
  • dim_num (int) – Number of dimensions of array that will be indexed with given vals.

  • +
+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.parse_features.html b/_autosummary/sup3r.preprocessing.utilities.parse_features.html new file mode 100644 index 000000000..2baf4ba2a --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.parse_features.html @@ -0,0 +1,775 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.parse_features — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.parse_features

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.parse_features#

+
+
+parse_features(features: str | list | None = None, data=None)[source]#
+

Parse possible inputs for features (list, str, None, ‘all’). If ‘all’ +this returns all data_vars in data. If None this returns an empty list.

+
+

Note

+

Returns a string if input is a string and list otherwise. Need to manually +get [features] if a list is required.

+
+
+
Parameters:
+
    +
  • features (list | str | None) – Feature request to parse.

  • +
  • data (T_Dataset) – Data containing available features

  • +
+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.parse_keys.html b/_autosummary/sup3r.preprocessing.utilities.parse_keys.html new file mode 100644 index 000000000..03a310a3f --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.parse_keys.html @@ -0,0 +1,764 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.parse_keys — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.parse_keys

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.parse_keys#

+
+
+parse_keys(keys, default_coords=None, default_dims=None, default_features=None)[source]#
+

Return set of features and slices for all dimensions contained in +dataset that can be passed to isel and transposed to standard dimension +order. If keys is empty then we just want to return the coordinate +data, so features will be set to just the coordinate names.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.preprocessing.utilities.parse_to_list.html b/_autosummary/sup3r.preprocessing.utilities.parse_to_list.html new file mode 100644 index 000000000..af6733b82 --- /dev/null +++ b/_autosummary/sup3r.preprocessing.utilities.parse_to_list.html @@ -0,0 +1,761 @@ + + + + + + + + + + + sup3r.preprocessing.utilities.parse_to_list — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.preprocessing.utilities.parse_to_list

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.preprocessing.utilities.parse_to_list#

+
+
+parse_to_list(features=None, data=None)[source]#
+

Parse features and return as a list, even if features is a string.

+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.qa.html b/_autosummary/sup3r.qa.html new file mode 100644 index 000000000..312a52367 --- /dev/null +++ b/_autosummary/sup3r.qa.html @@ -0,0 +1,744 @@ + + + + + + + + + + + sup3r.qa — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.qa

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.qa#

+

sup3r QA module.

+
+ + + + + + + + + + + +

qa

sup3r QA module.

qa_cli

sup3r QA module CLI entry points.

utilities

Utilities used for QA

+
+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.qa.qa.Sup3rQa.html b/_autosummary/sup3r.qa.qa.Sup3rQa.html new file mode 100644 index 000000000..ce4d44297 --- /dev/null +++ b/_autosummary/sup3r.qa.qa.Sup3rQa.html @@ -0,0 +1,1017 @@ + + + + + + + + + + + sup3r.qa.qa.Sup3rQa — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

sup3r.qa.qa.Sup3rQa#

+
+
+class Sup3rQa(source_file_paths, out_file_path, s_enhance, t_enhance, temporal_coarsening_method, features=None, output_names=None, input_handler_name=None, input_handler_kwargs=None, qa_fp=None, bias_correct_method=None, bias_correct_kwargs=None, save_sources=True)[source]#
+

Bases: object

+

Class for doing QA on sup3r forward pass outputs.

+
+

Note

+

This only works if the sup3r forward pass output can be reshaped into a 2D +raster dataset (e.g. no sparsifying of the meta data).

+
+
+
Parameters:
• source_file_paths (list | str) – A list of low-resolution source files to extract raster data from. Each file must have the same number of timesteps. Can also pass a string with a unix-style file path which will be passed through glob.glob
• out_file_path (str) – A single sup3r-resolved output file (either .nc or .h5) with high-resolution data corresponding to the source_file_paths * s_enhance * t_enhance
• s_enhance (int) – Factor by which the Sup3rGan model will enhance the spatial dimensions of low-resolution data
• t_enhance (int) – Factor by which the Sup3rGan model will enhance the temporal dimension of low-resolution data
• temporal_coarsening_method (str | list) – [subsample, average, total, min, max] Subsample will take every t_enhance-th time step, average will average over t_enhance time steps, and total will sum over t_enhance time steps. This can also be a list of method names corresponding to the list of features.
• features (str | list | None) – Explicit list of features to validate. Can be a single feature str, a list of feature names, or None for all features found in the out_file_path.
• output_names (str | list) – Optional output file dataset names corresponding to the features list input
• input_handler_name (str | None) – Data handler class to use for input data. Provide a string name to match a class in sup3r.preprocessing.data_handlers. If None the correct handler will be guessed based on file type.
• input_handler_kwargs (dict) – Keyword arguments for the input_handler. See the Rasterizer class for argument details.
• qa_fp (str | None) – Optional filepath to output QA file when you call Sup3rQa.run() (only .h5 is supported)
• bias_correct_method (str | None) – Optional bias correction function name that can be imported from the sup3r.bias.bias_transforms module. This will transform the source data according to some predefined bias correction transformation along with the bias_correct_kwargs. As the first argument, this method must receive a generic numpy array of data to be bias corrected
• bias_correct_kwargs (dict | None) – Optional namespace of kwargs to provide to bias_correct_method. If this is provided, it must be a dictionary where each key is a feature name and each value is a dictionary of kwargs to correct that feature. You can bias correct only certain input features by only including those feature names in this dict.
• save_sources (bool) – Flag to save re-coarsened synthetic data and true low-res data to qa_fp in addition to the error dataset
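A hedged end-to-end sketch of initializing and running QA; the file paths, enhancement factors, and feature name below are hypothetical placeholders, not values from this documentation:

>>> from sup3r.qa.qa import Sup3rQa
>>> qa = Sup3rQa(
...     source_file_paths='./era5_2015_*.nc',    # hypothetical low-res source files
...     out_file_path='./sup3r_out_000000.h5',   # hypothetical forward pass output
...     s_enhance=20,
...     t_enhance=24,
...     temporal_coarsening_method='average',
...     features=['windspeed_100m'],             # hypothetical feature to validate
...     qa_fp='./qa.h5',
... )
>>> errors = qa.run()  # dict of (space1, space2, time) error arrays per feature
>>> qa.close()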

Methods

bias_correct_input_handler(input_handler) – Apply bias correction to all source features which have bias correction data and return a Deriver instance to use for derivations of features to match output features.
close() – Close any open file handlers
coarsen_data(idf, feature, data) – Re-coarsen a high-resolution synthetic output dataset
export(qa_fp, data, dset_name[, dset_suffix]) – Export error dictionary to h5 file.
get_dset_out(name) – Get an output dataset from the forward pass output file.
get_node_cmd(config) – Get a CLI call to initialize Sup3rQa and execute the Sup3rQa.run() method based on an input config
run() – Go through all datasets and get the error for the re-coarsened synthetic minus the true low-res source data.

Attributes

features – Get a list of feature names from the output file, excluding meta and time index datasets
output_names – Get a list of output dataset names corresponding to the features list
output_type – Get output data type
close()[source]#

Close any open file handlers

property features#

Get a list of feature names from the output file, excluding meta and time index datasets

Returns:

list
property output_names#

Get a list of output dataset names corresponding to the features list
property output_type#

Get output data type

Returns:

output_type – e.g. 'nc' or 'h5'
bias_correct_input_handler(input_handler)[source]#

Apply bias correction to all source features which have bias correction data and return a Deriver instance to use for derivations of features to match output features.

(1) Check if we need to derive any features included in the bias_correct_kwargs.
(2) Derive these features using the input_handler.derive method, and update the stored data.
(3) Apply bias correction to all the features in the bias_correct_kwargs.
(4) Derive the features required for validation from the bias-corrected data and update the stored data.
(5) Return the updated input_handler, now a Deriver object.
get_dset_out(name)[source]#

Get an output dataset from the forward pass output file.

TODO: Make this dim order agnostic. If we didn't have the h5 condition we could just do transpose('south_north', 'west_east', 'time')

Parameters:

name (str) – Name of the output dataset to retrieve. Must be found in the features property and the forward pass output file.

Returns:

out (np.ndarray) – A copy of the high-resolution output data as a numpy array of shape (spatial_1, spatial_2, temporal)
coarsen_data(idf, feature, data)[source]#

Re-coarsen a high-resolution synthetic output dataset

Parameters:

• idf (int) – Feature index
• feature (str) – Feature name
• data (Union[np.ndarray, da.core.Array]) – A copy of the high-resolution output data as a numpy array of shape (spatial_1, spatial_2, temporal)

Returns:

data (Union[np.ndarray, da.core.Array]) – A spatiotemporally coarsened copy of the input dataset, still with shape (spatial_1, spatial_2, temporal)
classmethod get_node_cmd(config)[source]#

Get a CLI call to initialize Sup3rQa and execute the Sup3rQa.run() method based on an input config

Parameters:

config (dict) – sup3r QA config with all necessary args and kwargs to initialize Sup3rQa and execute Sup3rQa.run()
export(qa_fp, data, dset_name, dset_suffix='')[source]#

Export error dictionary to h5 file.

Parameters:

• qa_fp (str | None) – Optional filepath to output QA file (only .h5 is supported)
• data (Union[np.ndarray, da.core.Array]) – An array with shape (space1, space2, time) that represents the re-coarsened synthetic data minus the source true low-res data, or another dataset of the same shape to be written to disk
• dset_name (str) – Base dataset name to save data to
• dset_suffix (str) – Optional suffix to append to dset_name with an underscore before saving.
run()[source]#

Go through all datasets and get the error for the re-coarsened synthetic minus the true low-res source data.

Returns:

errors (dict) – Dictionary of errors, where keys are the feature names, and each value is an array with shape (space1, space2, time) that represents the re-coarsened synthetic data minus the source true low-res data

sup3r.qa.qa#

sup3r QA module.

TODO: Good initial refactor but can do more cleaning here. Should use Loaders and the Sup3rX.unflatten() method (for H5) to make things more agnostic to dim ordering.

Classes

Sup3rQa(source_file_paths, out_file_path, ...) – Class for doing QA on sup3r forward pass outputs.
sup3r.qa.qa_cli#

sup3r QA module CLI entry points.
sup3r.qa.utilities.continuous_dist#

continuous_dist(diffs, bins=None, range=None, interpolate=False)[source]#

Get interpolated distribution from histogram

Parameters:

• diffs (ndarray) – Array of values to use to construct distribution
• bins (int) – Number of bins for the distribution. If None then the number of bins will be determined from the value range and the smallest difference between values
• range (tuple | None) – Optional min/max range for the distribution.
• interpolate (bool) – Whether to interpolate over histogram counts. e.g. if a bin has count = 0 and surrounding bins have count > 0 the bin with count = 0 will have an interpolated value.

Returns:

• ndarray – distribution value counts
• ndarray – distribution values at bin centers
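A short sketch with synthetic data; per the Returns section the value counts come first, then the bin centers:

>>> import numpy as np
>>> from sup3r.qa.utilities import continuous_dist
>>> diffs = np.random.normal(0, 1, 10000)  # synthetic differences
>>> counts, centers = continuous_dist(diffs, bins=50, interpolate=True)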
+


sup3r.qa.utilities.direct_dist#

direct_dist(var, bins=40, range=None, diff_max=None, scale=1, percentile=99.9, interpolate=False, period=None)[source]#

Returns the direct distribution for the given variable.

Parameters:

• var (ndarray) – (lat, lon, temporal)
• bins (int) – Number of bins for the direct pdf.
• range (tuple | None) – Optional min/max range for the direct pdf.
• diff_max (float) – Max value to keep for the given variable
• scale (int) – Factor to scale the distribution by. This is used so that distributions from data with different resolutions can be compared. For instance, if this is calculating a vorticity distribution from data with a spatial resolution of 4km then the distribution needs to be scaled by 4km to compare to another scaled vorticity distribution with a different resolution.
• percentile (float) – Percentile to use to determine the maximum allowable value in the distribution. e.g. percentile=99 eliminates values above the 99th percentile from the histogram.
• interpolate (bool) – Whether to interpolate over histogram counts. e.g. if a bin has count = 0 and surrounding bins have count > 0 the bin with count = 0 will have an interpolated value.
• period (float | None) – If the variable is periodic this gives that period. e.g. If the variable is winddirection the period is 360 degrees and we need to account for 0 and 360 being close.

Returns:

• ndarray – var at bin centers
• ndarray – var value counts
• float – Normalization factor
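A sketch with a synthetic (lat, lon, temporal) array; the shape and keyword values are arbitrary:

>>> import numpy as np
>>> from sup3r.qa.utilities import direct_dist
>>> var = np.random.rand(10, 10, 48)  # synthetic (lat, lon, temporal)
>>> centers, counts, norm = direct_dist(var, bins=40, percentile=99.9)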
+


sup3r.qa.utilities.frequency_spectrum#

frequency_spectrum(var, f_range=None)[source]#

Frequency Spectrum. Gives the portion of the variable associated with each frequency.

Parameters:

• var (ndarray) – (lat, lon, temporal)
• f_range (list | None) – List with min and max frequency. When comparing spectra for different domains this needs to be tailored to the specific domain. e.g. f = [1/max_time, …, 1/min_time]. If this is not specified, f will be set to [0, …, len(y)] where y is the fft output.

Returns:

• ndarray – Array of frequencies corresponding to energy amplitudes
• ndarray – 1D array of amplitudes corresponding to the portion of the variable with a given frequency
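A sketch with synthetic data, leaving f_range at its default:

>>> import numpy as np
>>> from sup3r.qa.utilities import frequency_spectrum
>>> var = np.random.rand(10, 10, 240)  # synthetic (lat, lon, temporal)
>>> freqs, amps = frequency_spectrum(var)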
+


sup3r.qa.utilities.gradient_dist#

gradient_dist(var, bins=40, range=None, diff_max=None, scale=1, percentile=99.9, interpolate=False, period=None)[source]#

Returns the gradient distribution for the given variable.

Parameters:

• var (ndarray) – (lat, lon, temporal)
• bins (int) – Number of bins for the gradient pdf.
• range (tuple | None) – Optional min/max range for the gradient pdf.
• diff_max (float) – Max value to keep for the gradient
• scale (int) – Factor to scale the distribution by. This is used so that distributions from data with different resolutions can be compared. For instance, if this is calculating a velocity gradient distribution from data with a spatial resolution of 4km then the distribution needs to be scaled by 4km to compare to another scaled velocity gradient distribution with a different resolution.
• percentile (float) – Percentile to use to determine the maximum allowable value in the distribution. e.g. percentile=99 eliminates values above the 99th percentile from the histogram.
• interpolate (bool) – Whether to interpolate over histogram counts. e.g. if a bin has count = 0 and surrounding bins have count > 0 the bin with count = 0 will have an interpolated value.
• period (float | None) – If the variable is periodic this gives that period. e.g. If the variable is winddirection the period is 360 degrees and we need to account for 0 and 360 being close.

Returns:

• ndarray – d(var) / dx at bin centers
• ndarray – d(var) / dx value counts
• float – Normalization factor
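A sketch with synthetic data; scale=4 here illustrates the 4km example from the scale parameter description:

>>> import numpy as np
>>> from sup3r.qa.utilities import gradient_dist
>>> var = np.random.rand(100, 100, 24)  # synthetic (lat, lon, temporal)
>>> centers, counts, norm = gradient_dist(var, bins=40, scale=4)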
+


sup3r.qa.utilities#

Utilities used for QA

Functions

continuous_dist(diffs[, bins, range, ...]) – Get interpolated distribution from histogram
direct_dist(var[, bins, range, diff_max, ...]) – Returns the direct distribution for the given variable.
frequency_spectrum(var[, f_range]) – Frequency Spectrum.
gradient_dist(var[, bins, range, diff_max, ...]) – Returns the gradient distribution for the given variable.
time_derivative_dist(var[, bins, range, ...]) – Returns the time derivative distribution for the given variable.
tke_frequency_spectrum(u, v[, f_range]) – Kinetic Energy Spectrum.
tke_wavenumber_spectrum(u, v[, x_range, axis]) – Turbulent Kinetic Energy Spectrum.
wavenumber_spectrum(var[, x_range, axis]) – Wavenumber Spectrum.

sup3r.qa.utilities.time_derivative_dist#

time_derivative_dist(var, bins=40, range=None, diff_max=None, t_steps=1, scale=1, percentile=99.9, interpolate=False, period=None)[source]#

Returns the time derivative distribution for the given variable.

Parameters:

• var (ndarray) – (lat, lon, temporal)
• bins (int) – Number of bins for the time derivative pdf.
• range (tuple | None) – Optional min/max range for the time derivative pdf.
• diff_max (float) – Max value to keep for the time derivative
• t_steps (int) – Number of time steps to use for differences. e.g. If t_steps=1 this uses var[i + 1] - var[i] to compute time derivatives.
• scale (int) – Factor to scale the distribution by. This is used so that distributions from data with different resolutions can be compared. For instance, if this is calculating a time derivative distribution from data with a temporal resolution of 15min then the distribution needs to be scaled by 15min to compare to another scaled time derivative distribution with a different resolution
• percentile (float) – Percentile to use to determine the maximum allowable value in the distribution. e.g. percentile=99 eliminates values above the 99th percentile from the histogram.
• interpolate (bool) – Whether to interpolate over histogram counts. e.g. if a bin has count = 0 and surrounding bins have count > 0 the bin with count = 0 will have an interpolated value.
• period (float | None) – If the variable is periodic this gives that period. e.g. If the variable is winddirection the period is 360 degrees and we need to account for 0 and 360 being close.

Returns:

• ndarray – d(var) / dt values at bin centers
• ndarray – d(var) / dt value counts
• float – Normalization factor
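A sketch with synthetic data; t_steps=1 computes simple one-step differences as described above:

>>> import numpy as np
>>> from sup3r.qa.utilities import time_derivative_dist
>>> var = np.random.rand(10, 10, 96)  # synthetic (lat, lon, temporal)
>>> centers, counts, norm = time_derivative_dist(var, bins=40, t_steps=1)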
+

+
+
+
+ +
+ + +
+ + + + + + + + +
+ + + + +
+ + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.html b/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.html new file mode 100644 index 000000000..cf3708ae3 --- /dev/null +++ b/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.html @@ -0,0 +1,780 @@ + + + + + + + + + + + sup3r.qa.utilities.tke_frequency_spectrum — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.qa.utilities.tke_frequency_spectrum

+ +
+
+ +
+

Contents

+
+ +
+
+
+ + + + +
+ +
+

sup3r.qa.utilities.tke_frequency_spectrum#

tke_frequency_spectrum(u, v, f_range=None)[source]#

Kinetic Energy Spectrum. Gives the portion of kinetic energy associated with each frequency.

Parameters:

• u (ndarray) – (lat, lon) U component of wind
• v (ndarray) – (lat, lon) V component of wind
• f_range (list | None) – List with min and max frequency. When comparing spectra for different domains this needs to be tailored to the specific domain. e.g. f = [1/max_time, …, 1/min_time]. If this is not specified, f will be set to [0, …, len(y)] where y is the fft output.

Returns:

ndarray – 1D array of amplitudes corresponding to the portion of total energy with a given frequency
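A sketch with synthetic wind components shaped (lat, lon) as the parameters describe:

>>> import numpy as np
>>> from sup3r.qa.utilities import tke_frequency_spectrum
>>> u = np.random.rand(100, 100)  # synthetic U wind component
>>> v = np.random.rand(100, 100)  # synthetic V wind component
>>> amps = tke_frequency_spectrum(u, v)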


sup3r.qa.utilities.tke_wavenumber_spectrum#

tke_wavenumber_spectrum(u, v, x_range=None, axis=0)[source]#

Turbulent Kinetic Energy Spectrum. Gives the portion of kinetic energy associated with each wavenumber.

Parameters:

• u (ndarray) – (lat, lon) U component of wind
• v (ndarray) – (lat, lon) V component of wind
• x_range (list | None) – List with min and max wavenumber. When comparing spectra for different domains this needs to be tailored to the specific domain. e.g. k = [1/max_length, …, 1/min_length]. If this is not specified, k will be set to [0, …, len(y)] where y is the fft output.
• axis (int) – Axis to average over to get a 1D wind field. If axis=0 this returns the zonal energy spectrum

Returns:

• ndarray – Array of wavenumbers corresponding to energy amplitudes
• ndarray – 1D array of amplitudes corresponding to the portion of total energy with a given wavenumber
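A sketch with synthetic wind components; axis=0 requests the zonal spectrum per the axis description:

>>> import numpy as np
>>> from sup3r.qa.utilities import tke_wavenumber_spectrum
>>> u = np.random.rand(100, 100)  # synthetic U wind component
>>> v = np.random.rand(100, 100)  # synthetic V wind component
>>> k, amps = tke_wavenumber_spectrum(u, v, axis=0)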


sup3r.qa.utilities.wavenumber_spectrum#

wavenumber_spectrum(var, x_range=None, axis=0)[source]#

Wavenumber Spectrum. Gives the portion of the given variable associated with each wavenumber.

Parameters:

• var (ndarray) – (lat, lon)
• x_range (list | None) – List with min and max wavenumber. When comparing spectra for different domains this needs to be tailored to the specific domain. e.g. k = [1/max_length, …, 1/min_length]. If this is not specified, k will be set to [0, …, len(y)] where y is the fft output.
• axis (int) – Axis to average over to get a 1D field. If axis=0 this returns the zonal spectrum

Returns:

• ndarray – Array of wavenumbers corresponding to amplitudes
• ndarray – 1D array of amplitudes corresponding to the portion of the given variable with a given wavenumber
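A sketch with a synthetic (lat, lon) field:

>>> import numpy as np
>>> from sup3r.qa.utilities import wavenumber_spectrum
>>> var = np.random.rand(100, 100)  # synthetic 2D field
>>> k, amps = wavenumber_spectrum(var, axis=0)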


sup3r.solar#

Custom sup3r solar module. This primarily converts GAN output clearsky ratio to GHI, DNI, and DHI using NSRDB data and utility modules like DISC
solar – Custom sup3r solar module.
solar_cli – sup3r solar CLI entry points.

sup3r.solar.solar.Solar#

class Solar(sup3r_fps, nsrdb_fp, t_slice=slice(None, None, None), tz=-6, agg_factor=1, nn_threshold=0.5, cloud_threshold=0.99)[source]#

Bases: object

Custom sup3r solar module. This primarily converts GAN output clearsky ratio to GHI, DNI, and DHI using NSRDB data and utility modules like DISC
Parameters:

• sup3r_fps (str | list) – Full .h5 filepath(s) to one or more sup3r GAN output .h5 chunk files containing clearsky_ratio, time_index, and meta. These files must have the same meta data but can be sequential and ordered temporal chunks. The data in this file has been rolled by tz to a local time (assumed that the GAN was trained on local time solar data) and will be converted back into UTC, so it's wise to include some padding in the sup3r_fps file list.
• nsrdb_fp (str) – Filepath to NSRDB .h5 file containing clearsky_ghi, clearsky_dni, clearsky_dhi data.
• t_slice (slice) – Slicing argument to slice the temporal axis of the sup3r_fps source data after doing the tz roll to UTC but before returning the irradiance variables. This can be used to effectively pad the solar irradiance calculation in UTC time. For example, if sup3r_fps is 3 files each with 24 hours of data, t_slice can be slice(24, 48) to only output the middle day of irradiance data, but padded by the other two days for the UTC output.
• tz (int) – The timezone offset for the data in sup3r_fps. It is assumed that the GAN is trained on data in local time and therefore the output in sup3r_fps should be treated as local time. For example, -6 is CST which is default for CONUS training data.
• agg_factor (int) – Spatial aggregation factor for nsrdb-to-GAN-meta e.g. the number of NSRDB spatial pixels to average for a single sup3r GAN output site.
• nn_threshold (float) – The KDTree nearest neighbor threshold that determines how far the sup3r GAN output data has to be from the NSRDB source data to get irradiance=0. Note that this value is in decimal degrees which is a very approximate way to determine real distance.
• cloud_threshold (float) – Clearsky ratio threshold below which the data is considered cloudy and DNI is calculated using DISC.
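A hedged sketch of running the solar module on three sequential temporal chunks; the file paths follow the t_slice example above but are hypothetical:

>>> from sup3r.solar.solar import Solar
>>> fps = ['./sup3r_file_000000_000000.h5',  # hypothetical sequential chunk files
...        './sup3r_file_000001_000000.h5',
...        './sup3r_file_000002_000000.h5']
>>> solar = Solar(fps, './nsrdb_2018.h5',    # hypothetical NSRDB source file
...               t_slice=slice(24, 48), tz=-6)
>>> solar.write('./sup3r_irradiance_000001_000000.h5')  # writes ghi, dni, dhi
>>> solar.close()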

Methods

close() – Close all internal file handlers
get_node_cmd(config) – Get a CLI call to run Solar.run_temporal_chunk() on a single node based on an input config.
get_nsrdb_data(dset) – Get an NSRDB dataset with spatial index corresponding to the sup3r GAN output data, averaged across the agg_factor.
get_sup3r_fps(fp_pattern[, ignore]) – Get a list of file chunks to run in parallel based on a file pattern
preflight() – Run preflight checks on source data to make sure everything will work together.
run_temporal_chunks(fp_pattern, nsrdb_fp[, ...]) – Run the solar module on all spatial chunks for each temporal chunk corresponding to the fp_pattern and the given list of temporal_ids.
write(fp_out[, features]) – Write irradiance datasets (ghi, dni, dhi) to output h5 file.

Attributes

clearsky_ratio – Get the clearsky ghi ratio data from the GAN output, rolled from tz to UTC.
cloud_mask – Get the cloud mask (True if cloudy) based on the GAN output clearsky ratio in UTC.
dhi – Get the dhi (W/m2) which is calculated based on the simple relationship between GHI, DNI and solar zenith angle.
dist – Get the nearest neighbor distances from the sup3r GAN data sites to the NSRDB nearest neighbors.
dni – Get the dni (W/m2) which is clearsky dni (from NSRDB) when the GAN output is clear (clearsky_ratio is > cloud_threshold), and calculated from the DISC model when cloudy
ghi – Get the ghi (W/m2) based on the GAN output clearsky ratio + clearsky_ghi in UTC.
idnn – Get the nearest neighbor meta data indices from the NSRDB data that correspond to the sup3r GAN data
nsrdb_tslice – Get the time slice of the NSRDB data corresponding to the sup3r GAN output.
out_of_bounds – Get a boolean mask for the sup3r data that is out of bounds (too far from the NSRDB data).
solar_zenith_angle – Get the solar zenith angle (degrees)
time_index – Time index for the sup3r GAN output data but sliced by t_slice
preflight()[source]#

Run preflight checks on source data to make sure everything will work together.

close()[source]#

Close all internal file handlers

property idnn#

Get the nearest neighbor meta data indices from the NSRDB data that correspond to the sup3r GAN data

Returns:

idnn (Union[np.ndarray, da.core.Array]) – 2D array of length (n_sup3r_sites, agg_factor) where the values are meta data indices from the NSRDB.

property dist#

Get the nearest neighbor distances from the sup3r GAN data sites to the NSRDB nearest neighbors.

Returns:

dist (Union[np.ndarray, da.core.Array]) – 2D array of length (n_sup3r_sites, agg_factor) where the values are decimal degree distances from the sup3r sites to the nsrdb nearest neighbors.

property time_index#

Time index for the sup3r GAN output data but sliced by t_slice

Returns:

pd.DatetimeIndex

property out_of_bounds#

Get a boolean mask for the sup3r data that is out of bounds (too far from the NSRDB data).

Returns:

out_of_bounds (Union[np.ndarray, da.core.Array]) – 1D boolean array with length == number of sup3r GAN sites. True if the site is too far from the NSRDB.

property nsrdb_tslice#

Get the time slice of the NSRDB data corresponding to the sup3r GAN output.

property clearsky_ratio#

Get the clearsky ghi ratio data from the GAN output, rolled from tz to UTC.

Returns:

clearsky_ratio (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

property solar_zenith_angle#

Get the solar zenith angle (degrees)

Returns:

solar_zenith_angle (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

property ghi#

Get the ghi (W/m2) based on the GAN output clearsky ratio + clearsky_ghi in UTC.

Returns:

ghi (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

property dni#

Get the dni (W/m2) which is clearsky dni (from NSRDB) when the GAN output is clear (clearsky_ratio is > cloud_threshold), and calculated from the DISC model when cloudy

Returns:

dni (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

property dhi#

Get the dhi (W/m2) which is calculated based on the simple relationship between GHI, DNI and solar zenith angle.

Returns:

dhi (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

property cloud_mask#

Get the cloud mask (True if cloudy) based on the GAN output clearsky ratio in UTC.

Returns:

cloud_mask (Union[np.ndarray, da.core.Array]) – 2D array with shape (time, sites) in UTC.

get_nsrdb_data(dset)[source]#

Get an NSRDB dataset with spatial index corresponding to the sup3r GAN output data, averaged across the agg_factor.

Parameters:

dset (str) – Name of dataset to retrieve from NSRDB source file

Returns:

out (Union[np.ndarray, da.core.Array]) – Dataset of shape (time, sites) where time and sites correspond to the same shape as the sup3r GAN output data and if agg_factor > 1 the sites is an average across multiple NSRDB sites.

static get_sup3r_fps(fp_pattern, ignore=None)[source]#

Get a list of file chunks to run in parallel based on a file pattern

Note

It's assumed that all source files have the pattern sup3r_file_TTTTTT_SSSSSS.h5 where TTTTTT is the zero-padded temporal chunk index and SSSSSS is the zero-padded spatial chunk index.

Parameters:

• fp_pattern (str | list) – Unix-style file*pattern that matches a set of spatiotemporally chunked sup3r forward pass output files.
• ignore (str | None) – Ignore all files that have this string in their filenames.

Returns:

• fp_sets (list) – List of file sets where each file set is 3 temporally sequential files over the same spatial chunk. Each file set overlaps with its neighbor such that fp_sets[0][-1] == fp_sets[1][0] (this is so Solar can do temporal padding when rolling from GAN local time output to UTC).
• t_slices (list) – List of t_slice arguments corresponding to fp_sets to pass to Solar class initialization that will slice and reduce the overlapping time axis when Solar outputs irradiance data.
• temporal_ids (list) – List of temporal id strings TTTTTT corresponding to the fp_sets
• spatial_ids (list) – List of spatial id strings SSSSSS corresponding to the fp_sets
• target_fps (list) – List of actual target files corresponding to fp_sets, so for example the file set fp_sets[10] sliced by t_slices[10] is designed to process target_fps[10]
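A sketch under the filename-pattern assumption in the Note above; the glob pattern is hypothetical:

>>> from sup3r.solar.solar import Solar
>>> out = Solar.get_sup3r_fps('./chunks/sup3r_file_*.h5', ignore='irradiance')
>>> fp_sets, t_slices, temporal_ids, spatial_ids, target_fps = out
>>> fp_sets[0][-1] == fp_sets[1][0]  # neighboring file sets overlap by one file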

classmethod get_node_cmd(config)[source]#

Get a CLI call to run Solar.run_temporal_chunk() on a single node based on an input config.

Parameters:

config (dict) – sup3r solar config with all necessary args and kwargs to run Solar.run_temporal_chunk() on a single node.

write(fp_out, features=('ghi', 'dni', 'dhi'))[source]#

Write irradiance datasets (ghi, dni, dhi) to output h5 file.

Parameters:

• fp_out (str) – Filepath to an output h5 file to write irradiance variables to. Parent directory will be created if it does not exist.
• features (list | tuple) – List of features to write to disk. These have to be attributes of the Solar class (ghi, dni, dhi).
classmethod run_temporal_chunks(fp_pattern, nsrdb_fp, fp_out_suffix='irradiance', tz=-6, agg_factor=1, nn_threshold=0.5, cloud_threshold=0.99, features=('ghi', 'dni', 'dhi'), temporal_ids=None)[source]#

Run the solar module on all spatial chunks for each temporal chunk corresponding to the fp_pattern and the given list of temporal_ids. This typically gets run from the CLI.

Parameters:

• fp_pattern (str) – Unix-style file*pattern that matches a set of spatiotemporally chunked sup3r forward pass output files.
• nsrdb_fp (str) – Filepath to NSRDB .h5 file containing clearsky_ghi, clearsky_dni, clearsky_dhi data.
• fp_out_suffix (str) – Suffix to add to the input sup3r source files when writing the processed solar irradiance data to new data files.
• tz (int) – The timezone offset for the data in sup3r_fps. It is assumed that the GAN is trained on data in local time and therefore the output in sup3r_fps should be treated as local time. For example, -6 is CST which is default for CONUS training data.
• agg_factor (int) – Spatial aggregation factor for nsrdb-to-GAN-meta e.g. the number of NSRDB spatial pixels to average for a single sup3r GAN output site.
• nn_threshold (float) – The KDTree nearest neighbor threshold that determines how far the sup3r GAN output data has to be from the NSRDB source data to get irradiance=0. Note that this value is in decimal degrees which is a very approximate way to determine real distance.
• cloud_threshold (float) – Clearsky ratio threshold below which the data is considered cloudy and DNI is calculated using DISC.
• features (list | tuple) – List of features to write to disk. These have to be attributes of the Solar class (ghi, dni, dhi).
• temporal_ids (list | None) – List of zero-padded temporal ids from the file chunks that match fp_pattern. This input typically gets set from the CLI. If None, this will run all temporal indices.
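A hedged sketch of the classmethod call that the CLI typically makes; the paths are hypothetical:

>>> from sup3r.solar.solar import Solar
>>> Solar.run_temporal_chunks(
...     fp_pattern='./chunks/sup3r_file_*.h5',  # hypothetical chunk pattern
...     nsrdb_fp='./nsrdb_2018.h5',             # hypothetical NSRDB file
...     fp_out_suffix='irradiance',
...     tz=-6,
... )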

  • +
+
+
+
+ +
+ +
+ + +
+ + + + + + + + +
+ + + + + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_autosummary/sup3r.solar.solar.html b/_autosummary/sup3r.solar.solar.html new file mode 100644 index 000000000..2c2eddcae --- /dev/null +++ b/_autosummary/sup3r.solar.solar.html @@ -0,0 +1,742 @@ + + + + + + + + + + + sup3r.solar.solar — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

sup3r.solar.solar

+ +
+
+ +
+
+
+ + + + +
+ +
+

sup3r.solar.solar#

Custom sup3r solar module. This primarily converts GAN output clearsky ratio to GHI, DNI, and DHI using NSRDB data and utility modules like DISC

Note that clearsky_ratio is assumed to be clearsky ghi ratio and is calculated as daily average GHI / daily average clearsky GHI.

Classes

Solar(sup3r_fps, nsrdb_fp[, t_slice, tz, ...]) – Custom sup3r solar module.

sup3r.solar.solar_cli#

sup3r solar CLI entry points.

Functions

kickoff_local_job(ctx, cmd[, pipeline_step]) – Run sup3r solar locally.
kickoff_slurm_job(ctx, cmd[, pipeline_step, ...]) – Run sup3r on HPC via SLURM job submission.

sup3r.solar.solar_cli.kickoff_local_job#

kickoff_local_job(ctx, cmd, pipeline_step=None)[source]#

Run sup3r solar locally.

Parameters:

• ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
• cmd (str) – Command to be submitted in shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'
• pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

sup3r.solar.solar_cli.kickoff_slurm_job#

kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', memory=None, walltime=4, feature=None, stdout_path='./stdout/')[source]#

Run sup3r on HPC via SLURM job submission.

Parameters:

• ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
• cmd (str) – Command to be submitted in SLURM shell script. Example: 'python -m sup3r.cli forward_pass -c <config_file>'
• pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.
• alloc (str) – HPC project (allocation) handle. Example: 'sup3r'.
• memory (int) – Node memory request in GB.
• walltime (float) – Node walltime request in hours.
• feature (str) – Additional flags for SLURM job. Format is "--qos=high" or "--depend=[state:job_id]". Default is None.
• stdout_path (str) – Path to print .stdout and .stderr files.

sup3r.utilities.ModuleName#

class ModuleName(value)[source]#

Bases: str, Enum

A collection of the module names available in sup3r. Each module name should match the name of the click command that will be used to invoke its respective cli. As of 5/26/2022, this means that all commands are lowercase with underscores replaced by dashes.
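A short sketch; the 'forward-pass' value is what the dashes-for-underscores rule above implies for FORWARD_PASS, not a value quoted from this page:

>>> from sup3r.utilities import ModuleName
>>> ModuleName.FORWARD_PASS == 'forward-pass'  # str subclass comparison (assumed value)
>>> 'forward-pass' in ModuleName.all_names()   # all_names() returns the set of name strings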

Methods

encode([encoding, errors])

Encode the string using the codec registered for encoding.

replace(old, new[, count])

Return a copy with all occurrences of substring old replaced by new.

split([sep, maxsplit])

Return a list of the substrings in the string, using sep as the separator string.

rsplit([sep, maxsplit])

Return a list of the substrings in the string, using sep as the separator string.

join(iterable, /)

Concatenate any number of strings.

capitalize()

Return a capitalized version of the string.

casefold()

Return a version of the string suitable for caseless comparisons.

title()

Return a version of the string where each word is titlecased.

center(width[, fillchar])

Return a centered string of length width.

count(sub[, start[, end]])

Return the number of non-overlapping occurrences of substring sub in string S[start:end].

expandtabs([tabsize])

Return a copy where all tab characters are expanded using spaces.

find(sub[, start[, end]])

Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end].

partition(sep, /)

Partition the string into three parts using the given separator.

index(sub[, start[, end]])

Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end].

ljust(width[, fillchar])

Return a left-justified string of length width.

lower()

Return a copy of the string converted to lowercase.

lstrip([chars])

Return a copy of the string with leading whitespace removed.

rfind(sub[, start[, end]])

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end].

rindex(sub[, start[, end]])

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end].

rjust(width[, fillchar])

Return a right-justified string of length width.

rstrip([chars])

Return a copy of the string with trailing whitespace removed.

rpartition(sep, /)

Partition the string into three parts using the given separator.

splitlines([keepends])

Return a list of the lines in the string, breaking at line boundaries.

strip([chars])

Return a copy of the string with leading and trailing whitespace removed.

swapcase()

Convert uppercase characters to lowercase and lowercase characters to uppercase.

translate(table, /)

Replace each character in the string using the given translation table.

upper()

Return a copy of the string converted to uppercase.

startswith(prefix[, start[, end]])

Return True if S starts with the specified prefix, False otherwise.

endswith(suffix[, start[, end]])

Return True if S ends with the specified suffix, False otherwise.

removeprefix(prefix, /)

Return a str with the given prefix string removed if present.

removesuffix(suffix, /)

Return a str with the given suffix string removed if present.

isascii()

Return True if all characters in the string are ASCII, False otherwise.

islower()

Return True if the string is a lowercase string, False otherwise.

isupper()

Return True if the string is an uppercase string, False otherwise.

istitle()

Return True if the string is a title-cased string, False otherwise.

isspace()

Return True if the string is a whitespace string, False otherwise.

isdecimal()

Return True if the string is a decimal string, False otherwise.

isdigit()

Return True if the string is a digit string, False otherwise.

isnumeric()

Return True if the string is a numeric string, False otherwise.

isalpha()

Return True if the string is an alphabetic string, False otherwise.

isalnum()

Return True if the string is an alpha-numeric string, False otherwise.

isidentifier()

Return True if the string is a valid Python identifier, False otherwise.

isprintable()

Return True if the string is printable, False otherwise.

zfill(width, /)

Pad a numeric string with zeros on the left, to fill a field of the given width.

format(*args, **kwargs)

Return a formatted version of S, using substitutions from args and kwargs.

format_map(mapping)

Return a formatted version of S, using substitutions from mapping.

maketrans

Return a translation table usable for str.translate().

all_names()

All module names.


Attributes


FORWARD_PASS

DATA_EXTRACT

DATA_COLLECT

QA

SOLAR

STATS

BIAS_CALC

VISUAL_QA

REGRID

classmethod all_names()[source]#

All module names.

Returns:

set – The set of all module name strings.
+ +
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+encode(encoding='utf-8', errors='strict')#
+

Encode the string using the codec registered for encoding.

+
+
encoding

The encoding in which to encode the string.

+
+
errors

The error handling scheme to use for encoding errors. +The default is ‘strict’ meaning that encoding errors raise a +UnicodeEncodeError. Other possible values are ‘ignore’, ‘replace’ and +‘xmlcharrefreplace’ as well as any other name registered with +codecs.register_error that can handle UnicodeEncodeErrors.

+
+
+
+ +
+
+replace(old, new, count=-1, /)#
+

Return a copy with all occurrences of substring old replaced by new.

+
+
+
count

Maximum number of occurrences to replace. +-1 (the default value) means replace all occurrences.

+
+
+
+

If the optional argument count is given, only the first count occurrences are +replaced.

+
+ +
+
+split(sep=None, maxsplit=-1)#
+

Return a list of the substrings in the string, using sep as the separator string.

+
+
+
sep

The separator used to split the string.

+

When set to None (the default value), will split on any whitespace +character (including \n \r \t \f and spaces) and will discard +empty strings from the result.

+
+
maxsplit

Maximum number of splits (starting from the left). +-1 (the default value) means no limit.

+
+
+
+

Note, str.split() is mainly useful for data that has been intentionally +delimited. With natural text that includes punctuation, consider using +the regular expression module.

+
+ +
+
+rsplit(sep=None, maxsplit=-1)#
+

Return a list of the substrings in the string, using sep as the separator string.

+
+
+
sep

The separator used to split the string.

+

When set to None (the default value), will split on any whitespace +character (including \n \r \t \f and spaces) and will discard +empty strings from the result.

+
+
maxsplit

Maximum number of splits (starting from the left). +-1 (the default value) means no limit.

+
+
+
+

Splitting starts at the end of the string and works to the front.

+
+ +
+
+join(iterable, /)#
+

Concatenate any number of strings.

+

The string whose method is called is inserted in between each given string. +The result is returned as a new string.

+

Example: ‘.’.join([‘ab’, ‘pq’, ‘rs’]) -> ‘ab.pq.rs’

+
+ +
+
+capitalize()#
+

Return a capitalized version of the string.

+

More specifically, make the first character have upper case and the rest lower +case.

+
+ +
+
+casefold()#
+

Return a version of the string suitable for caseless comparisons.

+
+ +
+
+title()#
+

Return a version of the string where each word is titlecased.

+

More specifically, words start with uppercased characters and all remaining +cased characters have lower case.

+
+ +
+
+center(width, fillchar=' ', /)#
+

Return a centered string of length width.

+

Padding is done using the specified fill character (default is a space).

+
+ +
+
+count(sub[, start[, end]]) int#
+

Return the number of non-overlapping occurrences of substring sub in +string S[start:end]. Optional arguments start and end are +interpreted as in slice notation.

+
+ +
+
+expandtabs(tabsize=8)#
+

Return a copy where all tab characters are expanded using spaces.

+

If tabsize is not given, a tab size of 8 characters is assumed.

+
+ +
+
+find(sub[, start[, end]]) int#
+

Return the lowest index in S where substring sub is found, +such that sub is contained within S[start:end]. Optional +arguments start and end are interpreted as in slice notation.

+

Return -1 on failure.

+
+ +
+
+partition(sep, /)#
+

Partition the string into three parts using the given separator.

+

This will search for the separator in the string. If the separator is found, +returns a 3-tuple containing the part before the separator, the separator +itself, and the part after it.

+

If the separator is not found, returns a 3-tuple containing the original string +and two empty strings.

+
index(sub[, start[, end]]) -> int#

Return the lowest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation.

Raises ValueError when the substring is not found.

ljust(width, fillchar=' ', /)#

Return a left-justified string of length width.

Padding is done using the specified fill character (default is a space).

lower()#

Return a copy of the string converted to lowercase.

lstrip(chars=None, /)#

Return a copy of the string with leading whitespace removed.

If chars is given and not None, remove characters in chars instead.

rfind(sub[, start[, end]]) -> int#

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation.

Return -1 on failure.

rindex(sub[, start[, end]]) -> int#

Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation.

Raises ValueError when the substring is not found.

rjust(width, fillchar=' ', /)#

Return a right-justified string of length width.

Padding is done using the specified fill character (default is a space).

rstrip(chars=None, /)#

Return a copy of the string with trailing whitespace removed.

If chars is given and not None, remove characters in chars instead.

rpartition(sep, /)#

Partition the string into three parts using the given separator.

This will search for the separator in the string, starting at the end. If the separator is found, returns a 3-tuple containing the part before the separator, the separator itself, and the part after it.

If the separator is not found, returns a 3-tuple containing two empty strings and the original string.

splitlines(keepends=False)#

Return a list of the lines in the string, breaking at line boundaries.

Line breaks are not included in the resulting list unless keepends is given and true.

strip(chars=None, /)#

Return a copy of the string with leading and trailing whitespace removed.

If chars is given and not None, remove characters in chars instead.

swapcase()#

Convert uppercase characters to lowercase and lowercase characters to uppercase.

translate(table, /)#

Replace each character in the string using the given translation table.

table
    Translation table, which must be a mapping of Unicode ordinals to Unicode ordinals, strings, or None.

The table must implement lookup/indexing via __getitem__, for instance a dictionary or list. If this operation raises LookupError, the character is left untouched. Characters mapped to None are deleted.
upper()#

Return a copy of the string converted to uppercase.

startswith(prefix[, start[, end]]) -> bool#

Return True if S starts with the specified prefix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. prefix can also be a tuple of strings to try.

endswith(suffix[, start[, end]]) -> bool#

Return True if S ends with the specified suffix, False otherwise. With optional start, test S beginning at that position. With optional end, stop comparing S at that position. suffix can also be a tuple of strings to try.

removeprefix(prefix, /)#

Return a str with the given prefix string removed if present.

If the string starts with the prefix string, return string[len(prefix):]. Otherwise, return a copy of the original string.

removesuffix(suffix, /)#

Return a str with the given suffix string removed if present.

If the string ends with the suffix string and that suffix is not empty, return string[:-len(suffix)]. Otherwise, return a copy of the original string.

isascii()#

Return True if all characters in the string are ASCII, False otherwise.

ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.

islower()#

Return True if the string is a lowercase string, False otherwise.

A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.

isupper()#

Return True if the string is an uppercase string, False otherwise.

A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.

istitle()#

Return True if the string is a title-cased string, False otherwise.

In a title-cased string, upper- and title-case characters may only follow uncased characters and lowercase characters only cased ones.

isspace()#

Return True if the string is a whitespace string, False otherwise.

A string is whitespace if all characters in the string are whitespace and there is at least one character in the string.

isdecimal()#

Return True if the string is a decimal string, False otherwise.

A string is a decimal string if all characters in the string are decimal and there is at least one character in the string.
isdigit()#

Return True if the string is a digit string, False otherwise.

A string is a digit string if all characters in the string are digits and there is at least one character in the string.

isnumeric()#

Return True if the string is a numeric string, False otherwise.

A string is numeric if all characters in the string are numeric and there is at least one character in the string.

isalpha()#

Return True if the string is an alphabetic string, False otherwise.

A string is alphabetic if all characters in the string are alphabetic and there is at least one character in the string.

isalnum()#

Return True if the string is an alpha-numeric string, False otherwise.

A string is alpha-numeric if all characters in the string are alpha-numeric and there is at least one character in the string.

isidentifier()#

Return True if the string is a valid Python identifier, False otherwise.

Call keyword.iskeyword(s) to test whether string s is a reserved identifier, such as "def" or "class".

isprintable()#

Return True if the string is printable, False otherwise.

A string is printable if all of its characters are considered printable in repr() or if it is empty.

zfill(width, /)#

Pad a numeric string with zeros on the left, to fill a field of the given width.

The string is never truncated.

format(*args, **kwargs) -> str#

Return a formatted version of S, using substitutions from args and kwargs. The substitutions are identified by braces ('{' and '}').

format_map(mapping) -> str#

Return a formatted version of S, using substitutions from mapping. The substitutions are identified by braces ('{' and '}').

static maketrans()#

Return a translation table usable for str.translate().

If there is only one argument, it must be a dictionary mapping Unicode ordinals (integers) or characters to Unicode ordinals, strings or None. Character keys will then be converted to ordinals. If there are two arguments, they must be strings of equal length, and in the resulting dictionary, each character in x will be mapped to the character at the same position in y. If there is a third argument, it must be a string, whose characters will be mapped to None in the result.
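A short worked example tying maketrans() and translate() together:

    # one-argument form: map 'a' -> '4', delete 'x' entirely
    table = str.maketrans({'a': '4', 'x': None})
    print('axbxa'.translate(table))  # '4b4'

    # two-string form plus a third deletion string: a->x, b->y, drop z
    table = str.maketrans('ab', 'xy', 'z')
    print('abzc'.translate(table))   # 'xyc'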
diff --git a/_autosummary/sup3r.utilities.cli.BaseCLI.html b/_autosummary/sup3r.utilities.cli.BaseCLI.html
new file mode 100644
index 000000000..634998999
--- /dev/null
+++ b/_autosummary/sup3r.utilities.cli.BaseCLI.html
@@ -0,0 +1,922 @@

sup3r.utilities.cli.BaseCLI#

class BaseCLI[source]#

Bases: object

Base CLI class used to create CLI for modules in ModuleName

Methods

add_status_cmd(config, pipeline_step, cmd)
    Append status file command to command for executing given module

check_module_name(module_name)
    Make sure module_name is a valid member of the ModuleName class

from_config(module_name, module_class, ctx, ...)
    Run sup3r module from a config file.

from_config_preflight(module_name, ctx, ...)
    Parse config file prior to running sup3r module.

kickoff_local_job(module_name, ctx, cmd[, ...])
    Run sup3r module locally.

kickoff_slurm_job(module_name, ctx, cmd[, ...])
    Run sup3r module on HPC via SLURM job submission.
classmethod from_config(module_name, module_class, ctx, config_file, verbose, pipeline_step=None)[source]#

Run sup3r module from a config file.

Parameters:
  • module_name (str) – Module name string from sup3r.utilities.ModuleName.
  • module_class (Object) – Class object used to call get_node_cmd(config), e.g. Sup3rQa.get_node_cmd(config)
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • config_file (str) – Path to config file providing all needed inputs to module_class
  • verbose (bool) – Whether to run in verbose mode.
  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

classmethod from_config_preflight(module_name, ctx, config_file, verbose)[source]#

Parse config file prior to running sup3r module.

Parameters:
  • module_name (str) – Module name string from sup3r.utilities.ModuleName.
  • module_class (Object) – Class object used to call get_node_cmd(config), e.g. Sup3rQa.get_node_cmd(config)
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • config_file (str) – Path to config file providing all needed inputs to module_class
  • verbose (bool) – Whether to run in verbose mode.

Returns:
  config (dict) – Dictionary corresponding to config_file
classmethod check_module_name(module_name)[source]#

Make sure module_name is a valid member of the ModuleName class
classmethod kickoff_slurm_job(module_name, ctx, cmd, alloc='sup3r', memory=None, walltime=4, feature=None, stdout_path='./stdout/', pipeline_step=None)[source]#

Run sup3r module on HPC via SLURM job submission.

Parameters:
  • module_name (str) – Module name string from sup3r.utilities.ModuleName.
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • cmd (str) – Command to be submitted in SLURM shell script. Example: 'python -m sup3r.cli <module_name> -c <config_file>'
  • alloc (str) – HPC project (allocation) handle. Example: 'sup3r'.
  • memory (int) – Node memory request in GB.
  • walltime (float) – Node walltime request in hours.
  • feature (str) – Additional flags for SLURM job. Format is "--qos=high" or "--depend=[state:job_id]". Default is None.
  • stdout_path (str) – Path to print .stdout and .stderr files.
  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.
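A minimal sketch of kicking off a sup3r module on SLURM with this method. The module name, config path, click context construction, and memory/walltime values below are illustrative assumptions, not values prescribed by this documentation:

    import click
    from sup3r.utilities.cli import BaseCLI

    # hypothetical click context; in practice this is supplied by the sup3r CLI
    ctx = click.Context(click.Command('forward-pass'), obj={})
    cmd = 'python -m sup3r.cli forward-pass -c ./config_fwp.json'  # placeholder
    BaseCLI.kickoff_slurm_job(
        module_name='forward-pass',  # assumed ModuleName string
        ctx=ctx,
        cmd=cmd,
        alloc='sup3r',
        memory=83,        # GB, illustrative
        walltime=4,       # hours
        stdout_path='./stdout/',
    )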
classmethod kickoff_local_job(module_name, ctx, cmd, pipeline_step=None)[source]#

Run sup3r module locally.

Parameters:
  • module_name (str) – Module name string from sup3r.utilities.ModuleName.
  • ctx (click.pass_context) – Click context object where ctx.obj is a dictionary
  • cmd (str) – Command to be submitted in shell script. Example: 'python -m sup3r.cli <module_name> -c <config_file>'
  • pipeline_step (str, optional) – Name of the pipeline step being run. If None, the pipeline_step will be set to the module_name, mimicking old reV behavior. By default, None.

classmethod add_status_cmd(config, pipeline_step, cmd)[source]#

Append status file command to command for executing given module

Parameters:
  • config (dict) – sup3r config with all necessary args and kwargs to run given module.
  • pipeline_step (str) – Name of the pipeline step being run.
  • cmd (str) – String including command to execute given module.

Returns:
  cmd (str) – Command string with status file command included if job_name is not None
diff --git a/_autosummary/sup3r.utilities.cli.SlurmManager.html b/_autosummary/sup3r.utilities.cli.SlurmManager.html
new file mode 100644
index 000000000..a8168d24d
--- /dev/null
+++ b/_autosummary/sup3r.utilities.cli.SlurmManager.html
@@ -0,0 +1,1199 @@

sup3r.utilities.cli.SlurmManager#

class SlurmManager(user=None, queue_dict=None)[source]#

Bases: SLURM

GAPs-compliant SLURM manager

Parameters:
  • user (str | None) – HPC username. None will get your username using getpass.getuser()
  • queue_dict (dict | None) – Parsed HPC queue dictionary (qstat for PBS or squeue for SLURM) from parse_queue_str(). None will get the queue from PBS or SLURM.

Methods

change_qos(arg, qos)
    Change the priority (quality of service) for a job.

check_status([job_id, job_name])
    Check the status of an HPC job using the HPC queue.

check_status_using_job_id(job_id)
    Check the status of a job using the HPC queue and job ID.

format_walltime(hours)
    Get the SLURM walltime string in format "HH:MM:SS"

hold(arg)
    Temporarily hold a job from submitting.

make_path(d)
    Make a directory tree if it doesn't exist.

make_sh(fname, script)
    Make a shell script (.sh file) to execute a subprocess.

parse_queue_str(queue_str[, keys])
    Parse the qstat or squeue output string into a dict format keyed by integer job id with nested dictionary of job properties (queue printout columns).

query_queue([job_name, user, qformat, skip_rows])
    Run the HPC queue command and return the raw stdout string.

release(arg)
    Release a job that was previously on hold so it will be submitted to a compute node.

rm(fname)
    Remove a file.

s(s)
    Format input as str w/ appropriate quote types for python cli entry.

sbatch(cmd[, alloc, walltime, memory, ...])
    Submit a SLURM job via sbatch command and SLURM shell script

scancel(arg)
    Cancel a slurm job.

scontrol(cmd)
    Submit an scontrol command.

submit(cmd[, background, background_stdout])
    Open a subprocess and submit a command.

Attributes

MAX_NAME_LEN
QCOL_ID
QCOL_NAME
QCOL_STATUS
QSKIP
SQ_FORMAT
USER

queue
    Get the HPC queue parsed into dict format keyed by integer job id

queue_job_ids
    Get a list of the job integer ids in the queue

queue_job_names
    Get a list of the job names in the queue
check_status_using_job_id(job_id)[source]#

Check the status of a job using the HPC queue and job ID.

Parameters:
  job_id (int) – Job integer ID number.

Returns:
  status (str | None) – Queue job status string or None if not found.
change_qos(arg, qos)#

Change the priority (quality of service) for a job.

Parameters:
  • arg (int | list | str) – SLURM integer job id(s) to change qos for. Can be 'all' for all jobs.
  • qos (str) – New qos value

check_status(job_id=None, job_name=None)#

Check the status of an HPC job using the HPC queue.

Parameters:
  • job_id (int | None) – Job integer ID number (preferred input)
  • job_name (str) – Job name string.

Returns:
  status (str | NoneType) – Queue job status str or None if not found. SLURM status strings: PD, R, CG (pending, running, complete). PBS status strings: Q, R, C (queued, running, complete).
static format_walltime(hours)#

Get the SLURM walltime string in format "HH:MM:SS"

Parameters:
  hours (float | int) – Requested number of job hours.

Returns:
  walltime (str) – SLURM walltime request in format "HH:MM:SS"
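A quick sketch of the conversion this method describes; the exact handling of fractional hours is an assumption here:

    from sup3r.utilities.cli import SlurmManager

    print(SlurmManager.format_walltime(4))    # expected '04:00:00'
    print(SlurmManager.format_walltime(0.5))  # expected '00:30:00'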
hold(arg)#

Temporarily hold a job from submitting. Held jobs will stay in queue but will not get nodes until released.

Parameters:
  arg (int | list | str) – SLURM integer job id(s) to hold. Can be 'all' to hold all jobs.

static make_path(d)#

Make a directory tree if it doesn't exist.

Parameters:
  d (str) – Directory tree to check and potentially create.

static make_sh(fname, script)#

Make a shell script (.sh file) to execute a subprocess.

Parameters:
  • fname (str) – Name of the .sh file to create.
  • script (str) – Contents to be written into the .sh file.
classmethod parse_queue_str(queue_str, keys=0)#

Parse the qstat or squeue output string into a dict format keyed by integer job id with nested dictionary of job properties (queue printout columns).

Parameters:
  • queue_str (str) – HPC queue output string (qstat for PBS or squeue for SLURM). Typically a space-delimited string with line breaks.
  • keys (list | int) – Argument to set the queue job attributes (column headers). This defaults to an integer which says which row index contains the space-delimited column headers. Can also be a list to explicitly set the column headers.

Returns:
  queue_dict (dict) – HPC queue parsed into dictionary format keyed by integer job id with nested dictionary of job properties (queue printout columns).

classmethod query_queue(job_name=None, user=None, qformat=None, skip_rows=None)#

Run the HPC queue command and return the raw stdout string.

Parameters:
  • job_name (str | None) – Optional to check the squeue for a specific job name (not limited to the 8 shown characters) or None to show user's whole queue.
  • user (str | None) – HPC username. None will get your username using getpass.getuser()
  • qformat (str | None) – Queue format string specification. Changing this from the default (None) could have adverse effects!
  • skip_rows (int | list | None) – Optional row index values to skip.

Returns:
  stdout (str) – HPC queue output string. Can be split on line breaks to get list.
property queue#

Get the HPC queue parsed into dict format keyed by integer job id

Returns:
  queue (dict) – HPC queue parsed into dictionary format keyed by integer job id with nested dictionary of job properties (queue printout columns).

property queue_job_ids#

Get a list of the job integer ids in the queue

property queue_job_names#

Get a list of the job names in the queue

release(arg)#

Release a job that was previously on hold so it will be submitted to a compute node.

Parameters:
  arg (int | list | str) – SLURM integer job id(s) to release. Can be 'all' to release all jobs.

static rm(fname)#

Remove a file.

Parameters:
  fname (str) – Filename (with path) to remove.

static s(s)#

Format input as str w/ appropriate quote types for python cli entry.

Examples

list, tuple -> "['one', 'two']"
dict -> "{'key': 'val'}"
int, float, None -> '0'
str, other -> 'string'
sbatch(cmd, alloc=None, walltime=None, memory=None, nodes=1, feature=None, name='reV', stdout_path='./stdout', keep_sh=False, conda_env=None, module=None, module_root='/shared-projects/rev/modulefiles')#

Submit a SLURM job via sbatch command and SLURM shell script

Parameters:
  • cmd (str) – Command to be submitted in SLURM shell script. Example: 'python -m reV.generation.cli_gen'
  • alloc (str) – HPC project (allocation) handle. Example: 'rev'. Default is not to state an allocation (does not work on Eagle slurm).
  • walltime (float) – Node walltime request in hours. Default is not to state a walltime (does not work on Eagle slurm).
  • memory (int) – Node memory request in GB.
  • nodes (int) – Number of nodes to use for this sbatch job. Default is 1.
  • feature (str) – Additional flags for SLURM job. Format is "--qos=high" or "--depend=[state:job_id]". Default is None.
  • name (str) – SLURM job name.
  • stdout_path (str) – Path to print .stdout and .stderr files.
  • keep_sh (bool) – Boolean to keep the .sh files. Default is to remove these files after job submission.
  • conda_env (str) – Conda environment to activate
  • module (str) – Module to load
  • module_root (str) – Path to module root to load

Returns:
  • out (str) – sbatch standard output, if submitted successfully, this is the slurm job id.
  • err (str) – sbatch standard error, this is typically an empty string if the job was submitted successfully.
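A minimal, hypothetical submission sketch; the allocation, walltime, and command below are placeholders rather than values from this documentation:

    from sup3r.utilities.cli import SlurmManager

    manager = SlurmManager()
    out, err = manager.sbatch(
        'python -m sup3r.cli forward-pass -c ./config_fwp.json',  # placeholder cmd
        alloc='sup3r',     # assumed allocation handle
        walltime=4,        # hours
        memory=83,         # GB
        name='sup3r_fwp',
        stdout_path='./stdout',
    )
    if not err:
        print(f'Submitted SLURM job id: {out}')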
scancel(arg)#

Cancel a slurm job.

Parameters:
  arg (int | list | str) – SLURM integer job id(s) to cancel. Can be a list of integer job ids, 'all' to cancel all jobs, or a feature (-p short) to cancel all jobs with a given feature

static scontrol(cmd)#

Submit an scontrol command.

Parameters:
  cmd (str) – Command string after "scontrol" word

static submit(cmd, background=False, background_stdout=False)#

Open a subprocess and submit a command.

Parameters:
  • cmd (str) – Command to be submitted using python subprocess.
  • background (bool) – Flag to submit subprocess in the background. stdout stderr will be empty strings if this is True.
  • background_stdout (bool) – Flag to capture the stdout/stderr from the background process in a nohup.out file.

Returns:
  • stdout (str) – Subprocess standard output. This is decoded from the subprocess stdout with rstrip.
  • stderr (str) – Subprocess standard error. This is decoded from the subprocess stderr with rstrip. After decoding/rstrip, this will be empty if the subprocess doesn't return an error.
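For example, a simple foreground submission might look like this sketch:

    # returns decoded, rstripped stdout/stderr as described above
    stdout, stderr = SlurmManager.submit('echo hello')
    print(stdout)  # 'hello'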
diff --git a/_autosummary/sup3r.utilities.cli.html b/_autosummary/sup3r.utilities.cli.html
new file mode 100644
index 000000000..19a03eb4e
--- /dev/null
+++ b/_autosummary/sup3r.utilities.cli.html
@@ -0,0 +1,742 @@

sup3r.utilities.cli#

Sup3r base CLI class.

Classes

BaseCLI()
    Base CLI class used to create CLI for modules in ModuleName

SlurmManager([user, queue_dict])
    GAPs-compliant SLURM manager
diff --git a/_autosummary/sup3r.utilities.era_downloader.EraDownloader.html b/_autosummary/sup3r.utilities.era_downloader.EraDownloader.html
new file mode 100644
index 000000000..cb91d9edc
--- /dev/null
+++ b/_autosummary/sup3r.utilities.era_downloader.EraDownloader.html
@@ -0,0 +1,1226 @@

sup3r.utilities.era_downloader.EraDownloader#

class EraDownloader(year, month, area, levels, file_pattern, overwrite=False, variables=None, product_type='reanalysis')[source]#

Bases: object

Class to handle ERA5 downloading, variable renaming, and file combinations.

Initialize the class.

Parameters:
  • year (int) – Year of data to download.
  • month (int) – Month of data to download.
  • area (list) – Domain area of the data to download. [max_lat, min_lon, min_lat, max_lon]
  • levels (list) – List of pressure levels to download.
  • file_pattern (str) – Pattern for combined monthly output file. Must include year and month format keys. e.g. 'era5_{year}_{month}_combined.nc'
  • overwrite (bool) – Whether to overwrite existing files.
  • variables (list | None) – Variables to download. If None this defaults to just geopotential and wind components.
  • product_type (str) – Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', 'ensemble_members', 'monthly_averaged_reanalysis', 'monthly_averaged_ensemble_members'

Methods

add_pressure(ds)
    Add pressure to dataset

all_vars_exist(year, file_pattern, variables)
    Check if all yearly variable files for the requested year exist.

convert_z(ds, name)
    Convert z to given height variable

download_file(variables, time_dict, area, ...)
    Download either single-level or pressure-level file

download_process_combine()
    Run the download routine.

get_cds_client()
    Get the copernicus climate data store (CDS) API object for ERA downloads.

get_hours()
    ERA5 is hourly and EDA is 3-hourly.

get_monthly_file()
    Download level and surface files, process variables, and combine processed files.

get_tmp_file(file)
    Get temp file for given file.

make_yearly_file(year, file_pattern, variables)
    Combine yearly variable files into a single file.

make_yearly_var_file(year, ...[, chunks, ...])
    Combine monthly variable files into a single yearly variable file.

prep_var_lists(variables)
    Create surface and level variable lists based on requested variables.

process_and_combine()
    Process variables and combine.

process_level_file()
    Convert geopotential to geopotential height.

process_surface_file()
    Rename variables and convert geopotential to geopotential height.

run(year, area, levels, monthly_file_pattern)
    Run routine for all requested months in the requested year.

run_for_var(year, area, levels, ...[, ...])
    Run routine for all requested months in the requested year for the given variable.

run_month(year, month, area, levels, ...[, ...])
    Run routine for the given month and year.

run_qa(file[, res_kwargs, log_file])
    Check for NaN values and log min / max / mean / stds for all variables.

Attributes

days
    Get list of days for the requested month

level_file
    Get name of file with variables from pressure level download

monthly_file
    Name of file with all surface and level variables for a given month and year.

surface_file
    Get name of file with variables from single level download

variables
    Get list of requested variables
get_hours()[source]#

ERA5 is hourly and EDA is 3-hourly. Check and warn for incompatible requests.

property variables#

Get list of requested variables

property days#

Get list of days for the requested month

property monthly_file#

Name of file with all surface and level variables for a given month and year.

property surface_file#

Get name of file with variables from single level download

property level_file#

Get name of file with variables from pressure level download

classmethod get_tmp_file(file)[source]#

Get temp file for given file. Then only needed variables will be written to the given file.

prep_var_lists(variables)[source]#

Create surface and level variable lists based on requested variables.

static get_cds_client()[source]#

Get the copernicus climate data store (CDS) API object for ERA downloads.

download_process_combine()[source]#

Run the download routine.
classmethod download_file(variables, time_dict, area, out_file, level_type, levels=None, product_type='reanalysis', overwrite=False)[source]#

Download either single-level or pressure-level file

Parameters:
  • variables (list) – List of variables to download
  • time_dict (dict) – Dictionary with year, month, day, time entries.
  • area (list) – List of bounding box coordinates. e.g. [max_lat, min_lon, min_lat, max_lon]
  • out_file (str) – Name of output file
  • level_type (str) – Either 'single' or 'pressure'
  • levels (list) – List of pressure levels to download, if level_type == 'pressure'
  • product_type (str) – Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', 'ensemble_members', 'monthly_averaged_reanalysis', 'monthly_averaged_ensemble_members'
  • overwrite (bool) – Whether to overwrite existing file
process_surface_file()[source]#

Rename variables and convert geopotential to geopotential height.

add_pressure(ds)[source]#

Add pressure to dataset

Parameters:
  ds (Dataset) – xr.Dataset() object for which to add pressure

Returns:
  ds (Dataset)

convert_z(ds, name)[source]#

Convert z to given height variable

Parameters:
  • ds (Dataset) – xr.Dataset() object for new file
  • name (str) – Variable name. e.g. zg or orog, typically

Returns:
  ds (Dataset) – xr.Dataset() object for new file with new height variable written.

process_level_file()[source]#

Convert geopotential to geopotential height.

process_and_combine()[source]#

Process variables and combine.

get_monthly_file()[source]#

Download level and surface files, process variables, and combine processed files. Includes checks for shape and variables.
classmethod all_vars_exist(year, file_pattern, variables)[source]#

Check if all yearly variable files for the requested year exist.

Parameters:
  • year (int) – Year used for data download.
  • file_pattern (str) – Pattern for variable file. Must include year and var format keys. e.g. 'era5_{year}_{var}_combined.nc'
  • variables (list) – Variables that should have been downloaded

Returns:
  bool – True if all monthly variable files for the requested year and month exist.
classmethod run_month(year, month, area, levels, file_pattern, overwrite=False, variables=None, product_type='reanalysis')[source]#

Run routine for the given month and year.

Parameters:
  • year (int) – Year of data to download.
  • month (int) – Month of data to download.
  • area (list) – Domain area of the data to download. [max_lat, min_lon, min_lat, max_lon]
  • levels (list) – List of pressure levels to download.
  • file_pattern (str) – Pattern for combined monthly output file. Must include year and month format keys. e.g. 'era5_{year}_{month}_combined.nc'
  • overwrite (bool) – Whether to overwrite existing files.
  • variables (list | None) – Variables to download. If None this defaults to just geopotential and wind components.
  • product_type (str) – Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', 'ensemble_members', 'monthly_averaged_reanalysis', 'monthly_averaged_ensemble_members'
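A hypothetical one-month download using the parameters above; the domain and pressure levels are illustrative, and cdsapi plus a configured ~/.cdsapirc are required (see the module note below):

    from sup3r.utilities.era_downloader import EraDownloader

    EraDownloader.run_month(
        year=2015,
        month=1,
        area=[50, -110, 30, -90],   # [max_lat, min_lon, min_lat, max_lon]
        levels=[1000, 925, 850],    # hPa, illustrative
        file_pattern='era5_{year}_{month}_combined.nc',
    )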
classmethod run_for_var(year, area, levels, monthly_file_pattern, yearly_file_pattern=None, months=None, overwrite=False, max_workers=None, variable=None, product_type='reanalysis', chunks='auto', res_kwargs=None)[source]#

Run routine for all requested months in the requested year for the given variable.

Parameters:
  • year (int) – Year of data to download.
  • area (list) – Domain area of the data to download. [max_lat, min_lon, min_lat, max_lon]
  • levels (list) – List of pressure levels to download.
  • monthly_file_pattern (str) – Pattern for monthly output files. Must include year, month, and var format keys. e.g. 'era5_{year}_{month}_{var}.nc'
  • yearly_file_pattern (str) – Pattern for yearly output files. Must include year and var format keys. e.g. 'era5_{year}_{var}.nc'
  • months (list | None) – List of months to download data for. If None then all months for the given year will be downloaded.
  • overwrite (bool) – Whether to overwrite existing files.
  • max_workers (int) – Max number of workers to use for downloading and processing monthly files.
  • variable (str) – Variable to download.
  • product_type (str) – Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', 'ensemble_members', 'monthly_averaged_reanalysis', 'monthly_averaged_ensemble_members'
  • chunks (str | dict) – Dictionary of chunksizes used when writing data to netcdf files. Can also be 'auto'.
classmethod run(year, area, levels, monthly_file_pattern, yearly_file_pattern=None, months=None, overwrite=False, max_workers=None, variables=None, product_type='reanalysis', chunks='auto', combine_all_files=False, res_kwargs=None)[source]#

Run routine for all requested months in the requested year.

Parameters:
  • year (int) – Year of data to download.
  • area (list) – Domain area of the data to download. [max_lat, min_lon, min_lat, max_lon]
  • levels (list) – List of pressure levels to download.
  • monthly_file_pattern (str) – Pattern for monthly output file. Must include year, month, and var format keys. e.g. 'era5_{year}_{month}_{var}_combined.nc'
  • yearly_file_pattern (str) – Pattern for yearly output file. Must include year and var format keys. e.g. 'era5_{year}_{var}_combined.nc'
  • months (list | None) – List of months to download data for. If None then all months for the given year will be downloaded.
  • overwrite (bool) – Whether to overwrite existing files.
  • max_workers (int) – Max number of workers to use for downloading and processing monthly files.
  • variables (list | None) – Variables to download. If None this defaults to just geopotential and wind components.
  • product_type (str) – Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', 'ensemble_members', 'monthly_averaged_reanalysis', 'monthly_averaged_ensemble_members'
  • chunks (str | dict) – Dictionary of chunksizes used when writing data to netcdf files. Can also be 'auto'
  • combine_all_files (bool) – Whether to combine separate yearly variable files into a single yearly file with all variables included
classmethod make_yearly_var_file(year, monthly_file_pattern, yearly_file_pattern, variable, chunks='auto', res_kwargs=None)[source]#

Combine monthly variable files into a single yearly variable file.

Parameters:
  • year (int) – Year used to download data
  • monthly_file_pattern (str) – File pattern for monthly variable files. Must have year, month, and var format keys. e.g. './era_{year}_{month}_{var}_combined.nc'
  • yearly_file_pattern (str) – File pattern for yearly variable files. Must have year and var format keys. e.g. './era_{year}_{var}_combined.nc'
  • variable (string) – Variable name for the files to be combined.
  • chunks (str | dict) – Dictionary of chunksizes used when writing data to netcdf files. Can also be 'auto'.
  • res_kwargs (None | dict) – Keyword arguments for base resource handler, like xr.open_mfdataset. This is passed to a Loader object and then used in the base loader contained by that object.

classmethod make_yearly_file(year, file_pattern, variables, chunks='auto', res_kwargs=None)[source]#

Combine yearly variable files into a single file.

Parameters:
  • year (int) – Year for the data to make into a yearly file.
  • file_pattern (str) – File pattern for output files. Must have year and var format keys. e.g. './era_{year}_{var}_combined.nc'
  • variables (list) – List of variables corresponding to the yearly variable files to combine.
  • chunks (str | dict) – Dictionary of chunksizes used when writing data to netcdf files. Can also be 'auto'.
  • res_kwargs (None | dict) – Keyword arguments for base resource handler, like xr.open_mfdataset. This is passed to a Loader object and then used in the base loader contained by that object.

classmethod run_qa(file, res_kwargs=None, log_file=None)[source]#

Check for NaN values and log min / max / mean / stds for all variables.
diff --git a/_autosummary/sup3r.utilities.era_downloader.html b/_autosummary/sup3r.utilities.era_downloader.html
new file mode 100644
index 000000000..743ee2adc
--- /dev/null
+++ b/_autosummary/sup3r.utilities.era_downloader.html
@@ -0,0 +1,745 @@

sup3r.utilities.era_downloader#

Download ERA5 file for the given year and month

Note

To use this you need to have the cdsapi package installed and a ~/.cdsapirc file with a url and api key. Follow the instructions here: https://cds.climate.copernicus.eu/how-to-api

Classes

EraDownloader(year, month, area, levels, ...)
    Class to handle ERA5 downloading, variable renaming, and file combinations.
diff --git a/_autosummary/sup3r.utilities.html b/_autosummary/sup3r.utilities.html
new file mode 100644
index 000000000..d8b602169
--- /dev/null
+++ b/_autosummary/sup3r.utilities.html
@@ -0,0 +1,762 @@

sup3r.utilities#

Sup3r utilities

Classes

ModuleName(value)
    A collection of the module names available in sup3r.

Modules

cli
    Sup3r base CLI class.

era_downloader
    Download ERA5 file for the given year and month

interpolation
    Interpolator class with methods for pressure and height interpolation

loss_metrics
    Content loss metrics for Sup3r

pytest
    Pytest helper utilities

utilities
    Miscellaneous utilities shared across multiple modules
diff --git a/_autosummary/sup3r.utilities.interpolation.Interpolator.html b/_autosummary/sup3r.utilities.interpolation.Interpolator.html
new file mode 100644
index 000000000..ddc80e700
--- /dev/null
+++ b/_autosummary/sup3r.utilities.interpolation.Interpolator.html
@@ -0,0 +1,843 @@

sup3r.utilities.interpolation.Interpolator#

class Interpolator[source]#

Bases: object

Class for handling pressure and height interpolation

Methods

get_level_masks(lev_array, level)
    Get the masks used to select closest surrounding levels in the lev_array to requested interpolation level.

interp_to_level(lev_array, var_array, level)
    Interpolate var_array to the given level.
classmethod get_level_masks(lev_array, level)[source]#

Get the masks used to select closest surrounding levels in the lev_array to requested interpolation level.

Parameters:
  • var_array (Union[np.ndarray, da.core.Array]) – Array of variable data, for example u-wind in a 4D array of shape (lat, lon, time, level)
  • lev_array (Union[np.ndarray, da.core.Array]) – Height or pressure values for the corresponding entries in var_array, in the same shape as var_array. If this is height and the requested levels are hub heights above surface, lev_array should be the geopotential height corresponding to every var_array index relative to the surface elevation (subtract the elevation at the surface from the geopotential height)
  • level (float) – level to interpolate to (e.g. final desired hub height above surface elevation)

Returns:
  • mask1 (Union[np.ndarray, da.core.Array]) – Array of bools selecting the entries with the closest levels to the one requested. (lat, lon, time, level)
  • mask2 (Union[np.ndarray, da.core.Array]) – Array of bools selecting the entries with the second closest levels to the one requested. (lat, lon, time, level)
classmethod interp_to_level(lev_array: ndarray | Array, var_array: ndarray | Array, level, interp_kwargs=None)[source]#

Interpolate var_array to the given level.

Parameters:
  • var_array (xr.DataArray) – Array of variable data, for example u-wind in a 4D array of shape (lat, lon, time, level)
  • lev_array (xr.DataArray) – Height or pressure values for the corresponding entries in var_array, in the same shape as var_array. If this is height and the requested levels are hub heights above surface, lev_array should be the geopotential height corresponding to every var_array index relative to the surface elevation (subtract the elevation at the surface from the geopotential height)
  • level (float) – level or levels to interpolate to (e.g. final desired hub height above surface elevation)
  • interp_kwargs (dict | None) – Dictionary of kwargs for level interpolation. Can include "method" and "run_level_check" keys

Returns:
  out (Union[np.ndarray, da.core.Array]) – Interpolated var_array (lat, lon, time)
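A minimal sketch of hub-height interpolation with this method, assuming the 4D (lat, lon, time, level) layout described above; the array values are synthetic:

    import numpy as np
    from sup3r.utilities.interpolation import Interpolator

    shape = (10, 10, 24, 5)                        # (lat, lon, time, level)
    heights = np.array([10., 40., 80., 120., 200.])
    lev = np.broadcast_to(heights, shape).copy()   # heights above surface
    u = np.random.uniform(0, 20, shape)            # e.g. u-wind at each level

    u_100m = Interpolator.interp_to_level(lev_array=lev, var_array=u, level=100.0)
    print(u_100m.shape)  # (10, 10, 24)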
diff --git a/_autosummary/sup3r.utilities.interpolation.html b/_autosummary/sup3r.utilities.interpolation.html
new file mode 100644
index 000000000..0334c53c7
--- /dev/null
+++ b/_autosummary/sup3r.utilities.interpolation.html
@@ -0,0 +1,739 @@

sup3r.utilities.interpolation#

Interpolator class with methods for pressure and height interpolation

Classes

Interpolator()
    Class for handling pressure and height interpolation
diff --git a/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.html b/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.html
new file mode 100644
index 000000000..4a361f8b2
--- /dev/null
+++ b/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.html
@@ -0,0 +1,869 @@

sup3r.utilities.loss_metrics.CoarseMseLoss#

class CoarseMseLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for coarse mse on spatial average of 5D tensor

Initializes Loss class.

Args:
  reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial for more details: https://www.tensorflow.org/tutorials/distribute/custom_training
  name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MSE_LOSS
__call__(x1, x2)[source]#

Coarse MSE loss function, evaluated on spatially averaged fields (see the class description above).

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
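Conceptually, the coarse MSE first averages out the spatial axes and then applies a pointwise MSE. A minimal sketch of that idea (the exact reduction used by sup3r may differ):

    import tensorflow as tf

    def coarse_mse(x1, x2):
        # x1, x2: (n_obs, spatial_1, spatial_2, temporal, features)
        x1_coarse = tf.reduce_mean(x1, axis=(1, 2))  # spatial average
        x2_coarse = tf.reduce_mean(x2, axis=(1, 2))
        return tf.reduce_mean(tf.square(x1_coarse - x2_coarse))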
abstract call(y_true, y_pred)#

Invokes the Loss instance.

Args:
  y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
  y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
  Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#

Instantiates a Loss from its config (output of get_config()).

Args:
  config: Output of get_config().

Returns:
  A Loss instance.

get_config()#

Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.html b/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.html
new file mode 100644
index 000000000..80b016c4c
--- /dev/null
+++ b/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.html
@@ -0,0 +1,860 @@

sup3r.utilities.loss_metrics.ExpLoss#

class ExpLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for squared exponential difference

Initializes Loss class.

Args:
  reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial for more details: https://www.tensorflow.org/tutorials/distribute/custom_training
  name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.
__call__(x1, x2)[source]#

Exponential difference loss function

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
abstract call(y_true, y_pred)#

Invokes the Loss instance.

Args:
  y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
  y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
  Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#

Instantiates a Loss from its config (output of get_config()).

Args:
  config: Output of get_config().

Returns:
  A Loss instance.

get_config()#

Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.html b/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.html
new file mode 100644
index 000000000..301e8e74d
--- /dev/null
+++ b/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.html
@@ -0,0 +1,876 @@

sup3r.utilities.loss_metrics.LowResLoss#

class LowResLoss(s_enhance=1, t_enhance=1, t_method='average', tf_loss='MeanSquaredError', ex_loss=None)[source]#

Bases: Loss

Content loss that is calculated by coarsening the synthetic and true high-resolution data pairs and then performing the pointwise content loss on the low-resolution fields

Initialize the loss with given weight

Parameters:
  • s_enhance (int) – factor by which to coarsen spatial dimensions. 1 will keep the spatial axes as high-res
  • t_enhance (int) – factor by which to coarsen temporal dimension. 1 will keep the temporal axes as high-res
  • t_method (str) – Accepted options: [subsample, average]. Subsample will take every t_enhance-th time step, average will average over t_enhance time steps
  • tf_loss (str) – The tensorflow loss function to operate on the low-res fields. Must be the name of a loss class that can be retrieved from tf.keras.losses e.g., "MeanSquaredError" or "MeanAbsoluteError"
  • ex_loss (None | str) – Optional additional loss metric evaluating the spatial or temporal extremes of the high-res data. Can be "SpatialExtremesOnlyLoss" or "TemporalExtremesOnlyLoss" (keys in EX_LOSS_METRICS).

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

EX_LOSS_METRICS
__call__(x1, x2)[source]#

Custom content loss calculated on re-coarsened low-res fields

Parameters:
  • x1 (tf.tensor) – Synthetic high-res generator output, shape is either of these: (n_obs, spatial_1, spatial_2, features) or (n_obs, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – True high resolution data, shape is either of these: (n_obs, spatial_1, spatial_2, features) or (n_obs, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor loss value
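The coarsen-then-compare idea can be sketched as follows, assuming average pooling for both space and time (the exact coarsening in sup3r may differ):

    import tensorflow as tf

    def low_res_loss(x1, x2, s_enhance=2, t_enhance=4):
        # x1, x2: (n_obs, spatial_1, spatial_2, temporal, features)
        pool = tf.keras.layers.AveragePooling3D(
            pool_size=(s_enhance, s_enhance, t_enhance),
            strides=(s_enhance, s_enhance, t_enhance),
        )
        # pointwise MSE on the coarsened (low-res) fields
        return tf.keras.losses.MeanSquaredError()(pool(x1), pool(x2))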
abstract call(y_true, y_pred)#

Invokes the Loss instance.

Args:
  y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
  y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
  Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#

Instantiates a Loss from its config (output of get_config()).

Args:
  config: Output of get_config().

Returns:
  A Loss instance.

get_config()#

Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.html b/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.html
new file mode 100644
index 000000000..4c50b0327
--- /dev/null
+++ b/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.html
@@ -0,0 +1,876 @@

sup3r.utilities.loss_metrics.MaterialDerivativeLoss#

class MaterialDerivativeLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for the material derivative. This is the left hand side of the Navier-Stokes equation and is equal to internal + external forces divided by density.

References

https://en.wikipedia.org/wiki/Material_derivative

Initializes Loss class.

Args:
  reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial for more details: https://www.tensorflow.org/tutorials/distribute/custom_training
  name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

LOSS_METRIC
__call__(x1, x2)[source]#

Custom content loss that encourages accuracy of the material derivative. This assumes that the first 2 * N features are N u/v wind components at different hub heights and that the total number of features is either 2 * N or 2 * N + 1

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
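For reference, per the citation above, the material derivative of a scalar field phi advected by a horizontal wind (u, v) is

    \frac{D\phi}{Dt} = \frac{\partial \phi}{\partial t}
                     + u\,\frac{\partial \phi}{\partial x}
                     + v\,\frac{\partial \phi}{\partial y}

so a loss of this kind compares that quantity computed from the synthetic and true fields.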
abstract call(y_true, y_pred)#

Invokes the Loss instance.

Args:
  y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
  y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
  Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#

Instantiates a Loss from its config (output of get_config()).

Args:
  config: Output of get_config().

Returns:
  A Loss instance.

get_config()#

Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.html b/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.html
new file mode 100644
index 000000000..ef424cc40
--- /dev/null
+++ b/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.html
@@ -0,0 +1,862 @@

sup3r.utilities.loss_metrics.MmdLoss#

class MmdLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for maximum mean discrepancy (MMD) loss

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.
__call__(x1, x2, sigma=1.0)[source]#

Maximum mean discrepancy (MMD) based on Gaussian kernel function for keras models

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)
  • sigma (float) – standard deviation for gaussian kernel

Returns:
  tf.tensor – 0D tensor with loss value
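A short sketch of computing the MMD loss between two random batches (shapes follow the documented 5D layout; TensorFlow assumed installed):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import MmdLoss

    loss_fn = MmdLoss()
    x1 = tf.random.normal((8, 10, 10, 6, 2))  # synthetic generator output
    x2 = tf.random.normal((8, 10, 10, 6, 2))  # high resolution data
    loss = loss_fn(x1, x2, sigma=1.0)         # 0D tensor; sigma sets the kernel width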
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.html b/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.html
new file mode 100644
index 000000000..c8f4b3e7b

sup3r.utilities.loss_metrics.MmdMseLoss#

class MmdMseLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for MMD + MSE

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MMD_LOSS

MSE_LOSS
__call__(x1, x2, sigma=1.0)[source]#

Maximum mean discrepancy (MMD) based on Gaussian kernel function for keras models plus the typical MSE loss.

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)
  • sigma (float) – standard deviation for gaussian kernel

Returns:
  tf.tensor – 0D tensor with loss value
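Usage mirrors MmdLoss above, e.g. MmdMseLoss()(x1, x2, sigma=1.0); the returned 0D tensor combines the MMD term with a pointwise MSE term (a sketch, assuming the same 5D tensor shapes).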
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.html b/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.html
new file mode 100644
index 000000000..3fdf8ef95

sup3r.utilities.loss_metrics.MseExpLoss#

class MseExpLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class for MSE + squared exponential difference

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MSE_LOSS
__call__(x1, x2)[source]#

MSE + exponential difference loss function

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
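A sketch (TensorFlow assumed; 5D shapes as documented):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import MseExpLoss

    loss_fn = MseExpLoss()
    x1 = tf.random.normal((8, 10, 10, 6, 2))
    x2 = tf.random.normal((8, 10, 10, 6, 2))
    loss = loss_fn(x1, x2)  # 0D tensor: MSE plus squared exponential difference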
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.html b/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.html
new file mode 100644
index 000000000..391855df3

sup3r.utilities.loss_metrics.SpatialExtremesLoss#

class SpatialExtremesLoss(weight=1.0)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values in the spatial domain

Initialize the loss with the given weight

Parameters:
  weight (float) – Weight for the min/max loss terms. Setting this to zero turns the loss into MAE.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

EX_LOSS

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages spatial min/max accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, features)

Returns:
  tf.tensor – 0D tensor with loss value
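A sketch (TensorFlow assumed; note the 4D spatial-only sample shape):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import SpatialExtremesLoss

    loss_fn = SpatialExtremesLoss(weight=2.0)  # upweight the min/max terms
    x1 = tf.random.normal((8, 20, 20, 2))
    x2 = tf.random.normal((8, 20, 20, 2))
    loss = loss_fn(x1, x2)  # 0D tensor: MAE plus weighted min/max terms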
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.html b/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.html
new file mode 100644
index 000000000..bbb5fdb58

sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss#

class SpatialExtremesOnlyLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values in the spatial domain. This does not include an additional MAE term

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages spatial min/max accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, features)

Returns:
  tf.tensor – 0D tensor with loss value
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.html b/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.html
new file mode 100644
index 000000000..bbcfa4a94

sup3r.utilities.loss_metrics.SpatialFftOnlyLoss#

class SpatialFftOnlyLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class that encourages accuracy of the spatial frequency spectrum

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages frequency domain accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, features)

Returns:
  tf.tensor – 0D tensor with loss value
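A sketch comparing the spatial frequency content of two fields (TensorFlow assumed; 4D shapes as documented):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import SpatialFftOnlyLoss

    loss_fn = SpatialFftOnlyLoss()
    x1 = tf.random.normal((8, 32, 32, 1))  # synthetic output
    x2 = tf.random.normal((8, 32, 32, 1))  # high-res truth
    loss = loss_fn(x1, x2)  # 0D tensor penalizing spectrum mismatch only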
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.html b/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.html
new file mode 100644
index 000000000..c2b6c6088

sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss#

class SpatiotemporalExtremesLoss(spatial_weight=1.0, temporal_weight=1.0)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values across both space and time

Initialize the loss with the given weights

Parameters:
  • spatial_weight (float) – Weight for spatial min/max loss terms.
  • temporal_weight (float) – Weight for temporal min/max loss terms.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MAE_LOSS

S_EX_LOSS

T_EX_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages spatiotemporal min/max accuracy.

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
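A sketch showing the two weights (TensorFlow assumed; 5D shapes as documented):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import SpatiotemporalExtremesLoss

    # upweight temporal extremes relative to spatial extremes
    loss_fn = SpatiotemporalExtremesLoss(spatial_weight=1.0, temporal_weight=2.0)
    x1 = tf.random.normal((4, 16, 16, 24, 2))
    x2 = tf.random.normal((4, 16, 16, 24, 2))
    loss = loss_fn(x1, x2)  # 0D tensor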
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.html b/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.html
new file mode 100644
index 000000000..0835194c6

sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss#

class SpatiotemporalFftOnlyLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class that encourages accuracy of the spatiotemporal frequency spectrum

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages frequency domain accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.html b/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.html
new file mode 100644
index 000000000..f1377e4dd

sup3r.utilities.loss_metrics.StExtremesFftLoss#

class StExtremesFftLoss(spatial_weight=1.0, temporal_weight=1.0, fft_weight=1.0)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values across both space and time as well as frequency domain accuracy.

Initialize the loss with the given weights

Parameters:
  • spatial_weight (float) – Weight for spatial min/max loss terms.
  • temporal_weight (float) – Weight for temporal min/max loss terms.
  • fft_weight (float) – Weight for the fft loss term.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.
__call__(x1, x2)[source]#

Custom content loss that encourages spatiotemporal min/max accuracy and fft accuracy.

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
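A sketch combining all three weighted terms (TensorFlow assumed; 5D shapes as documented):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import StExtremesFftLoss

    loss_fn = StExtremesFftLoss(spatial_weight=1.0, temporal_weight=1.0, fft_weight=0.5)
    x1 = tf.random.normal((4, 16, 16, 24, 2))
    x2 = tf.random.normal((4, 16, 16, 24, 2))
    loss = loss_fn(x1, x2)  # 0D tensor: extremes terms plus weighted fft term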
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.html b/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.html
new file mode 100644
index 000000000..14635c390

sup3r.utilities.loss_metrics.TemporalExtremesLoss#

class TemporalExtremesLoss(weight=1.0)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values in the timeseries

Initialize the loss with the given weight

Parameters:
  weight (float) – Weight for the min/max loss terms. Setting this to zero turns the loss into MAE.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

EX_LOSS

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages temporal min/max accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
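A sketch (TensorFlow assumed; 5D shapes as documented):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import TemporalExtremesLoss

    loss_fn = TemporalExtremesLoss(weight=1.0)  # weight=0.0 reduces this to MAE
    x1 = tf.random.normal((4, 16, 16, 24, 2))
    x2 = tf.random.normal((4, 16, 16, 24, 2))
    loss = loss_fn(x1, x2)  # 0D tensor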
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.html b/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.html
new file mode 100644
index 000000000..f9f865e1e

sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss#

class TemporalExtremesOnlyLoss(reduction='auto', name=None)[source]#

Bases: Loss

Loss class that encourages accuracy of the min/max values in the timeseries. This does not include an additional MAE term

Initializes Loss class.

Args:
    reduction: Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used under a tf.distribute.Strategy, except via Model.compile() and Model.fit(), using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details.
    name: Optional name for the instance.

Methods

call(y_true, y_pred)
    Invokes the Loss instance.

from_config(config)
    Instantiates a Loss from its config (output of get_config()).

get_config()
    Returns the config dictionary for a Loss instance.

Attributes

MAE_LOSS
__call__(x1, x2)[source]#

Custom content loss that encourages temporal min/max accuracy

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_observations, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_observations, spatial_1, spatial_2, temporal, features)

Returns:
  tf.tensor – 0D tensor with loss value
abstract call(y_true, y_pred)#
Invokes the Loss instance.

Args:
    y_true: Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
    y_pred: The predicted values. shape = [batch_size, d0, .. dN]

Returns:
    Loss values with the shape [batch_size, d0, .. dN-1].

classmethod from_config(config)#
Instantiates a Loss from its config (output of get_config()).

Args:
    config: Output of get_config().

Returns:
    A Loss instance.

get_config()#
Returns the config dictionary for a Loss instance.
diff --git a/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.html b/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.html
new file mode 100644
index 000000000..a79460390

sup3r.utilities.loss_metrics.gaussian_kernel#

gaussian_kernel(x1, x2, sigma=1.0)[source]#

Gaussian kernel for MMD content loss

Parameters:
  • x1 (tf.tensor) – synthetic generator output (n_obs, spatial_1, spatial_2, temporal, features)
  • x2 (tf.tensor) – high resolution data (n_obs, spatial_1, spatial_2, temporal, features)
  • sigma (float) – Standard deviation for gaussian kernel

Returns:
  tf.tensor – kernel output tensor

References

Following the MMD implementation in lmjohns3/theanets
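A sketch of calling the kernel directly (TensorFlow assumed; in practice this is used internally by the MMD losses):

    import tensorflow as tf
    from sup3r.utilities.loss_metrics import gaussian_kernel

    x1 = tf.random.normal((4, 8, 8, 6, 2))
    x2 = tf.random.normal((4, 8, 8, 6, 2))
    k = gaussian_kernel(x1, x2, sigma=1.0)  # kernel output tensor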

diff --git a/_autosummary/sup3r.utilities.loss_metrics.html b/_autosummary/sup3r.utilities.loss_metrics.html
new file mode 100644
index 000000000..3405ba66a

sup3r.utilities.loss_metrics#

Content loss metrics for Sup3r

Functions

gaussian_kernel(x1, x2[, sigma])
    Gaussian kernel for MMD content loss

Classes

CoarseMseLoss([reduction, name])
    Loss class for coarse MSE on the spatial average of a 5D tensor

ExpLoss([reduction, name])
    Loss class for squared exponential difference

LowResLoss([s_enhance, t_enhance, t_method, ...])
    Content loss that is calculated by coarsening the synthetic and true high-resolution data pairs and then performing the pointwise content loss on the low-resolution fields

MaterialDerivativeLoss([reduction, name])
    Loss class for the material derivative.

MmdLoss([reduction, name])
    Loss class for max mean discrepancy loss

MmdMseLoss([reduction, name])
    Loss class for MMD + MSE

MseExpLoss([reduction, name])
    Loss class for MSE + squared exponential difference

SpatialExtremesLoss([weight])
    Loss class that encourages accuracy of the min/max values in the spatial domain

SpatialExtremesOnlyLoss([reduction, name])
    Loss class that encourages accuracy of the min/max values in the spatial domain.

SpatialFftOnlyLoss([reduction, name])
    Loss class that encourages accuracy of the spatial frequency spectrum

SpatiotemporalExtremesLoss([spatial_weight, ...])
    Loss class that encourages accuracy of the min/max values across both space and time

SpatiotemporalFftOnlyLoss([reduction, name])
    Loss class that encourages accuracy of the spatiotemporal frequency spectrum

StExtremesFftLoss([spatial_weight, ...])
    Loss class that encourages accuracy of the min/max values across both space and time as well as frequency domain accuracy.

TemporalExtremesLoss([weight])
    Loss class that encourages accuracy of the min/max values in the timeseries

TemporalExtremesOnlyLoss([reduction, name])
    Loss class that encourages accuracy of the min/max values in the timeseries.
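Since every loss lives in this one module, swapping the content loss for an experiment is a one-line import change (a sketch; the classes and signatures are as documented on the pages above):

    from sup3r.utilities.loss_metrics import MmdMseLoss, TemporalExtremesLoss

    content_loss = TemporalExtremesLoss(weight=2.0)  # or MmdMseLoss(), etc.
    # loss objects are callable: content_loss(synthetic, high_res) -> 0D tensor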
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.html b/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.html
new file mode 100644
index 000000000..a59778c79

sup3r.utilities.pytest.helpers.BatchHandlerTesterCC#

class BatchHandlerTesterCC(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, **kwargs)[source]#

Bases: BatchHandler

Batch handler with a sampler that keeps a running index record.
Parameters:
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of training data. The data can be a Sup3rX or Sup3rDataset object.
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of validation data. The data can be a Sup3rX or a Sup3rDataset object.
  • batch_size (int) – Number of observations / samples in a batch
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
  • queue_cap (int) – Maximum number of batches the batch queue can store.
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method performs smoothing / coarsening.
  • mode (str) – Loading mode. Default is 'lazy', which only loads data into memory as batches are queued. 'eager' will load all data into memory right away.
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.
      lr_only_features : list | tuple – List of feature names or patterns that should only be included in the low-res training set and not the high-res observations.
      hr_exo_features : list | tuple – List of feature names or patterns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
  • kwargs (dict) – Additional keyword arguments for BatchQueue and / or Samplers. This can vary depending on the type of BatchQueue / Sampler given to the Factory. For example, to build a BatchHandlerDC object (data-centric batch handler) we use a queue and sampler which take spatial and temporal weight / bin arguments used to determine how to weigh spatiotemporal regions when sampling. Using ConditionalBatchQueue will result in arguments for computing moments from batches and how to pad batch data to enable these calculations.
  • sample_shape (tuple) – Size of arrays to sample from the high-res data. The sample shape for the low-res sampler will be determined from the enhancement factors.
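A hypothetical construction sketch (the DataHandler import path, file name, and feature names are illustrative assumptions, not taken from this page; any objects with a .data attribute work as containers):

    from sup3r.preprocessing import DataHandler  # assumed import path
    from sup3r.utilities.pytest.helpers import BatchHandlerTesterCC

    train = [DataHandler('./era5_2015.nc', features=['u_10m', 'v_10m'])]  # hypothetical file
    handler = BatchHandlerTesterCC(
        train_containers=train,
        sample_shape=(20, 20, 24),  # high-res sample shape
        batch_size=8,
        n_batches=4,
        s_enhance=2,
        t_enhance=4,
    )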

Methods

check_enhancement_factors()
    Make sure each DualSampler has the same enhancement factors and they match those provided to the BatchQueue.

check_features()
    Make sure all samplers have the same sets of features.

check_shared_attr(attr)
    Check if all containers have the same value for attr.

enqueue_batch()
    Build batch and send to queue.

enqueue_batches()
    Callback function for queue thread.

get_batch()
    Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()
    Get random container index based on weights

get_queue()
    Return FIFO queue for storing batches.

get_random_container()
    Get random container based on container weights

init_samplers(train_containers, ...)
    Initialize samplers from given data containers.

log_queue_info()
    Log info about queue size.

post_init_log([args_dict])
    Log additional arguments after initialization.

post_proc(samples)
    Performs some post processing on dequeued samples before sending them out for training.

preflight()
    Run checks before kicking off the queue.

sample_batch()
    Get random sampler from collection and return a batch of samples from that sampler.

start()
    Start the val data batch queue in addition to the train batch queue.

stop()
    Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, smoothing_ignore])
    Perform smoothing if requested.

wrap(data)
    Return a Sup3rDataset object or tuple of such.

Attributes

container_weights
    Get weights used to sample from different containers based on relative sizes

data
    Return underlying data.

features
    Get all features contained in data.

hr_shape
    Shape of high resolution sample in a low-res / high-res pair.

lr_shape
    Shape of low resolution sample in a low-res / high-res pair.

queue_shape
    Shape of objects stored in the queue.

queue_thread
    Get new queue thread.

running
    Boolean to check whether to keep enqueueing batches.

shape
    Get shape of underlying data.

SAMPLER#
alias of SamplerTester

class Batch(low_res, high_res)#
Bases: tuple

Create new instance of Batch(low_res, high_res)

__add__(value, /)#
Return self+value.

__mul__(value, /)#
Return self*value.

count(value, /)#
Return number of occurrences of value.

high_res#
Alias for field number 1

index(value, start=0, stop=9223372036854775807, /)#
Return first index of value. Raises ValueError if the value is not present.

low_res#
Alias for field number 0

TRAIN_QUEUE#
alias of DualBatchQueue

VAL_QUEUE#
alias of DualBatchQueue

check_enhancement_factors()#
Make sure each DualSampler has the same enhancement factors and they match those provided to the BatchQueue.

check_features()#
Make sure all samplers have the same sets of features.

check_shared_attr(attr)#
Check if all containers have the same value for attr. If they do, the collection effectively inherits those attributes.

property container_weights#
Get weights used to sample from different containers based on relative sizes

property data#
Return underlying data.

Returns:
  Sup3rDataset

See also: wrap()

enqueue_batch()#
Build batch and send to queue.

enqueue_batches() → None#
Callback function for queue thread. While training, the queue is checked for empty spots and filled. In the training thread, batches are removed from the queue.

property features#
Get all features contained in data.

get_batch() → Batch#
Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()#
Get random container index based on weights

get_queue()#
Return FIFO queue for storing batches.

get_random_container()#
Get random container based on container weights

property hr_shape#
Shape of high resolution sample in a low-res / high-res pair (e.g. (spatial_1, spatial_2, temporal, features)).

init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
Initialize samplers from given data containers.

log_queue_info()#
Log info about queue size.

property lr_shape#
Shape of low resolution sample in a low-res / high-res pair (e.g. (spatial_1, spatial_2, temporal, features)).

post_init_log(args_dict=None)#
Log additional arguments after initialization.

post_proc(samples) → Batch#
Performs some post processing on dequeued samples before sending them out for training. Post processing can include coarsening on high-res data (if the Collection consists of Sampler objects and not DualSampler objects), smoothing, etc.

Returns:
  Batch (namedtuple) – namedtuple with low_res and high_res attributes

preflight()#
Run checks before kicking off the queue.

property queue_shape#
Shape of objects stored in the queue.

property queue_thread#
Get new queue thread.

property running#
Boolean to check whether to keep enqueueing batches.

sample_batch()#
Get random sampler from collection and return a batch of samples from that sampler.

Notes: These samples are wrapped in an np.asarray call, so they have been loaded into memory.

property shape#
Get shape of underlying data.

start()#
Start the val data batch queue in addition to the train batch queue.

stop()#
Stop the val data batch queue in addition to the train batch queue.

transform(samples, smoothing=None, smoothing_ignore=None)#
Perform smoothing if requested.

Note: This does not include temporal or spatial coarsening like SingleBatchQueue

wrap(data)#
Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.html b/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.html
new file mode 100644
index 000000000..aea3cc44e

sup3r.utilities.pytest.helpers.BatchHandlerTesterDC#

class BatchHandlerTesterDC(train_containers, *, val_containers=None, sample_shape=None, batch_size=16, n_batches=64, s_enhance=1, t_enhance=1, means=None, stds=None, queue_cap=None, transform_kwargs=None, mode='lazy', feature_sets=None, spatial_weights=None, temporal_weights=None, n_space_bins=1, n_time_bins=1)[source]#

Bases: BatchHandlerDC

Data-centric batch handler that keeps a record of sampled indices for testing
Parameters:
  • train_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of training data. The data can be a Sup3rX or Sup3rDataset object.
  • val_containers (List[Container]) – List of objects with a .data attribute, which will be used to initialize Sampler objects and then used to initialize a batch queue of validation data. The data can be a Sup3rX or a Sup3rDataset object.
  • batch_size (int) – Number of observations / samples in a batch
  • n_batches (int) – Number of batches in an epoch; this sets the iteration limit for this object.
  • s_enhance (int) – Integer factor by which the spatial axes are to be enhanced.
  • t_enhance (int) – Integer factor by which the temporal axis is to be enhanced.
  • means (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
  • stds (str | dict | None) – Usually a file path for loading / saving results, or None for just calculating stats and not saving. Can also be a dict.
  • queue_cap (int) – Maximum number of batches the batch queue can store.
  • transform_kwargs (Union[Dict, None]) – Dictionary of kwargs to be passed to self.transform. This method performs smoothing / coarsening.
  • mode (str) – Loading mode. Default is 'lazy', which only loads data into memory as batches are queued. 'eager' will load all data into memory right away.
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features. See Sampler
  • sample_shape (tuple) – Size of arrays to sample from the contained data.
  • spatial_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the spatial sampling. e.g. If we want to start off sampling across 2 spatial bins evenly this should be [0.5, 0.5]. During training these weights will be updated based on performance across the bins associated with these weights.
  • temporal_weights (Union[np.ndarray, da.core.Array] | List | None) – Set of weights used to initialize the temporal sampling. e.g. If we want to start off sampling only the first season of the year this should be [1, 0, 0, 0]. During training these weights will be updated based on performance across the bins associated with these weights.
  • n_space_bins (int) – Number of spatial bins to use for weighted sampling. e.g. if this is 4 the spatial domain will be divided into 4 equal regions and losses will be calculated across these regions during training in order to adaptively sample from lower performing regions.
  • n_time_bins (int) – Number of time bins to use for weighted sampling. e.g. if this is 4 the temporal domain will be divided into 4 equal periods and losses will be calculated across these periods during training in order to adaptively sample from lower performing time periods.

Methods

check_enhancement_factors()
    Make sure the enhancement factors evenly divide the sample_shape.

check_features()
    Make sure all samplers have the same sets of features.

check_shared_attr(attr)
    Check if all containers have the same value for attr.

enqueue_batch()
    Build batch and send to queue.

enqueue_batches()
    Callback function for queue thread.

get_batch()
    Get batch from queue or directly from a Sampler through sample_batch.

get_container_index()
    Get random container index based on weights

get_queue()
    Return FIFO queue for storing batches.

get_random_container()
    Get random container based on container weights

init_samplers(train_containers, ...)
    Initialize samplers from given data containers.

log_queue_info()
    Log info about queue size.

post_init_log([args_dict])
    Log additional arguments after initialization.

post_proc(samples)
    Performs some post processing on dequeued samples before sending them out for training.

preflight()
    Run checks before kicking off the queue.

sample_batch()
    Override get_samples to track sample indices.

start()
    Start the val data batch queue in addition to the train batch queue.

stop()
    Stop the val data batch queue in addition to the train batch queue.

transform(samples[, smoothing, ...])
    Coarsen high res data to get the corresponding low res batch.

update_record()
    Reset records for a new epoch.

update_weights(spatial_weights, temporal_weights)
    Set weights used to sample spatial and temporal bins.

wrap(data)
    Return a Sup3rDataset object or tuple of such.

Attributes

container_weights
    Get weights used to sample from different containers based on relative sizes

data
    Return underlying data.

features
    Get all features contained in data.

hr_shape
    Shape of high resolution sample in a low-res / high-res pair.

lr_shape
    Shape of low resolution sample in a low-res / high-res pair.

queue_shape
    Shape of objects stored in the queue.

queue_thread
    Get new queue thread.

running
    Boolean to check whether to keep enqueueing batches.

shape
    Get shape of underlying data.

spatial_weights
    Get weights used to sample spatial bins.

temporal_weights
    Get weights used to sample temporal bins.
+
+SAMPLER#
+

alias of SamplerTester

+
+ +
+
+sample_batch()[source]#
+

Override get_samples to track sample indices.

+
+ +
+
+update_record()[source]#
+

Reset records for a new epoch.

+
+ +
+
+class Batch(low_res, high_res)#
+

Bases: tuple

+

Create new instance of Batch(low_res, high_res)

+
+
+__add__(value, /)#
+

Return self+value.

+
+ +
+
+__mul__(value, /)#
+

Return self*value.

+
+ +
+
+count(value, /)#
+

Return number of occurrences of value.

+
+ +
+
+high_res#
+

Alias for field number 1

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)#
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+low_res#
+

Alias for field number 0

+
+ +
+ +
+
+TRAIN_QUEUE#
+

alias of BatchQueueDC

+
+ +
+
+VAL_QUEUE#
+

alias of ValBatchQueueDC

+
+ +
+
+check_enhancement_factors()#
+

Make sure the enhancement factors evenly divide the sample_shape.

+
+ +
+
+check_features()#
+

Make sure all samplers have the same sets of features.

+
+ +
+
+check_shared_attr(attr)#
+

Check if all containers have the same value for attr. If they do +the collection effectively inherits those attributes.

+
+ +
+
+property container_weights#
+

Get weights used to sample from different containers based on +relative sizes

+
+ +
+
+property data#
+

Return underlying data.

+
+
Returns:
+

Sup3rDataset

+
+
+
+

See also

+

wrap()

+
+
+ +
+
+enqueue_batch()#
+

Build batch and send to queue.

+
+ +
+
+enqueue_batches() None#
+

Callback function for queue thread. While training, the queue is +checked for empty spots and filled. In the training thread, batches are +removed from the queue.

+
+ +
+
+property features#
+

Get all features contained in data.

+
+ +
+
+get_batch() Batch#
+

Get batch from queue or directly from a Sampler through +sample_batch.

+
+ +
+
+get_container_index()#
+

Get random container index based on weights

+
+ +
+
+get_queue()#
+

Return FIFO queue for storing batches.

+
+ +
+
+get_random_container()#
+

Get random container based on container weights

+
+ +
+
+property hr_shape#
+

Shape of high resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+init_samplers(train_containers, val_containers, sample_shape, feature_sets, batch_size, sampler_kwargs)#
+

Initialize samplers from given data containers.

+
+ +
+
+log_queue_info()#
+

Log info about queue size.

+
+ +
+
+property lr_shape#
+

Shape of low resolution sample in a low-res / high-res pair. (e.g. +(spatial_1, spatial_2, temporal, features))

+
+ +
+
+post_init_log(args_dict=None)#
+

Log additional arguments after initialization.

+
+ +
+
+post_proc(samples) Batch#
+

Performs some post proc on dequeued samples before sending out for +training. Post processing can include coarsening on high-res data (if +Collection consists of Sampler objects and not +DualSampler objects), smoothing, etc

+
+
Returns:
+

Batch (namedtuple) – namedtuple with low_res and high_res attributes

+
+
+
preflight()#
Run checks before kicking off the queue.

property queue_shape#
Shape of objects stored in the queue.

property queue_thread#
Get new queue thread.

property running#
Boolean to check whether to keep enqueueing batches.

property shape#
Get shape of underlying data.

property spatial_weights#
Get weights used to sample spatial bins.

start()#
Start the val data batch queue in addition to the train batch queue.

stop()#
Stop the val data batch queue in addition to the train batch queue.

property temporal_weights#
Get weights used to sample temporal bins.

transform(samples, smoothing=None, smoothing_ignore=None, temporal_coarsening_method='subsample')#
Coarsen high res data to get corresponding low res batch.

Parameters:
  • samples (Union[np.ndarray, da.core.Array]) – High resolution batch of samples. 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features)
  • smoothing (float | None) – Standard deviation to use for gaussian filtering of the coarse data. This can be tuned by matching the kinetic energy of a low resolution simulation with the kinetic energy of a coarsened and smoothed high resolution simulation. If None no smoothing is performed.
  • smoothing_ignore (list | None) – List of features to ignore for the smoothing filter. None will smooth all features if the smoothing kwarg is not None.
  • temporal_coarsening_method (str) – Method to use for temporal coarsening. Can be subsample, average, min, max, or total.

Returns:
  • low_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features)
  • high_res (Union[np.ndarray, da.core.Array]) – 4D | 5D array: (batch_size, spatial_1, spatial_2, features) or (batch_size, spatial_1, spatial_2, temporal, features)
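A hedged sketch of how such a transform can produce a low-res/high-res pair from 5D high-res samples, built on the spatial_coarsening and temporal_coarsening utilities documented later in this section, plus optional gaussian smoothing. This is an illustration of the technique, not the exact sup3r implementation:

    import numpy as np
    from scipy.ndimage import gaussian_filter

    from sup3r.utilities.utilities import spatial_coarsening, temporal_coarsening

    def simple_transform(samples, s_enhance=2, t_enhance=4, smoothing=None):
        """Produce a (low_res, high_res) pair from 5D high-res samples."""
        # samples: (batch_size, spatial_1, spatial_2, temporal, features)
        low_res = spatial_coarsening(samples, s_enhance=s_enhance, obs_axis=True)
        low_res = temporal_coarsening(low_res, t_enhance=t_enhance,
                                      method='subsample')
        if smoothing is not None:
            for i in range(low_res.shape[-1]):
                # Smooth only the two spatial axes of each feature channel.
                low_res[..., i] = gaussian_filter(
                    low_res[..., i], sigma=(0, smoothing, smoothing, 0))
        return low_res, samples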

update_weights(spatial_weights, temporal_weights)#
Set weights used to sample spatial and temporal bins. This is called by Sup3rGanDC after an epoch to update weights based on model performance across validation samples.
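For example, a data-centric training loop might renormalize per-bin validation losses into sampling weights after each epoch. A sketch, where batch_handler is the documented instance and the weighting scheme is hypothetical (the exact scheme is up to Sup3rGanDC):

    import numpy as np

    # Hypothetical per-bin validation losses from the last epoch; bins with
    # higher loss get sampled more often in the next epoch.
    s_losses = np.array([0.2, 0.5, 0.3])
    t_losses = np.array([0.1, 0.9])

    batch_handler.update_weights(spatial_weights=s_losses / s_losses.sum(),
                                 temporal_weights=t_losses / t_losses.sum())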

wrap(data)#
Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.html b/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.html

sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory#

BatchHandlerTesterFactory(BatchHandlerClass, SamplerClass)[source]#
Batch handler factory with sample counter and deterministic sampling for testing.
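Illustrative use, mirroring how the tester classes elsewhere in this module appear to be built (availability of the BatchHandler and DummySampler names is assumed):

    # Hypothetical usage: wrap production classes into a test-instrumented
    # batch handler class.
    BatchHandlerTester = BatchHandlerTesterFactory(BatchHandler, DummySampler)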

diff --git a/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.html b/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.html

sup3r.utilities.pytest.helpers.DualSamplerTesterCC#

DualSamplerTesterCC#
alias of SamplerTester
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.DummyData.html b/_autosummary/sup3r.utilities.pytest.helpers.DummyData.html

sup3r.utilities.pytest.helpers.DummyData#

class DummyData(data_shape, features)[source]#
Bases: Container

Dummy container with random data.

Parameters:
    data (Union[Sup3rX, Sup3rDataset, Tuple[Sup3rX, ...], Tuple[Sup3rDataset, ...]]) – Can be an xr.Dataset, a Sup3rX object, a Sup3rDataset object, or a tuple of such objects.

Note
.data will return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
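A usage sketch. The data_shape ordering is assumed to be (spatial_1, spatial_2, temporal), matching the shapes used elsewhere in these helpers:

    dummy = DummyData(data_shape=(10, 10, 48), features=['u_100m', 'v_100m'])
    print(dummy.shape)   # shape of the underlying random data
    ds = dummy.data      # Sup3rDataset wrapper (see the note above)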

Methods

post_init_log([args_dict])    Log additional arguments after initialization.
wrap(data)    Return a Sup3rDataset object or tuple of such.

Attributes

data    Return underlying data.
shape    Get shape of underlying data.

property data#
Return underlying data.

Returns:
    Sup3rDataset

See also: wrap()

post_init_log(args_dict=None)#
Log additional arguments after initialization.

property shape#
Get shape of underlying data.

wrap(data)#
Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.html b/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.html

sup3r.utilities.pytest.helpers.DummySampler#

class DummySampler(sample_shape, data_shape, features, batch_size, feature_sets=None)[source]#
Bases: Sampler

Dummy container with random data.

Parameters:
  • data (Union[Sup3rX, Sup3rDataset]) – Object with data that will be sampled from. Usually the .data attribute of various Container objects, i.e. Loader, Rasterizer, Deriver, as long as the spatial dimensions are not flattened.
  • sample_shape (tuple) – Size of arrays to sample from the contained data.
  • batch_size (int) – Number of samples to get to build a single batch. A sample of (sample_shape[0], sample_shape[1], batch_size * sample_shape[2]) is first selected from the underlying dataset and then reshaped into (batch_size, *sample_shape) to get a single batch. This is more efficient than getting N = batch_size samples and then stacking.
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.
      features (list | tuple) – List of full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.
      lr_only_features (list | tuple) – List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.
      hr_exo_features (list | tuple) – List of feature names or patt*erns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.
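A usage sketch (argument values are illustrative; data_shape ordering is assumed to be (spatial_1, spatial_2, temporal)):

    sampler = DummySampler(sample_shape=(5, 5, 6),
                           data_shape=(10, 10, 96),
                           features=['u_100m', 'v_100m'],
                           batch_size=4)
    idx = sampler.get_sample_index(n_obs=4)
    # idx -> (lat_slice, lon_slice, time_slice, features); the time slice
    # covers n_obs * sample_shape[2] = 24 continuous time steps.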
Methods

get_sample_index([n_obs])    Randomly gets spatiotemporal sample index.
post_init_log([args_dict])    Log additional arguments after initialization.
preflight()    Check if the sample_shape is larger than the requested raster size.
wrap(data)    Return a Sup3rDataset object or tuple of such.

Attributes

data    Return underlying data.
hr_exo_features    Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection.
hr_features    Get the high-resolution features corresponding to hr_features_ind.
hr_features_ind    Get the high-resolution feature channel indices that should be included for training.
hr_out_features    Get a list of high-resolution features that are intended to be output by the GAN.
hr_sample_shape    Shape of the data sample to select when __next__() is called.
lr_only_features    List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.
sample_shape    Shape of the data sample to select when __next__() is called.
shape    Get shape of underlying data.

property data#
Return underlying data.

Returns:
    Sup3rDataset

See also: wrap()

get_sample_index(n_obs=None)#
Randomly gets spatiotemporal sample index.

Notes
If n_obs > 1 this will get a time slice with n_obs * self.sample_shape[2] time steps, which will then be reshaped into n_obs samples each with self.sample_shape[2] time steps. This is a much more efficient way of getting batches of samples but only works if there are enough continuous time steps to sample.

Returns:
    sample_index (tuple) – Tuple of latitude slice, longitude slice, time slice, and features. Used to get a single observation like self.data[sample_index]
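To see the arithmetic, here is a sketch of that reshaping (illustrative; the actual implementation may differ):

    import numpy as np

    s1, s2, t, f, n_obs = 10, 10, 6, 2, 4
    long_sample = np.random.rand(s1, s2, n_obs * t, f)  # one continuous slice

    # Split the time axis into n_obs contiguous windows of t steps each and
    # move the observation axis to the front:
    batch = long_sample.reshape(s1, s2, n_obs, t, f)
    batch = np.moveaxis(batch, 2, 0)
    assert batch.shape == (n_obs, s1, s2, t, f)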

property hr_exo_features#
Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection. These must come at the end of the high-res feature set. These can also be input to the model as low-res features.

property hr_features#
Get the high-resolution features corresponding to hr_features_ind.

property hr_features_ind#
Get the high-resolution feature channel indices that should be included for training. Any high-resolution features that are only included in the data handler to be coarsened for the low-res input are removed.

property hr_out_features#
Get a list of high-resolution features that are intended to be output by the GAN. Does not include high-resolution exogenous features.

property hr_sample_shape: Tuple#
Shape of the data sample to select when __next__() is called. Same as sample_shape.

property lr_only_features#
List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

post_init_log(args_dict=None)#
Log additional arguments after initialization.

preflight()#
Check if the sample_shape is larger than the requested raster size.

property sample_shape: Tuple#
Shape of the data sample to select when __next__() is called.

property shape#
Get shape of underlying data.

wrap(data)#
Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.html b/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.html

sup3r.utilities.pytest.helpers.SamplerTester#

class SamplerTester(*args, **kwargs)#
Bases: Sampler

Keep a record of sample indices for testing.

Parameters:
  • data (Union[Sup3rX, Sup3rDataset]) – Object with data that will be sampled from. Usually the .data attribute of various Container objects, i.e. Loader, Rasterizer, Deriver, as long as the spatial dimensions are not flattened.
  • sample_shape (tuple) – Size of arrays to sample from the contained data.
  • batch_size (int) – Number of samples to get to build a single batch. A sample of (sample_shape[0], sample_shape[1], batch_size * sample_shape[2]) is first selected from the underlying dataset and then reshaped into (batch_size, *sample_shape) to get a single batch. This is more efficient than getting N = batch_size samples and then stacking.
  • feature_sets (Optional[dict]) – Optional dictionary describing how the full set of features is split between lr_only_features and hr_exo_features.
      features (list | tuple) – List of full set of features to use for sampling. If no entry is provided then all data_vars from data will be used.
      lr_only_features (list | tuple) – List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.
      hr_exo_features (list | tuple) – List of feature names or patt*erns that should be included in the high-resolution observation but not expected to be output from the generative model. An example is high-res topography that is to be injected mid-network.

Methods

get_sample_index(**kwargs)    Override get_sample_index to keep a record of the index accessible by the batch handler.
post_init_log([args_dict])    Log additional arguments after initialization.
preflight()    Check if the sample_shape is larger than the requested raster size.
wrap(data)    Return a Sup3rDataset object or tuple of such.

Attributes

data    Return underlying data.
hr_exo_features    Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection.
hr_features    Get the high-resolution features corresponding to hr_features_ind.
hr_features_ind    Get the high-resolution feature channel indices that should be included for training.
hr_out_features    Get a list of high-resolution features that are intended to be output by the GAN.
hr_sample_shape    Shape of the data sample to select when __next__() is called.
lr_only_features    List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.
sample_shape    Shape of the data sample to select when __next__() is called.
shape    Get shape of underlying data.

property data#
Return underlying data.

Returns:
    Sup3rDataset

See also: wrap()

get_sample_index(**kwargs)#
Override get_sample_index to keep a record of the index accessible by the batch handler. We store the index with the time entry divided by the batch size, since we have multiplied by the batch size to get a continuous time sample for multiple observations.
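A sketch of such an override (illustrative; the class name is hypothetical, and the index is assumed to be the (lat_slice, lon_slice, time_slice, features) tuple documented above):

    from sup3r.preprocessing import Sampler   # assumed import path

    class RecordingSampler(Sampler):
        """Sampler that remembers every index it hands out."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.index_record = []

        def get_sample_index(self, **kwargs):
            idx = super().get_sample_index(**kwargs)
            self.index_record.append(idx)   # record for later assertions
            return idx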

property hr_exo_features#
Get a list of exogenous high-resolution features that are only used for training, e.g., mid-network high-res topo injection. These must come at the end of the high-res feature set. These can also be input to the model as low-res features.

property hr_features#
Get the high-resolution features corresponding to hr_features_ind.

property hr_features_ind#
Get the high-resolution feature channel indices that should be included for training. Any high-resolution features that are only included in the data handler to be coarsened for the low-res input are removed.

property hr_out_features#
Get a list of high-resolution features that are intended to be output by the GAN. Does not include high-resolution exogenous features.

property hr_sample_shape: Tuple#
Shape of the data sample to select when __next__() is called. Same as sample_shape.

property lr_only_features#
List of feature names or patt*erns that should only be included in the low-res training set and not the high-res observations.

post_init_log(args_dict=None)#
Log additional arguments after initialization.

preflight()#
Check if the sample_shape is larger than the requested raster size.

property sample_shape: Tuple#
Shape of the data sample to select when __next__() is called.

property shape#
Get shape of underlying data.

wrap(data)#
Return a Sup3rDataset object or tuple of such. This is a tuple when the .data attribute belongs to a Collection object like BatchHandler. Otherwise this is a Sup3rDataset object, which is either a wrapped 2-tuple or 1-tuple (e.g. len(data) == 2 or len(data) == 1). This is a 2-tuple when .data belongs to a dual container object like DualSampler and a 1-tuple otherwise.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.html b/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.html

sup3r.utilities.pytest.helpers.SamplerTesterDC#

SamplerTesterDC#
alias of SamplerTester
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.html b/_autosummary/sup3r.utilities.pytest.helpers.html

sup3r.utilities.pytest.helpers#

Testing helpers.

Functions

BatchHandlerTesterFactory(BatchHandlerClass, ...)    Batch handler factory with sample counter and deterministic sampling for testing.
make_collect_chunks(td)    Make fake h5 chunked output files for collection tests.
make_fake_cs_ratio_files(td, low_res_times, ...)    Make a set of dummy clearsky ratio files that match the GAN fwp outputs.
make_fake_dset(shape, features[, const])    Make dummy data for tests.
make_fake_h5_chunks(td)    Make fake h5 chunked output files for a 5x spatial 2x temporal multi-node forward pass output.
make_fake_nc_file(file_name, shape, features)    Make nc file with dummy data for tests.
make_fake_tif(shape, outfile)    Make dummy data for tests.
test_sampler_factory(SamplerClass)    Build test samplers which track indices.

Classes

BatchHandlerTesterCC(train_containers, *[, ...])    Batch handler with sampler with running index record.
BatchHandlerTesterDC(train_containers, *[, ...])    Data-centric batch handler with record for testing.
DualSamplerTesterCC    alias of SamplerTester
DummyData(data_shape, features)    Dummy container with random data.
DummySampler(sample_shape, data_shape, ...)    Dummy container with random data.
SamplerTester(*args, **kwargs)    Keep a record of sample indices for testing.
SamplerTesterDC    alias of SamplerTester
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.html b/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.html

sup3r.utilities.pytest.helpers.make_collect_chunks#

make_collect_chunks(td)[source]#
Make fake h5 chunked output files for collection tests.

Parameters:
    td (tempfile.TemporaryDirectory) – Test TemporaryDirectory

Returns:
  • out_files (list) – List of filepaths to chunked files.
  • data (ndarray) – (spatial_1, spatial_2, temporal, features) High resolution forward pass output
  • ws_true (ndarray) – Windspeed between 0 and 20 in shape (spatial_1, spatial_2, temporal, 1)
  • wd_true (ndarray) – Wind direction between 0 and 360 in shape (spatial_1, spatial_2, temporal, 1)
  • features (list) – List of feature names corresponding to the last dimension of data ['windspeed_100m', 'winddirection_100m']
  • hr_lat_lon (ndarray) – Array of lat/lon for hr data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)
  • hr_times (list) – List of np.datetime64 objects for hr data.
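A usage sketch for a collection test, unpacking only the leading return values (the helper returns several more, as listed above):

    import tempfile

    with tempfile.TemporaryDirectory() as td:
        out = make_collect_chunks(td)
        out_files, data, ws_true, wd_true = out[:4]
        # out_files can now be fed to the h5 collector under test.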

diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.html b/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.html

sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files#

make_fake_cs_ratio_files(td, low_res_times, low_res_lat_lon, model_meta)[source]#
Make a set of dummy clearsky ratio files that match the GAN fwp outputs.

Parameters:
  • td (tempfile.TemporaryDirectory) – Test TemporaryDirectory
  • low_res_times – List of times for low res input data. If there is only a single low res timestep, it is assumed the data is daily.
  • low_res_lat_lon – Array of lat/lon for input data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)
  • model_meta (dict) – Meta data for model to write to file.

Returns:
  • fps (list) – List of clearsky ratio .h5 chunked files.
  • fp_pattern (str) – Glob pattern string to find fps
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.html b/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.html

sup3r.utilities.pytest.helpers.make_fake_dset#

make_fake_dset(shape, features, const=None)[source]#
Make dummy data for tests.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.html b/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.html

sup3r.utilities.pytest.helpers.make_fake_h5_chunks#

make_fake_h5_chunks(td)[source]#
Make fake h5 chunked output files for a 5x spatial 2x temporal multi-node forward pass output.

Parameters:
    td (tempfile.TemporaryDirectory) – Test TemporaryDirectory

Returns:
  • out_files (list) – List of filepaths to chunked files.
  • data (ndarray) – (spatial_1, spatial_2, temporal, features) High resolution forward pass output
  • ws_true (ndarray) – Windspeed between 0 and 20 in shape (spatial_1, spatial_2, temporal, 1)
  • wd_true (ndarray) – Wind direction between 0 and 360 in shape (spatial_1, spatial_2, temporal, 1)
  • features (list) – List of feature names corresponding to the last dimension of data ['windspeed_100m', 'winddirection_100m']
  • t_slices_lr (list) – List of low res temporal slices
  • t_slices_hr (list) – List of high res temporal slices
  • s_slices_lr (list) – List of low res spatial slices
  • s_slices_hr (list) – List of high res spatial slices
  • low_res_lat_lon (ndarray) – Array of lat/lon for input data. (spatial_1, spatial_2, 2) Last dimension has ordering (lat, lon)
  • low_res_times (list) – List of np.datetime64 objects for coarse data.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.html b/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.html

sup3r.utilities.pytest.helpers.make_fake_nc_file#

make_fake_nc_file(file_name, shape, features)[source]#
Make nc file with dummy data for tests.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.html b/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.html

sup3r.utilities.pytest.helpers.make_fake_tif#

make_fake_tif(shape, outfile)[source]#
Make dummy data for tests.
diff --git a/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.html b/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.html

sup3r.utilities.pytest.helpers.test_sampler_factory#

test_sampler_factory(SamplerClass)[source]#
Build test samplers which track indices.
diff --git a/_autosummary/sup3r.utilities.pytest.html b/_autosummary/sup3r.utilities.pytest.html

sup3r.utilities.pytest#

Pytest helper utilities

helpers    Testing helpers.
diff --git a/_autosummary/sup3r.utilities.utilities.Timer.html b/_autosummary/sup3r.utilities.utilities.Timer.html

sup3r.utilities.utilities.Timer#

class Timer[source]#
Bases: object

Timer class for timing and storing function call times.

Methods

start()    Set start of timing period.
stop()    Set stop time of timing period.

Attributes

elapsed    Elapsed time between start and stop.
elapsed_str    Elapsed time in string format.

start()[source]#
Set start of timing period.

stop()[source]#
Set stop time of timing period.

property elapsed#
Elapsed time between start and stop.

property elapsed_str#
Elapsed time in string format.
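A minimal usage sketch of the start/stop interface:

    timer = Timer()
    timer.start()
    _ = sum(range(10**6))     # stand-in for the work being timed
    timer.stop()
    print(timer.elapsed)      # elapsed time between start and stop
    print(timer.elapsed_str)  # same value, formatted as a string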

__call__(func, call_id=None, log=False)[source]#
Time function call and store elapsed time in self.log.

Parameters:
  • func (function) – Function to time
  • call_id (int | None) – ID to distinguish calls with the same function name. For example, when running forward passes on multiple chunks.
  • log (bool) – Whether to write to active logger

Returns:
    output of func
diff --git a/_autosummary/sup3r.utilities.utilities.generate_random_string.html b/_autosummary/sup3r.utilities.utilities.generate_random_string.html

sup3r.utilities.utilities.generate_random_string#

generate_random_string(length)[source]#
Generate random string with given length. Used for naming temporary files to avoid collisions.
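For example (the file path here is a hypothetical placeholder):

    # Build a collision-safe temporary file name from a random suffix:
    tmp_file = f'./tmp_{generate_random_string(8)}.h5'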

diff --git a/_autosummary/sup3r.utilities.utilities.html b/_autosummary/sup3r.utilities.utilities.html

sup3r.utilities.utilities#

Miscellaneous utilities shared across multiple modules

Functions

generate_random_string(length)    Generate random string with given length.
nn_fill_array(array)    Fill any NaN values in an np.ndarray from the nearest non-nan values.
pd_date_range(*args, **kwargs)    A simple wrapper on the pd.date_range() method that handles the closed vs. inclusive kwarg change.
preprocess_datasets(dset)    Standardization preprocessing applied before datasets are concatenated by xr.open_mfdataset.
safe_cast(o)    Cast to type safe for serialization.
safe_serialize(obj, **kwargs)    json.dumps with non-serializable object handling.
spatial_coarsening(data[, s_enhance, obs_axis])    Coarsen data according to s_enhance resolution.
temporal_coarsening(data[, t_enhance, method])    Coarsen data according to t_enhance resolution.
xr_open_mfdataset(files, **kwargs)    Wrapper for xr.open_mfdataset with default opening options.

Classes

Timer()    Timer class for timing and storing function call times.
diff --git a/_autosummary/sup3r.utilities.utilities.nn_fill_array.html b/_autosummary/sup3r.utilities.utilities.nn_fill_array.html

sup3r.utilities.utilities.nn_fill_array#

nn_fill_array(array)[source]#
Fill any NaN values in an np.ndarray from the nearest non-nan values.

Parameters:
    array (Union[np.ndarray, da.core.Array]) – Input array with NaN values

Returns:
    array (Union[np.ndarray, da.core.Array]) – Output array with NaN values filled
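A minimal usage sketch:

    import numpy as np

    arr = np.array([[1.0, np.nan],
                    [np.nan, 4.0]])
    filled = nn_fill_array(arr)   # NaNs replaced by nearest non-NaN values
    assert not np.isnan(filled).any()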

diff --git a/_autosummary/sup3r.utilities.utilities.pd_date_range.html b/_autosummary/sup3r.utilities.utilities.pd_date_range.html

sup3r.utilities.utilities.pd_date_range#

pd_date_range(*args, **kwargs)[source]#
A simple wrapper on the pd.date_range() method that handles the closed vs. inclusive kwarg change in pd 1.4.0
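A usage sketch (the kwarg choice is illustrative; the wrapper reconciles the closed/inclusive rename across pandas versions):

    times = pd_date_range('2015-01-01', '2015-01-03', freq='1h',
                          inclusive='left')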

diff --git a/_autosummary/sup3r.utilities.utilities.preprocess_datasets.html b/_autosummary/sup3r.utilities.utilities.preprocess_datasets.html

sup3r.utilities.utilities.preprocess_datasets#

preprocess_datasets(dset)[source]#
Standardization preprocessing applied before datasets are concatenated by xr.open_mfdataset
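A usage sketch (file names are placeholders):

    import xarray as xr

    # Intended to be used as the ``preprocess`` callback of open_mfdataset:
    ds = xr.open_mfdataset(['file_1.nc', 'file_2.nc'],
                           preprocess=preprocess_datasets)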

diff --git a/_autosummary/sup3r.utilities.utilities.safe_cast.html b/_autosummary/sup3r.utilities.utilities.safe_cast.html

sup3r.utilities.utilities.safe_cast#

safe_cast(o)[source]#
Cast to type safe for serialization.
diff --git a/_autosummary/sup3r.utilities.utilities.safe_serialize.html b/_autosummary/sup3r.utilities.utilities.safe_serialize.html

sup3r.utilities.utilities.safe_serialize#

safe_serialize(obj, **kwargs)[source]#
json.dumps with non-serializable object handling.
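A usage sketch (the casting detail is an assumption based on safe_cast above):

    import numpy as np

    # np.float32 is not JSON-serializable by default; safe_serialize handles
    # such objects, presumably by casting them via safe_cast.
    print(safe_serialize({'loss': np.float32(0.12), 'shape': (10, 10)}))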

diff --git a/_autosummary/sup3r.utilities.utilities.spatial_coarsening.html b/_autosummary/sup3r.utilities.utilities.spatial_coarsening.html

sup3r.utilities.utilities.spatial_coarsening#

spatial_coarsening(data, s_enhance=2, obs_axis=True)[source]#
Coarsen data according to s_enhance resolution

Parameters:
  • data (Union[np.ndarray, da.core.Array]) – 5D | 4D | 3D | 2D array with dimensions: (n_obs, spatial_1, spatial_2, temporal, features) (obs_axis=True), (n_obs, spatial_1, spatial_2, features) (obs_axis=True), (spatial_1, spatial_2, temporal, features) (obs_axis=False), (spatial_1, spatial_2, temporal_or_features) (obs_axis=False), (spatial_1, spatial_2) (obs_axis=False)
  • s_enhance (int) – factor by which to coarsen spatial dimensions
  • obs_axis (bool) – Flag for if axis=0 is the observation axis. If True (default) spatial axis=(1, 2) (zero-indexed), if False spatial axis=(0, 1)

Returns:
    data (Union[np.ndarray, da.core.Array]) – 2D, 3D | 4D | 5D array with same dimensions as data with new coarse resolution
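A shape-level usage sketch, following the documented dimension ordering:

    import numpy as np

    # (n_obs, spatial_1, spatial_2, temporal, features) with obs_axis=True:
    data = np.random.rand(4, 10, 10, 6, 2)
    coarse = spatial_coarsening(data, s_enhance=2, obs_axis=True)
    assert coarse.shape == (4, 5, 5, 6, 2)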

diff --git a/_autosummary/sup3r.utilities.utilities.temporal_coarsening.html b/_autosummary/sup3r.utilities.utilities.temporal_coarsening.html

sup3r.utilities.utilities.temporal_coarsening#

temporal_coarsening(data, t_enhance=4, method='subsample')[source]#
Coarsen data according to t_enhance resolution

Parameters:
  • data (Union[np.ndarray, da.core.Array]) – 5D array with dimensions (observations, spatial_1, spatial_2, temporal, features)
  • t_enhance (int) – factor by which to coarsen temporal dimension
  • method (str) – accepted options: [subsample, average, total, min, max]. Subsample will take every t_enhance-th time step, average will average over t_enhance time steps, total will sum over t_enhance time steps

Returns:
    coarse_data (Union[np.ndarray, da.core.Array]) – 5D array with same dimensions as data with new coarse resolution
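A shape-level usage sketch, following the documented dimension ordering:

    import numpy as np

    # (observations, spatial_1, spatial_2, temporal, features):
    data = np.random.rand(4, 10, 10, 24, 2)
    coarse = temporal_coarsening(data, t_enhance=4, method='average')
    assert coarse.shape == (4, 10, 10, 6, 2)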

diff --git a/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.html b/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.html

sup3r.utilities.utilities.xr_open_mfdataset#

xr_open_mfdataset(files, **kwargs)[source]#
Wrapper for xr.open_mfdataset with default opening options.
diff --git a/_cli/cli.html b/_cli/cli.html

Command Line Interfaces (CLIs)
diff --git a/_cli/sup3r.html b/_cli/sup3r.html

sup3r#

Sup3r command line interface.

Try using the following commands to pull up the help pages for the respective sup3r CLIs:

    $ sup3r --help
    $ sup3r -c config.json pipeline --help
    $ sup3r -c config.json forward-pass --help
    $ sup3r -c config.json data-collect --help

Typically, a good place to start is to set up a sup3r job with a pipeline config that points to several sup3r modules that you want to run in serial. You would call the sup3r pipeline CLI using either of these equivalent commands:

    $ sup3r -c config_pipeline.json pipeline
    $ sup3r-pipeline from-config -c config_pipeline.json

See the help pages of the module CLIs for more details on the config files for each CLI.

    sup3r [OPTIONS] COMMAND [ARGS]...
Options

--version#
Show the version and exit.

-c, --config_file <config_file>#
sup3r configuration file json for a single module.

-v, --verbose#
Flag to turn on debug logging. Default is not verbose.
batch#

Create and run multiple sup3r project directories based on batch permutation logic.

The sup3r batch module (built on the reV batch functionality) is a way to create and run many sup3r pipeline projects based on permutations of key-value pairs in the run config files. A user configures the batch file by creating one or more "sets" that contain one or more arguments (keys found in config files) that are to be parameterized. For example, in the config below, four sup3r pipelines will be created where arg1 and arg2 are set to [0, "a"], [0, "b"], [1, "a"], [1, "b"] in config_fwp.json:

    {
        "pipeline_config": "./config_pipeline.json",
        "sets": [
          {
            "args": {
              "arg1": [0, 1],
              "arg2": ["a", "b"]
            },
            "files": ["./config_fwp.json"],
            "set_tag": "set1"
          }
        ]
    }

Note that you can use multiple "sets" to isolate parameter permutations.

    sup3r batch [OPTIONS] COMMAND [ARGS]...

Options

--dry-run#
Flag to do a dry run (make batch dirs without running).

--cancel#
Flag to cancel all jobs associated with a given batch.

--delete#
Flag to delete all batch job sub directories associated with the batch_jobs.csv in the current batch config directory.

--monitor-background#
Flag to monitor all batch pipelines continuously in the background using the nohup command. Note that the stdout/stderr will not be captured, but you can set a pipeline "log_file" to capture logs.

-v, --verbose#
Flag to turn on debug logging.

bias-calc#

Sup3r bias correction calculation module to create bias correction factors for low res input data.

This is typically used to understand and correct the biases in global climate model (GCM) data from CMIP. Bias correcting GCM data is a very common process when dealing with climate data and this helps automate that process prior to passing GCM data through sup3r generative models. This is typically the first step in a GCM downscaling pipeline.

You can call the bias calc module via the sup3r-pipeline CLI, or call it directly with either of these equivalent commands:

    $ sup3r -c config_bias.json bias-calc
    $ sup3r-bias-calc from-config -c config_bias.json

A sup3r bias calc config.json file can contain any arguments or keyword arguments required to initialize and run the requested bias calculation class.

The config has high level bias_calc_class and jobs keys. The bias_calc_class is a class name from the sup3r.bias module, and the jobs argument is a list of kwargs required to initialize the bias_calc_class and run the bias_calc_class.run() method (for example, see sup3r.bias.bias_calc.LinearCorrection.run()). There are also several optional arguments: log_pattern, log_level, and execution_control. Here's a small example bias calc config:

    {
        "bias_calc_class": "LinearCorrection",
        "jobs": [
            {
                "base_fps" : ["/datasets/WIND/HRRR/HRRR_2015.h5"],
                "bias_fps": ["./ta_day_EC-Earth3-Veg_ssp585.nc"],
                "base_dset": "u_100m",
                "bias_feature": "u_100m",
                "target": [20, -130],
                "shape": [48, 95]
             }
        ],
        "execution_control": {
            "option": "kestrel",
            "walltime": 4,
            "alloc": "sup3r"
        }
    }

Note that the execution_control block contains kwargs that would be required to distribute the job on multiple nodes on the NREL HPC. To run the job locally, use execution_control: {"option": "local"}.

    sup3r bias-calc [OPTIONS]

Options

-v, --verbose#
Flag to turn on debug logging.
data-collect#

Sup3r data collection following forward pass.

The sup3r data-collect module can be used to collect time-chunked files that were spit out by multi-node forward pass jobs. You can call the data-collect module via the sup3r-pipeline CLI, or call it directly with either of these equivalent commands:

    $ sup3r -c config_collect.json data-collect
    $ sup3r-data-collect from-config -c config_collect.json

A sup3r data-collect config.json file can contain any arguments or keyword arguments required to run the sup3r.postprocessing.collectors.Collector.collect() method. The config also has several optional arguments: log_file, log_level, and execution_control. Here's a small example data-collect config:

    {
        "file_paths": "./outputs/*.h5",
        "out_file": "./outputs/output_file.h5",
        "features": ["windspeed_100m", "winddirection_100m"],
        "log_file": "./logs/collect.log",
        "execution_control": {"option": "local"},
        "log_level": "DEBUG"
    }

Note that the execution_control has the same options as forward-pass and you can set "option": "kestrel" to run on the NREL HPC.

    sup3r data-collect [OPTIONS]

Options

-v, --verbose#
Flag to turn on debug logging.
forward-pass#

Sup3r forward pass to super-resolve data.

The sup3r forward pass is where all the magic happens. This module takes in low resolution data from source files (e.g. WRF or GCM outputs), loads a saved Sup3rGan model, does a forward pass of the low res data through the generator, and then saves the output high resolution data to disk. This module also handles multi-node and multi-core parallelization.

You can call the forward-pass module via the sup3r-pipeline CLI, or call it directly with either of these equivalent commands:

    $ sup3r -c config_fwp.json forward-pass
    $ sup3r-forward-pass from-config -c config_fwp.json

A sup3r forward pass config.json file can contain any arguments or keyword arguments required to initialize the sup3r.pipeline.forward_pass.ForwardPassStrategy module. The config also has several optional arguments: log_pattern, log_level, and execution_control. Here's a small example forward pass config:

    {
        "file_paths": "./source_files*.nc",
        "model_kwargs": {
            "model_dir": "./sup3r_model/"
        },
        "out_pattern": "./output/sup3r_out_{file_id}.h5",
        "log_pattern": "./logs/log_{node_index}.log",
        "log_level": "DEBUG",
        "fwp_chunk_shape": [5, 5, 3],
        "spatial_pad": 1,
        "temporal_pad": 1,
        "max_nodes": 1,
        "output_workers": 1,
        "pass_workers": 8,
        "input_handler_kwargs": {"max_workers": 1},
        "execution_control": {
            "option": "kestrel",
            "walltime": 4,
            "alloc": "sup3r"
        }
    }

Note that the execution_control block contains kwargs that would be required to distribute the job on multiple nodes on the NREL HPC. To run the job locally, use execution_control: {"option": "local"}.

    sup3r forward-pass [OPTIONS]

Options

-v, --verbose#
Flag to turn on debug logging.
pipeline#

Execute multiple steps in a Sup3r pipeline.

Typically, a good place to start is to set up a sup3r job with a pipeline config that points to several sup3r modules that you want to run in serial. You would call the sup3r pipeline CLI using either of these equivalent commands:

    $ sup3r -c config_pipeline.json pipeline
    $ sup3r-pipeline from-config -c config_pipeline.json

A typical sup3r pipeline config.json file might look like this:

    {
        "logging": {"log_level": "DEBUG"},
        "pipeline": [
            {"forward-pass": "./config_fwp.json"},
            {"data-collect": "./config_collect.json"}
        ]
    }

See the other CLI help pages for what the respective module configs require.

    sup3r pipeline [OPTIONS] COMMAND [ARGS]...

Options

--cancel#
Flag to cancel all jobs associated with a given pipeline.

--monitor#
Flag to monitor pipeline jobs continuously. Default is not to monitor (kick off jobs and exit).

--background#
Flag to monitor pipeline jobs continuously in the background using the nohup command. Note that the stdout/stderr will not be captured, but you can set a pipeline "log_file" to capture logs.

-v, --verbose#
Flag to turn on debug logging.
qa

Sup3r QA module following forward pass and collection.

The sup3r QA module can be used to verify how well the high-resolution sup3r-resolved outputs adhere to the low-resolution source data. You can call the QA module via the sup3r-pipeline CLI, or call it directly with either of these equivalent commands:

$ sup3r -c config_qa.json qa
$ sup3r-qa from-config -c config_qa.json

A sup3r QA config.json file can contain any arguments or keyword arguments required to initialize the sup3r.qa.qa.Sup3rQa class. The config also has several optional arguments: log_file, log_level, and execution_control. Here's a small example QA config:

+{
+    "source_file_paths": "./source_files*.nc",
+    "out_file_path": "./outputs/collected_output_file.h5",
+    "s_enhance": 25,
+    "t_enhance": 24,
+    "temporal_coarsening_method": "average",
+    "log_file": "./logs/qa.log",
+    "execution_control": {"option": "local"},
+    "log_level": "DEBUG"
+}

Note that the execution_control has the same options as forward-pass and you can set "option": "kestrel" to run on the NREL HPC.

sup3r qa [OPTIONS]

Options

-v, --verbose

Flag to turn on debug logging.

solar

Sup3r solar module to convert GAN output clearsky ratio to irradiance.

Typically we train solar GANs on clearsky ratio to remove the dependence on known variables like solar position and clearsky irradiance. This module converts the clearsky ratio output by the GAN in the forward-pass step to actual irradiance values (ghi, dni, and dhi). This should be run after the forward-pass step but before the data-collect step.
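The conversion itself inverts the clearsky ratio definition. Below is a minimal sketch, assuming you already have clearsky GHI values for the same timesteps (e.g. from an NSRDB file); the real solar module also derives dni and dhi and handles the temporal chunking:

import numpy as np

# Hypothetical inputs: clearsky ratio from the GAN forward-pass output and
# clearsky GHI from a reference dataset on the same timesteps.
cs_ratio = np.array([0.0, 0.45, 0.92, 1.0])    # unitless, 0 to 1
cs_ghi = np.array([0.0, 310.0, 640.0, 880.0])  # W/m2

# clearsky_ratio is defined as ghi / clearsky_ghi, so invert it:
ghi = cs_ratio * cs_ghi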

You can call the solar module via the sup3r-pipeline CLI, or call it directly with either of these equivalent commands:

$ sup3r -c config_solar.json solar
$ sup3r-solar from-config -c config_solar.json

A sup3r solar config.json file can contain any arguments or keyword arguments required to call the sup3r.solar.solar.Solar.run_temporal_chunk() method. You do not need to include the i_t_chunk input; it is added by the CLI. The config also has several optional arguments: log_pattern, log_level, and execution_control. Here's a small example solar config:

+{
+    "fp_pattern": "./chunks/sup3r*.h5",
+    "nsrdb_fp": "/datasets/NSRDB/current/nsrdb_2015.h5",
+    "max_nodes": 100,
+    "execution_control": {
+        "option": "kestrel",
+        "walltime": 4,
+        "alloc": "sup3r"
+    }
+}

Note that the execution_control block contains kwargs that would be required to distribute the job on multiple nodes on the NREL HPC. To run the job locally, use execution_control: {"option": "local"}.

sup3r solar [OPTIONS]

Options

-v, --verbose

Flag to turn on debug logging.
\ No newline at end of file
diff --git a/_modules/index.html b/_modules/index.html
new file mode 100644
index 000000000..79ca6d323
--- /dev/null
+++ b/_modules/index.html
@@ -0,0 +1,736 @@
+Overview: module code — sup3r 0.2.1.dev62 documentation

All modules for which code is available

\ No newline at end of file
diff --git a/_modules/sup3r/bias/base.html b/_modules/sup3r/bias/base.html
new file mode 100644
index 000000000..b51caff3d
--- /dev/null
+++ b/_modules/sup3r/bias/base.html
@@ -0,0 +1,1466 @@
+sup3r.bias.base — sup3r 0.2.1.dev62 documentation

Source code for sup3r.bias.base

+"""Data retrieval class for performing / evaluating bias correction.
+
+TODO: This can likely leverage the new data handling objects. Refactor
+accordingly.
+"""
+
+import logging
+from abc import abstractmethod
+
+import numpy as np
+import pandas as pd
+import rex
+from rex.utilities.fun_utils import get_fun_call_str
+from scipy import stats
+from scipy.spatial import KDTree
+
+import sup3r.preprocessing
+from sup3r.preprocessing import DataHandler
+from sup3r.preprocessing.utilities import expand_paths
+from sup3r.utilities import VERSION_RECORD, ModuleName
+from sup3r.utilities.cli import BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DataRetrievalBase: + """Base class to handle data retrieval for the biased data and the + baseline data + """ + + def __init__( + self, + base_fps, + bias_fps, + base_dset, + bias_feature, + distance_upper_bound=None, + target=None, + shape=None, + base_handler='Resource', + bias_handler='DataHandlerNCforCC', + base_handler_kwargs=None, + bias_handler_kwargs=None, + decimals=None, + match_zero_rate=False, + pre_load=True + ): + """ + Parameters + ---------- + base_fps : list | str + One or more baseline .h5 filepaths representing non-biased data to + use to correct the biased dataset. This is typically several years + of WTK or NSRDB files. + bias_fps : list | str + One or more biased .nc or .h5 filepaths representing the biased + data to be corrected based on the baseline data. This is typically + several years of GCM .nc files. + base_dset : str + A single dataset from the base_fps to retrieve. In the case of wind + components, this can be u_100m or v_100m which will retrieve + windspeed and winddirection and derive the U/V component. + bias_feature : str + This is the biased feature from bias_fps to retrieve. This should + be a single feature name corresponding to base_dset + distance_upper_bound : float + Upper bound on the nearest neighbor distance in decimal degrees. + This should be the approximate resolution of the low-resolution + bias data. None (default) will calculate this based on the median + distance between points in bias_fps + target : tuple + (lat, lon) lower left corner of raster to retrieve from bias_fps. + If None then the lower left corner of the full domain will be used. + shape : tuple + (rows, cols) grid size to retrieve from bias_fps. If None then the + full domain shape will be used. + base_handler : str + Name of rex resource handler or sup3r.preprocessing class to be + retrieved from the rex/sup3r library. If a sup3r.preprocessing + class is used, all data will be loaded in this class' + initialization and the subsequent bias calculation will be done in + serial + bias_handler : str + Name of the bias data handler class to be retrieved from the + sup3r.preprocessing library. + base_handler_kwargs : dict | None + Optional kwargs to send to the initialization of the base_handler + class + bias_handler_kwargs : dict | None + Optional kwargs to send to the initialization of the bias_handler + class + decimals : int | None + Option to round bias and base data to this number of decimals, this + gets passed to np.around(). If decimals is negative, it specifies + the number of positions to the left of the decimal point. + match_zero_rate : bool + Option to fix the frequency of zero values in the biased data. The + lowest percentile of values in the biased data will be set to zero + to match the percentile of zeros in the base data. If + SkillAssessment is being run and this is True, the distributions + will not be mean-centered. This helps resolve the issue where + global climate models produce too many days with small + precipitation totals e.g., the "drizzle problem" [Polade2014]_. + pre_load : bool + Flag to preload all data needed for bias correction. This is + currently recommended to improve performance with the new sup3r + data handler access patterns + + References + ---------- + .. [Polade2014] Polade, S. D., Pierce, D. W., Cayan, D. R., Gershunov, + A., & Dettineer, M. D. (2014). The key role of dry days in changing + regional climate and precipitation regimes. Scientific reports, + 4(1), 4364. 
https://doi.org/10.1038/srep04364 + """ + + logger.info( + 'Initializing DataRetrievalBase for base dset "{}" ' + 'correcting biased dataset(s): {}'.format(base_dset, bias_feature) + ) + self.base_fps = base_fps + self.bias_fps = bias_fps + self.base_dset = base_dset + self.bias_feature = bias_feature + self.target = target + self.shape = shape + self.decimals = decimals + self.base_handler_kwargs = base_handler_kwargs or {} + self.bias_handler_kwargs = bias_handler_kwargs or {} + self.bad_bias_gids = [] + self.match_zero_rate = match_zero_rate + self.base_fps = expand_paths(self.base_fps) + self.bias_fps = expand_paths(self.bias_fps) + self._distance_upper_bound = distance_upper_bound + + base_sup3r_handler = getattr(sup3r.preprocessing, base_handler, None) + base_rex_handler = getattr(rex, base_handler, None) + + if base_rex_handler is not None: + self.base_handler = base_rex_handler + self.base_dh = self.base_handler( + self.base_fps[0], **self.base_handler_kwargs + ) + elif base_sup3r_handler is not None: + self.base_handler = base_sup3r_handler + self.base_handler_kwargs['features'] = [self.base_dset] + self.base_dh = self.base_handler( + self.base_fps, **self.base_handler_kwargs + ) + msg = ( + 'Base data handler opened with a sup3r DataHandler class ' + 'must load cached data!' + ) + assert self.base_dh.data is not None, msg + else: + msg = f'Could not retrieve "{base_handler}" from sup3r or rex!' + logger.error(msg) + raise RuntimeError(msg) + + self.bias_handler = getattr(sup3r.preprocessing, bias_handler) + self.base_meta = self.base_dh.meta + self.bias_dh = self.bias_handler( + self.bias_fps, + [self.bias_feature], + target=self.target, + shape=self.shape, + **self.bias_handler_kwargs, + ) + lats = self.bias_dh.lat_lon[..., 0].flatten() + self.bias_meta = self.bias_dh.meta + self.bias_ti = self.bias_dh.time_index + + raster_shape = self.bias_dh.lat_lon[..., 0].shape + bias_lat_lon = self.bias_meta[['latitude', 'longitude']].values + self.bias_tree = KDTree(bias_lat_lon) + self.bias_gid_raster = np.arange(lats.size) + self.bias_gid_raster = self.bias_gid_raster.reshape(raster_shape) + + self.nn_dist, self.nn_ind = self.bias_tree.query( + self.base_meta[['latitude', 'longitude']], + distance_upper_bound=self.distance_upper_bound + ) + + if pre_load: + self.pre_load() + + self.out = None + self._init_out() + + logger.info('Finished initializing DataRetrievalBase.') + +
+[docs] + def pre_load(self): + """Preload all data needed for bias correction. This is currently + recommended to improve performance with the new sup3r data handler + access patterns""" + + if hasattr(self.base_dh, 'compute'): + logger.info('Pre loading baseline unbiased data into memory...') + self.base_dh.data.compute() + logger.info('Finished pre loading baseline unbiased data.') + + if hasattr(self.bias_dh, 'compute'): + logger.info('Pre loading historical biased data into memory...') + self.bias_dh.data.compute() + logger.info('Finished pre loading historical biased data.')
+ + + @abstractmethod + def _init_out(self): + """Initialize output arrays""" + + @property + def meta(self): + """Get a meta data dictionary on how these bias factors were + calculated""" + meta = { + 'base_fps': self.base_fps, + 'bias_fps': self.bias_fps, + 'base_dset': self.base_dset, + 'bias_feature': self.bias_feature, + 'target': self.target, + 'shape': self.shape, + 'class': str(self.__class__), + 'version_record': VERSION_RECORD, + } + return meta + + @property + def distance_upper_bound(self): + """Maximum distance (float) to map high-resolution data from exo_source + to the low-resolution file_paths input.""" + if self._distance_upper_bound is None: + diff = np.diff( + self.bias_meta[['latitude', 'longitude']].values, axis=0 + ) + diff = np.max(np.median(diff, axis=0)) + self._distance_upper_bound = diff + logger.info( + 'Set distance upper bound to {:.4f}'.format( + self._distance_upper_bound + ) + ) + return self._distance_upper_bound + +
+[docs] + @staticmethod + def compare_dists(base_data, bias_data, adder=0, scalar=1): + """Compare two distributions using the two-sample Kolmogorov-Smirnov. + When the output is minimized, the two distributions are similar. + + Parameters + ---------- + base_data : np.ndarray + 1D array of base data observations. + bias_data : np.ndarray + 1D array of biased data observations. + adder : float + Factor to adjust the biased data before comparing distributions: + bias_data * scalar + adder + scalar : float + Factor to adjust the biased data before comparing distributions: + bias_data * scalar + adder + + Returns + ------- + out : float + KS test statistic + """ + out = stats.ks_2samp(base_data, bias_data * scalar + adder) + return out.statistic
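To make compare_dists concrete, here is a self-contained sketch with synthetic data and illustrative scalar/adder values; the KS statistic shrinks when the adjusted biased distribution moves toward the base distribution:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
base = rng.normal(8.0, 2.0, 1000)  # synthetic baseline observations
bias = rng.normal(6.0, 1.0, 1000)  # synthetic biased observations

raw = stats.ks_2samp(base, bias).statistic
# scalar=2, adder=-4 maps N(6, 1) approximately onto N(8, 2)
adjusted = stats.ks_2samp(base, bias * 2.0 - 4.0).statistic
print(raw, adjusted)  # adjusted should be much smaller than raw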
+ + +
+[docs] + @classmethod + def get_node_cmd(cls, config): + """Get a CLI call to call cls.run() on a single node based on an input + config. + + Parameters + ---------- + config : dict + sup3r bias calc config with all necessary args and kwargs to + initialize the class and call run() on a single node. + """ + import_str = 'import time;\n' + import_str += 'from gaps import Status;\n' + import_str += 'from rex import init_logger;\n' + import_str += f'from sup3r.bias import {cls.__name__}' + + if not hasattr(cls, 'run'): + msg = ( + 'I can only get you a node command for subclasses of ' + 'DataRetrievalBase with a run() method.' + ) + logger.error(msg) + raise NotImplementedError(msg) + + # pylint: disable=E1101 + init_str = get_fun_call_str(cls, config) + fun_str = get_fun_call_str(cls.run, config) + fun_str = fun_str.partition('.')[-1] + fun_str = 'bc.' + fun_str + + log_file = config.get('log_file', None) + log_level = config.get('log_level', 'INFO') + log_arg_str = f'"sup3r", log_level="{log_level}"' + if log_file is not None: + log_arg_str += f', log_file="{log_file}"' + + cmd = ( + f"python -c '{import_str};\n" + 't0 = time.time();\n' + f'logger = init_logger({log_arg_str});\n' + f'bc = {init_str};\n' + f'{fun_str};\n' + 't_elap = time.time() - t0;\n' + ) + + pipeline_step = config.get('pipeline_step') or ModuleName.BIAS_CALC + cmd = BaseCLI.add_status_cmd(config, pipeline_step, cmd) + cmd += ";'\n" + + return cmd.replace('\\', '/')
+ + +
+[docs] + def get_bias_gid(self, coord): + """Get the bias gid from a coordinate. + + Parameters + ---------- + coord : tuple + (lat, lon) to get data for. + + Returns + ------- + bias_gid : int + gid of the data to retrieve in the bias data source raster data. + The gids for this data source are the enumerated indices of the + flattened coordinate array. + d : float + Distance in decimal degrees from coord to bias gid + """ + d, i = self.bias_tree.query(coord) + bias_gid = self.bias_gid_raster.flatten()[i] + return bias_gid, d
+ + +
+[docs] + def get_base_gid(self, bias_gid): + """Get one or more base gid(s) corresponding to a bias gid. + + Parameters + ---------- + bias_gid : int + gid of the data to retrieve in the bias data source raster data. + The gids for this data source are the enumerated indices of the + flattened coordinate array. + + Returns + ------- + dist : np.ndarray + Array of nearest neighbor distances with length equal to the number + of high-resolution baseline gids that map to the low resolution + bias gid pixel. + base_gid : np.ndarray + Array of base gids that are the nearest neighbors of bias_gid with + length equal to the number of high-resolution baseline gids that + map to the low resolution bias gid pixel. + """ + base_gid = np.where(self.nn_ind == bias_gid)[0] + dist = self.nn_dist[base_gid] + return dist, base_gid
+ + +
+[docs] + def get_data_pair(self, coord, daily_reduction='avg'): + """Get base and bias data observations based on a single bias gid. + + Parameters + ---------- + coord : tuple + (lat, lon) to get data for. + daily_reduction : None | str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + + Returns + ------- + base_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + and possibly daily-averaged or min/max'd as well. + bias_data : np.ndarray + 1D array of temporal data at the requested gid. + base_dist : np.ndarray + Array of nearest neighbor distances from coord to the base data + sites with length equal to the number of high-resolution baseline + gids that map to the low resolution bias gid pixel. + bias_dist : Float + Nearest neighbor distance from coord to the bias data site + """ + bias_gid, bias_dist = self.get_bias_gid(coord) + base_dist, base_gid = self.get_base_gid(bias_gid) + bias_data = self.get_bias_data(bias_gid) + base_data = self.get_base_data( + self.base_fps, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction=daily_reduction, + decimals=self.decimals, + ) + base_data = base_data[0] + return base_data, bias_data, base_dist, bias_dist
+ + +
+[docs] + def get_bias_data(self, bias_gid, bias_dh=None): + """Get data from the biased data source for a single gid + + Parameters + ---------- + bias_gid : int + gid of the data to retrieve in the bias data source raster data. + The gids for this data source are the enumerated indices of the + flattened coordinate array. + bias_dh : DataHandler, default=self.bias_dh + Any ``DataHandler`` from :mod:`sup3r.preprocessing`. This optional + argument allows an alternative handler other than the usual + :attr:`bias_dh`. For instance, the derived + :class:`~qdm.QuantileDeltaMappingCorrection` uses it to access the + reference biased dataset as well as the target biased dataset. + + Returns + ------- + bias_data : np.ndarray + 1D array of temporal data at the requested gid. + """ + + row, col = np.where(self.bias_gid_raster == bias_gid) + + # This can be confusing. If the given argument `bias_dh` is None, + # the default value for dh is `self.bias_dh`. + dh = bias_dh or self.bias_dh + bias_data = dh.data[self.bias_feature][row[0], col[0], ...] + + if bias_data.ndim != 1: + msg = ( + 'Found a weird number of feature channels for the bias ' + 'data retrieval: {}. Need just one channel'.format( + bias_data.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + if self.decimals is not None: + bias_data = np.around(bias_data, decimals=self.decimals) + + return np.asarray(bias_data)
+ + +
+[docs] + @classmethod + def get_base_data( + cls, + base_fps, + base_dset, + base_gid, + base_handler, + base_handler_kwargs=None, + daily_reduction='avg', + decimals=None, + base_dh_inst=None, + ): + """Get data from the baseline data source, possibly for many high-res + base gids corresponding to a single coarse low-res bias gid. + + Parameters + ---------- + base_fps : list | str + One or more baseline .h5 filepaths representing non-biased data to + use to correct the biased dataset. This is typically several years + of WTK or NSRDB files. + base_dset : str + A single dataset from the base_fps to retrieve. + base_gid : int | np.ndarray + One or more spatial gids to retrieve from base_fps. The data will + be spatially averaged across all of these sites. + base_handler : rex.Resource + A rex data handler similar to rex.Resource or sup3r.DataHandler + classes (if using the latter, must also input base_dh_inst) + base_handler_kwargs : dict | None + Optional kwargs to send to the initialization of the base_handler + class + daily_reduction : None | str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + decimals : int | None + Option to round bias and base data to this number of + decimals, this gets passed to np.around(). If decimals + is negative, it specifies the number of positions to + the left of the decimal point. + base_dh_inst : sup3r.DataHandler + Instantiated DataHandler class that has already loaded the base + data (required if base files are .nc and are not being opened by a + rex Resource handler). + + Returns + ------- + out_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + and possibly daily-averaged or min/max'd as well. + out_ti : pd.DatetimeIndex + DatetimeIndex object of datetimes corresponding to the + output data. + """ + + out_data = [] + out_ti = [] + all_cs_ghi = [] + base_handler_kwargs = base_handler_kwargs or {} + + if issubclass(base_handler, DataHandler) and base_dh_inst is None: + msg = ( + 'The method `get_base_data()` is only to be used with ' + '`base_handler` as a `sup3r.DataHandler` subclass if ' + '`base_dh_inst` is also provided!' + ) + logger.error(msg) + raise RuntimeError(msg) + + if issubclass(base_handler, DataHandler) and base_dh_inst is not None: + out_ti = base_dh_inst.time_index + out_data = cls._read_base_sup3r_data( + base_dh_inst, base_dset, base_gid + ) + all_cs_ghi = np.ones(len(out_data), dtype=np.float32) * np.nan + else: + for fp in base_fps: + with base_handler(fp, **base_handler_kwargs) as res: + base_ti = res.time_index + temp_out = cls._read_base_rex_data( + res, base_dset, base_gid + ) + base_data, base_cs_ghi = temp_out + + out_data.append(base_data) + out_ti.append(base_ti) + all_cs_ghi.append(base_cs_ghi) + + out_data = np.hstack(out_data) + out_ti = pd.DatetimeIndex(np.hstack(out_ti)) + all_cs_ghi = np.hstack(all_cs_ghi) + + if daily_reduction is not None: + out_data, out_ti = cls._reduce_base_data( + out_ti, out_data, all_cs_ghi, base_dset, daily_reduction + ) + + if decimals is not None: + out_data = np.around(out_data, decimals=decimals) + + return np.asarray(out_data), out_ti
+ + + @staticmethod + def _match_zero_rate(bias_data, base_data): + """The lowest percentile of values in the biased data will be set to + zero to match the percentile of zeros in the base data. This helps + resolve the issue where global climate models produce too many days + with small precipitation totals e.g., the "drizzle problem". + Ref: Polade et al., 2014 https://doi.org/10.1038/srep04364 + + Parameters + ---------- + bias_data : Union[np.ndarray, da.core.Array] + 1D array of biased data observations. + base_data : Union[np.ndarray, da.core.Array] + 1D array of base data observations. + + Returns + ------- + bias_data : np.ndarray + 1D array of biased data observations. Values below the quantile + associated with zeros in base_data will be set to zero + """ + + q_zero_base_in = np.nanmean(base_data == 0) + q_zero_bias_in = np.nanmean(bias_data == 0) + + q_bias = np.linspace(0, 1, len(bias_data)) + min_value_bias = np.interp(q_zero_base_in, q_bias, sorted(bias_data)) + + bias_data[bias_data < min_value_bias] = 0 + + q_zero_base_out = np.nanmean(base_data == 0) + q_zero_bias_out = np.nanmean(bias_data == 0) + + logger.debug( + 'Input bias/base zero rate is {:.3e}/{:.3e}, ' + 'output is {:.3e}/{:.3e}'.format( + q_zero_bias_in, + q_zero_base_in, + q_zero_bias_out, + q_zero_base_out, + ) + ) + + return bias_data + + @staticmethod + def _read_base_sup3r_data(dh, base_dset, base_gid): + """Read baseline data from a sup3r DataHandler + + Parameters + ---------- + dh : sup3r.DataHandler + sup3r DataHandler that is an open file handler of the base file(s) + base_dset : str + A single dataset from the base_fps to retrieve. + base_gid : int | np.ndarray + One or more spatial gids to retrieve from base_fps. The data will + be spatially averaged across all of these sites. + + Returns + ------- + base_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + """ + gid_raster = np.arange(len(dh.meta)) + gid_raster = gid_raster.reshape(dh.shape[:2]) + idy, idx = np.where(np.isin(gid_raster, base_gid)) + if dh.data.loaded: + # faster direct access of numpy array if loaded + base_data = dh.data[base_dset].data[idy, idx] + else: + base_data = dh.data[base_dset].data.vindex[idy, idx] + + assert base_data.shape[0] == len(base_gid) + assert base_data.shape[1] == len(dh.time_index) + return base_data.mean(axis=0) + + @staticmethod + def _read_base_rex_data(res, base_dset, base_gid): + """Read baseline data from a rex resource handler with extra logic for + special datasets (e.g. u/v wind components or clearsky_ratio) + + Parameters + ---------- + res : rex.Resource + rex Resource handler that is an open file handler of the base + file(s) + base_dset : str + A single dataset from the base_fps to retrieve. + base_gid : int | np.ndarray + One or more spatial gids to retrieve from base_fps. The data will + be spatially averaged across all of these sites. + + Returns + ------- + base_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + base_cs_ghi : np.ndarray + If base_dset == "clearsky_ratio", the base_data array is GHI and + this base_cs_ghi is clearsky GHI. Otherwise this is an array with + same length as base_data but full of np.nan + """ + + msg = '`res` input must not be a `DataHandler` subclass!' 
+ assert not issubclass(res.__class__, DataHandler), msg + + base_cs_ghi = None + + if base_dset.lower().startswith(('u_', 'v_')): + dset_ws = base_dset.lower().replace('u_', 'windspeed_') + dset_ws = dset_ws.lower().replace('v_', 'windspeed_') + dset_wd = dset_ws.replace('speed', 'direction') + base_ws = res[dset_ws, :, base_gid] + base_wd = res[dset_wd, :, base_gid] + + if base_dset.startswith('u_'): + base_data = -base_ws * np.sin(np.radians(base_wd)) + else: + base_data = -base_ws * np.cos(np.radians(base_wd)) + + elif base_dset == 'clearsky_ratio': + base_data = res['ghi', :, base_gid] + base_cs_ghi = res['clearsky_ghi', :, base_gid] + + else: + base_data = res[base_dset, :, base_gid] + + if len(base_data.shape) == 2: + base_data = np.nanmean(base_data, axis=1) + if base_cs_ghi is not None: + base_cs_ghi = np.nanmean(base_cs_ghi, axis=1) + + if base_cs_ghi is None: + base_cs_ghi = np.ones(len(base_data), dtype=np.float32) * np.nan + + return base_data, base_cs_ghi + + @staticmethod + def _reduce_base_data( + base_ti, base_data, base_cs_ghi, base_dset, daily_reduction + ): + """Reduce the base timeseries data using some sort of daily reduction + function. + + Parameters + ---------- + base_ti : pd.DatetimeIndex + Time index associated with base_data + base_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + base_cs_ghi : np.ndarray + If base_dset == "clearsky_ratio", the base_data array is GHI and + this base_cs_ghi is clearsky GHI. Otherwise this is an array with + same length as base_data but full of np.nan + base_dset : str + A single dataset from the base_fps to retrieve. + daily_reduction : str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + + Returns + ------- + base_data : np.ndarray + 1D array of base data spatially averaged across the base_gid input + and possibly daily-averaged or min/max'd as well. 
+ daily_ti : pd.DatetimeIndex + Daily DatetimeIndex corresponding to the daily base_data + """ + + if daily_reduction is None: + return base_data + + daily_ti = pd.DatetimeIndex(sorted(set(base_ti.date))) + df = pd.DataFrame( + { + 'date': base_ti.date, + 'base_data': base_data, + 'base_cs_ghi': base_cs_ghi, + } + ) + + cs_ratio = ( + daily_reduction.lower() in ('avg', 'average', 'mean') + and base_dset == 'clearsky_ratio' + ) + + if cs_ratio: + daily_ghi = df.groupby('date').sum()['base_data'].values + daily_cs_ghi = df.groupby('date').sum()['base_cs_ghi'].values + daily_ghi[daily_cs_ghi == 0] = 0 + daily_cs_ghi[daily_cs_ghi == 0] = 1 + base_data = daily_ghi / daily_cs_ghi + mask = np.isnan(base_data) + msg = ( + 'Could not calculate daily average "clearsky_ratio" with ' + 'input ghi and cs ghi inputs: \n{}, \n{}'.format( + daily_ghi[mask], daily_cs_ghi[mask] + ) + ) + assert not np.isnan(base_data).any(), msg + + elif daily_reduction.lower() in ('avg', 'average', 'mean'): + base_data = df.groupby('date').mean()['base_data'].values + + elif daily_reduction.lower() in ('max', 'maximum'): + base_data = df.groupby('date').max()['base_data'].values + + elif daily_reduction.lower() in ('min', 'minimum'): + base_data = df.groupby('date').min()['base_data'].values + + elif daily_reduction.lower() in ('sum', 'total'): + base_data = df.groupby('date').sum()['base_data'].values + + msg = ( + f'Daily reduced base data shape {base_data.shape} does not ' + f'match daily time index shape {daily_ti.shape}, ' + 'something went wrong!' + ) + assert len(base_data.shape) == 1, msg + assert base_data.shape == daily_ti.shape, msg + + return base_data, daily_ti
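As a usage sketch for the retrieval methods above: DataRetrievalBase is abstract, so a concrete subclass such as LinearCorrection (defined in sup3r.bias.bias_calc) is instantiated instead. All file paths and feature names below are placeholders, and the init will open the files:

from sup3r.bias import LinearCorrection

lc = LinearCorrection(
    base_fps=['./wtk_2012.h5'],    # baseline (e.g. WTK) h5 files
    bias_fps=['./gcm_hist_*.nc'],  # biased (e.g. GCM) netcdf files
    base_dset='windspeed_100m',
    bias_feature='windspeed_100m',
)

# Co-located base and bias time series for one (lat, lon) coordinate
base_data, bias_data, base_dist, bias_dist = lc.get_data_pair((39.7, -105.2))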
\ No newline at end of file
diff --git a/_modules/sup3r/bias/bias_calc.html b/_modules/sup3r/bias/bias_calc.html
new file mode 100644
index 000000000..04c4d067c
--- /dev/null
+++ b/_modules/sup3r/bias/bias_calc.html
@@ -0,0 +1,1289 @@
+sup3r.bias.bias_calc — sup3r 0.2.1.dev62 documentation

Source code for sup3r.bias.bias_calc

+"""Utilities to calculate the bias correction factors for biased data that is
+going to be fed into the sup3r downscaling models. This is typically used to
+bias correct GCM data vs. some historical record like the WTK or NSRDB.
+
+TODO: Generalize the ``with ProcessPoolExecutor() as exe: ...`` so we don't
+need to duplicate this wherever we kickoff a process or thread pool
+"""
+
+import copy
+import json
+import logging
+import os
+from concurrent.futures import ProcessPoolExecutor, as_completed
+
+import h5py
+import numpy as np
+from scipy import stats
+
+from sup3r.preprocessing import DataHandler
+
+from .base import DataRetrievalBase
+from .mixins import FillAndSmoothMixin
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class LinearCorrection(FillAndSmoothMixin, DataRetrievalBase): + """Calculate linear correction *scalar +adder factors to bias correct data + + This calculation operates on single bias sites for the full time series of + available data (no season bias correction) + """ + + NT = 1 + """size of the time dimension, 1 is no time-based bias correction""" + + def _init_out(self): + """Initialize output arrays""" + keys = [ + f'{self.bias_feature}_scalar', + f'{self.bias_feature}_adder', + f'bias_{self.bias_feature}_mean', + f'bias_{self.bias_feature}_std', + f'base_{self.base_dset}_mean', + f'base_{self.base_dset}_std', + ] + self.out = { + k: np.full( + (*self.bias_gid_raster.shape, self.NT), np.nan, np.float32 + ) + for k in keys + } + +
+[docs] + @staticmethod + def get_linear_correction(bias_data, base_data, bias_feature, base_dset): + """Get the linear correction factors based on 1D bias and base datasets + + Parameters + ---------- + bias_data : np.ndarray + 1D array of biased data observations. + base_data : np.ndarray + 1D array of base data observations. + bias_feature : str + This is the biased feature from bias_fps to retrieve. This should + be a single feature name corresponding to base_dset + base_dset : str + A single dataset from the base_fps to retrieve. In the case of wind + components, this can be u_100m or v_100m which will retrieve + windspeed and winddirection and derive the U/V component. + + Returns + ------- + out : dict + Dictionary of values defining the mean/std of the bias + base + data and the scalar + adder factors to correct the biased data + like: bias_data * scalar + adder + """ + + bias_std = np.nanstd(bias_data) + if bias_std == 0: + bias_std = np.nanstd(base_data) + + scalar = np.nanstd(base_data) / bias_std + adder = np.nanmean(base_data) - np.nanmean(bias_data) * scalar + + out = { + f'bias_{bias_feature}_mean': np.nanmean(bias_data), + f'bias_{bias_feature}_std': bias_std, + f'base_{base_dset}_mean': np.nanmean(base_data), + f'base_{base_dset}_std': np.nanstd(base_data), + f'{bias_feature}_scalar': scalar, + f'{bias_feature}_adder': adder, + } + + return out
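A small worked example of the factors computed above (the static method can be called directly; the feature names and the sup3r.bias import are illustrative). Here std(base) / std(bias) = 2.0 and mean(base) - mean(bias) * 2.0 = -2.0, so the corrected series reproduces the base series exactly:

import numpy as np
from sup3r.bias import LinearCorrection

base = np.array([4.0, 6.0, 8.0, 10.0])  # illustrative baseline data
bias = np.array([3.0, 4.0, 5.0, 6.0])   # biased data: low mean, low variance

out = LinearCorrection.get_linear_correction(
    bias, base, 'windspeed_100m', 'windspeed_100m'
)
corrected = (
    bias * out['windspeed_100m_scalar'] + out['windspeed_100m_adder']
)  # -> [4., 6., 8., 10.]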
+ + + # pylint: disable=W0613 + @classmethod + def _run_single( + cls, + bias_data, + base_fps, + bias_feature, + base_dset, + base_gid, + base_handler, + daily_reduction, + bias_ti, # noqa: ARG003 + decimals, + base_dh_inst=None, + match_zero_rate=False, + ): + """Find the nominal scalar + adder combination to bias correct data + at a single site""" + + base_data, _ = cls.get_base_data( + base_fps, + base_dset, + base_gid, + base_handler, + daily_reduction=daily_reduction, + decimals=decimals, + base_dh_inst=base_dh_inst, + ) + + if match_zero_rate: + bias_data = cls._match_zero_rate(bias_data, base_data) + + out = cls.get_linear_correction( + bias_data, base_data, bias_feature, base_dset + ) + return out + +
+[docs] + def write_outputs(self, fp_out, out): + """Write outputs to an .h5 file. + + Parameters + ---------- + fp_out : str | None + Optional .h5 output file to write scalar and adder arrays. + out : dict + Dictionary of values defining the mean/std of the bias + base + data and the scalar + adder factors to correct the biased data + like: bias_data * scalar + adder. Each value is of shape + (lat, lon, time). + """ + + if fp_out is not None: + if not os.path.exists(os.path.dirname(fp_out)): + os.makedirs(os.path.dirname(fp_out), exist_ok=True) + + with h5py.File(fp_out, 'w') as f: + # pylint: disable=E1136 + lat = self.bias_dh.lat_lon[..., 0] + lon = self.bias_dh.lat_lon[..., 1] + f.create_dataset('latitude', data=lat) + f.create_dataset('longitude', data=lon) + for dset, data in out.items(): + f.create_dataset(dset, data=data) + + for k, v in self.meta.items(): + f.attrs[k] = json.dumps(v) + + logger.info( + 'Wrote scalar adder factors to file: {}'.format(fp_out) + )
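A hypothetical inspection of the file written by write_outputs(); the file name is a placeholder and the dataset names follow the f'{bias_feature}_scalar' / f'{bias_feature}_adder' pattern used above:

import h5py

with h5py.File('./bias_factors.h5', 'r') as f:
    print(list(f))  # latitude, longitude, *_scalar, *_adder, means/stds
    scalar = f['windspeed_100m_scalar'][...]  # shape (lat, lon, time)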
+ + +
+[docs] + def run( + self, + fp_out=None, + max_workers=None, + daily_reduction='avg', + fill_extend=True, + smooth_extend=0, + smooth_interior=0, + ): + """Run linear correction factor calculations for every site in the bias + dataset + + Parameters + ---------- + fp_out : str | None + Optional .h5 output file to write scalar and adder arrays. + max_workers : int + Number of workers to run in parallel. 1 is serial and None is all + available. + daily_reduction : None | str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + fill_extend : bool + Flag to fill data past distance_upper_bound using spatial nearest + neighbor. If False, the extended domain will be left as NaN. + smooth_extend : float + Option to smooth the scalar/adder data outside of the spatial + domain set by the distance_upper_bound input. This alleviates the + weird seams far from the domain of interest. This value is the + standard deviation for the gaussian_filter kernel + smooth_interior : float + Option to smooth the scalar/adder data within the valid spatial + domain. This can reduce the affect of extreme values within + aggregations over large number of pixels. + + Returns + ------- + out : dict + Dictionary of values defining the mean/std of the bias + base + data and the scalar + adder factors to correct the biased data + like: bias_data * scalar + adder. Each value is of shape + (lat, lon, time). + """ + logger.debug('Starting linear correction calculation...') + + logger.info( + 'Initialized scalar / adder with shape: {}'.format( + self.bias_gid_raster.shape + ) + ) + + self.bad_bias_gids = [] + + # sup3r DataHandler opening base files will load all data in parallel + # during the init and should not be passed in parallel to workers + if isinstance(self.base_dh, DataHandler): + max_workers = 1 + + if max_workers == 1: + logger.debug('Running serial calculation.') + for i, bias_gid in enumerate(self.bias_meta.index): + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + else: + bias_data = self.get_bias_data(bias_gid) + single_out = self._run_single( + bias_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + self.bias_ti, + self.decimals, + base_dh_inst=self.base_dh, + match_zero_rate=self.match_zero_rate, + ) + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info( + 'Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(self.bias_meta)) + ) + + else: + logger.debug( + 'Running parallel calculation with {} workers.'.format( + max_workers + ) + ) + with ProcessPoolExecutor(max_workers=max_workers) as exe: + futures = {} + for bias_gid in self.bias_meta.index: + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + else: + bias_data = self.get_bias_data(bias_gid) + future = exe.submit( + self._run_single, + bias_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + self.bias_ti, + self.decimals, + match_zero_rate=self.match_zero_rate, + ) + futures[future] = raster_loc + + logger.debug('Finished launching futures.') + for i, future in 
enumerate(as_completed(futures)): + raster_loc = futures[future] + single_out = future.result() + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info( + 'Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(futures)) + ) + + logger.info('Finished calculating bias correction factors.') + + self.out = self.fill_and_smooth( + self.out, fill_extend, smooth_extend, smooth_interior + ) + + self.write_outputs(fp_out, self.out) + + return copy.deepcopy(self.out)
+
+ + + +
+[docs] +class ScalarCorrection(LinearCorrection): + """Calculate annual linear correction *scalar factors to bias correct data. + This typically used when base data is just monthly or annual means and + standard deviations cannot be computed. This is case for vortex data, for + example. Thus, just scalar factors are computed as mean(base_data) / + mean(bias_data). Adder factors are still written but are exactly zero. + + This calculation operates on single bias sites on a monthly basis + """ + +
+[docs] + @staticmethod + def get_linear_correction(bias_data, base_data, bias_feature, base_dset): + """Get the linear correction factors based on 1D bias and base datasets + + Parameters + ---------- + bias_data : np.ndarray + 1D array of biased data observations. + base_data : np.ndarray + 1D array of base data observations. + bias_feature : str + This is the biased feature from bias_fps to retrieve. This should + be a single feature name corresponding to base_dset + base_dset : str + A single dataset from the base_fps to retrieve. In the case of wind + components, this can be u_100m or v_100m which will retrieve + windspeed and winddirection and derive the U/V component. + + Returns + ------- + out : dict + Dictionary of values defining the mean/std of the bias + base + data and the scalar + adder factors to correct the biased data + like: bias_data * scalar + adder + """ + + bias_std = np.nanstd(bias_data) + if bias_std == 0: + bias_std = np.nanstd(base_data) + + base_mean = np.nanmean(base_data) + bias_mean = np.nanmean(bias_data) + scalar = base_mean / bias_mean + adder = np.zeros(scalar.shape) + + out = { + f'bias_{bias_feature}_mean': bias_mean, + f'base_{base_dset}_mean': base_mean, + f'{bias_feature}_scalar': scalar, + f'{bias_feature}_adder': adder, + } + + return out
+
+ + + +
+[docs] +class MonthlyLinearCorrection(LinearCorrection): + """Calculate linear correction *scalar +adder factors to bias correct data + + This calculation operates on single bias sites on a monthly basis + """ + + NT = 12 + """size of the time dimension, 12 is monthly bias correction""" + + @classmethod + def _run_single( + cls, + bias_data, + base_fps, + bias_feature, + base_dset, + base_gid, + base_handler, + daily_reduction, + bias_ti, + decimals, + base_dh_inst=None, + match_zero_rate=False, + ): + """Find the nominal scalar + adder combination to bias correct data + at a single site""" + + base_data, base_ti = cls.get_base_data( + base_fps, + base_dset, + base_gid, + base_handler, + daily_reduction=daily_reduction, + decimals=decimals, + base_dh_inst=base_dh_inst, + ) + + if match_zero_rate: + bias_data = cls._match_zero_rate(bias_data, base_data) + + base_arr = np.full(cls.NT, np.nan, dtype=np.float32) + out = {} + + for month in range(1, 13): + bias_mask = bias_ti.month == month + base_mask = base_ti.month == month + + if any(bias_mask) and any(base_mask): + mout = cls.get_linear_correction( + bias_data[bias_mask], + base_data[base_mask], + bias_feature, + base_dset, + ) + for k, v in mout.items(): + if k not in out: + out[k] = base_arr.copy() + out[k][month - 1] = v + + return out
+ + + +
+[docs] +class MonthlyScalarCorrection(MonthlyLinearCorrection, ScalarCorrection): + """Calculate linear correction *scalar factors for each month""" + + NT = 12
+ + + +
+[docs] +class SkillAssessment(MonthlyLinearCorrection): + """Calculate historical skill of one dataset compared to another.""" + + PERCENTILES = (1, 5, 25, 50, 75, 95, 99) + """Data percentiles to report.""" + + def _init_out(self): + """Initialize output arrays""" + + monthly_keys = [ + f'{self.bias_feature}_scalar', + f'{self.bias_feature}_adder', + f'bias_{self.bias_feature}_mean_monthly', + f'bias_{self.bias_feature}_std_monthly', + f'base_{self.base_dset}_mean_monthly', + f'base_{self.base_dset}_std_monthly', + ] + + annual_keys = [ + f'{self.bias_feature}_ks_stat', + f'{self.bias_feature}_ks_p', + f'{self.bias_feature}_bias', + f'bias_{self.bias_feature}_mean', + f'bias_{self.bias_feature}_std', + f'bias_{self.bias_feature}_skew', + f'bias_{self.bias_feature}_kurtosis', + f'bias_{self.bias_feature}_zero_rate', + f'base_{self.base_dset}_mean', + f'base_{self.base_dset}_std', + f'base_{self.base_dset}_skew', + f'base_{self.base_dset}_kurtosis', + f'base_{self.base_dset}_zero_rate', + ] + + self.out = { + k: np.full( + (*self.bias_gid_raster.shape, self.NT), np.nan, np.float32 + ) + for k in monthly_keys + } + + arr = np.full((*self.bias_gid_raster.shape, 1), np.nan, np.float32) + for k in annual_keys: + self.out[k] = arr.copy() + + for p in self.PERCENTILES: + base_k = f'base_{self.base_dset}_percentile_{p}' + bias_k = f'bias_{self.bias_feature}_percentile_{p}' + self.out[base_k] = arr.copy() + self.out[bias_k] = arr.copy() + + @classmethod + def _run_skill_eval( + cls, + bias_data, + base_data, + bias_feature, + base_dset, + match_zero_rate=False, + ): + """Run skill assessment metrics on 1D datasets at a single site. + + Note we run the KS test on the mean=0 distributions as per: + S. Brands et al. 2013 https://doi.org/10.1007/s00382-013-1742-8 + """ + + if match_zero_rate: + bias_data = cls._match_zero_rate(bias_data, base_data) + + out = {} + bias_mean = np.nanmean(bias_data) + base_mean = np.nanmean(base_data) + out[f'{bias_feature}_bias'] = bias_mean - base_mean + + out[f'bias_{bias_feature}_mean'] = bias_mean + out[f'bias_{bias_feature}_std'] = np.nanstd(bias_data) + out[f'bias_{bias_feature}_skew'] = stats.skew(bias_data) + out[f'bias_{bias_feature}_kurtosis'] = stats.kurtosis(bias_data) + out[f'bias_{bias_feature}_zero_rate'] = np.nanmean(bias_data == 0) + + out[f'base_{base_dset}_mean'] = base_mean + out[f'base_{base_dset}_std'] = np.nanstd(base_data) + out[f'base_{base_dset}_skew'] = stats.skew(base_data) + out[f'base_{base_dset}_kurtosis'] = stats.kurtosis(base_data) + out[f'base_{base_dset}_zero_rate'] = np.nanmean(base_data == 0) + + if match_zero_rate: + ks_out = stats.ks_2samp(base_data, bias_data) + else: + ks_out = stats.ks_2samp( + base_data - base_mean, bias_data - bias_mean + ) + + out[f'{bias_feature}_ks_stat'] = ks_out.statistic + out[f'{bias_feature}_ks_p'] = ks_out.pvalue + + for p in cls.PERCENTILES: + base_k = f'base_{base_dset}_percentile_{p}' + bias_k = f'bias_{bias_feature}_percentile_{p}' + out[base_k] = np.percentile(base_data, p) + out[bias_k] = np.percentile(bias_data, p) + + return out + + @classmethod + def _run_single( + cls, + bias_data, + base_fps, + bias_feature, + base_dset, + base_gid, + base_handler, + daily_reduction, + bias_ti, + decimals, + base_dh_inst=None, + match_zero_rate=False, + ): + """Do a skill assessment at a single site""" + + base_data, base_ti = cls.get_base_data( + base_fps, + base_dset, + base_gid, + base_handler, + daily_reduction=daily_reduction, + decimals=decimals, + base_dh_inst=base_dh_inst, + ) + + arr = 
np.full(cls.NT, np.nan, dtype=np.float32) + out = { + f'bias_{bias_feature}_mean_monthly': arr.copy(), + f'bias_{bias_feature}_std_monthly': arr.copy(), + f'base_{base_dset}_mean_monthly': arr.copy(), + f'base_{base_dset}_std_monthly': arr.copy(), + } + + out.update( + cls._run_skill_eval( + bias_data, + base_data, + bias_feature, + base_dset, + match_zero_rate=match_zero_rate, + ) + ) + + for month in range(1, 13): + bias_mask = bias_ti.month == month + base_mask = base_ti.month == month + + if any(bias_mask) and any(base_mask): + mout = cls.get_linear_correction( + bias_data[bias_mask], + base_data[base_mask], + bias_feature, + base_dset, + ) + for k, v in mout.items(): + if not k.endswith(('_scalar', '_adder')): + k += '_monthly' + out[k][month - 1] = v + + return out
\ No newline at end of file
diff --git a/_modules/sup3r/bias/bias_calc_cli.html b/_modules/sup3r/bias/bias_calc_cli.html
new file mode 100644
index 000000000..2b11199f4
--- /dev/null
+++ b/_modules/sup3r/bias/bias_calc_cli.html
@@ -0,0 +1,790 @@
+sup3r.bias.bias_calc_cli — sup3r 0.2.1.dev62 documentation

Source code for sup3r.bias.bias_calc_cli

+"""sup3r bias correction calculation CLI entry points."""
+import copy
+import logging
+import os
+
+import click
+
+import sup3r.bias
+from sup3r import __version__
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import AVAILABLE_HARDWARE_OPTIONS, BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+@click.group()
+@click.version_option(version=__version__)
+@click.option('-v', '--verbose', is_flag=True,
+              help='Flag to turn on debug logging. Default is not verbose.')
+@click.pass_context
+def main(ctx, verbose):
+    """Sup3r bias calc Command Line Interface"""
+    ctx.ensure_object(dict)
+    ctx.obj['VERBOSE'] = verbose
+
+
+@main.command()
+@click.option('--config_file', '-c', required=True,
+              type=click.Path(exists=True),
+              help='sup3r bias correction calculation config .json file.')
+@click.option('-v', '--verbose', is_flag=True,
+              help='Flag to turn on debug logging. Default is not verbose.')
+@click.pass_context
+def from_config(ctx, config_file, verbose=False, pipeline_step=None):
+    """Run sup3r bias correction calculation from a config file."""
+    config = BaseCLI.from_config_preflight(ModuleName.BIAS_CALC, ctx,
+                                           config_file, verbose)
+
+    exec_kwargs = config.get('execution_control', {})
+    hardware_option = exec_kwargs.pop('option', 'local')
+    calc_class_name = config['bias_calc_class']
+    BiasCalcClass = getattr(sup3r.bias, calc_class_name)
+    basename = config['job_name']
+    log_pattern = config.get('log_pattern', None)
+
+    jobs = config['jobs']
+    for i_node, job in enumerate(jobs):
+        node_config = copy.deepcopy(job)
+        node_config['status_dir'] = config['status_dir']
+        node_config['log_file'] = (
+            log_pattern if log_pattern is None
+            else os.path.normpath(log_pattern.format(node_index=i_node)))
+        name = ('{}_{}'.format(basename, str(i_node).zfill(6)))
+        ctx.obj['NAME'] = name
+        node_config['job_name'] = name
+        node_config["pipeline_step"] = pipeline_step
+
+        cmd = BiasCalcClass.get_node_cmd(node_config)
+
+        cmd_log = '\n\t'.join(cmd.split('\n'))
+        logger.debug(f'Running command:\n\t{cmd_log}')
+
+        if hardware_option.lower() in AVAILABLE_HARDWARE_OPTIONS:
+            kickoff_slurm_job(ctx, cmd, pipeline_step, **exec_kwargs)
+        else:
+            kickoff_local_job(ctx, cmd, pipeline_step)
+
+
+
+[docs] +def kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', + memory=None, walltime=4, feature=None, + stdout_path='./stdout/'): + """Run sup3r on HPC via SLURM job submission. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in SLURM shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + alloc : str + HPC project (allocation) handle. Example: 'sup3r'. + memory : int + Node memory request in GB. + walltime : float + Node walltime request in hours. + feature : str + Additional flags for SLURM job. Format is "--qos=high" + or "--depend=[state:job_id]". Default is None. + stdout_path : str + Path to print .stdout and .stderr files. + """ + BaseCLI.kickoff_slurm_job(ModuleName.BIAS_CALC, ctx, cmd, alloc, memory, + walltime, feature, stdout_path, pipeline_step)
+ + + +
+[docs] +def kickoff_local_job(ctx, cmd, pipeline_step=None): + """Run sup3r bias calc locally. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + BaseCLI.kickoff_local_job(ModuleName.BIAS_CALC, ctx, cmd, pipeline_step)
+ + + +if __name__ == '__main__': + try: + main(obj={}) + except Exception: + logger.exception('Error running sup3r bias calc CLI') + raise +
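A minimal sketch of a bias calc config consistent with the from_config logic above; the class name is one of the calculators in sup3r.bias, and the paths, features, and job entries are illustrative:

{
    "bias_calc_class": "LinearCorrection",
    "job_name": "bias_calc_test",
    "execution_control": {"option": "local"},
    "jobs": [
        {
            "base_fps": ["./wtk_2012.h5"],
            "bias_fps": ["./gcm_hist_*.nc"],
            "base_dset": "windspeed_100m",
            "bias_feature": "windspeed_100m",
            "fp_out": "./bias_factors.h5"
        }
    ]
}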
\ No newline at end of file
diff --git a/_modules/sup3r/bias/bias_calc_vortex.html b/_modules/sup3r/bias/bias_calc_vortex.html
new file mode 100644
index 000000000..c93a58c09
--- /dev/null
+++ b/_modules/sup3r/bias/bias_calc_vortex.html
@@ -0,0 +1,1248 @@
+sup3r.bias.bias_calc_vortex — sup3r 0.2.1.dev62 documentation

Source code for sup3r.bias.bias_calc_vortex

+"""Classes to compute means from vortex and era data and compute bias
+correction factors.
+
+Vortex mean files can be downloaded from IRENA.
+https://globalatlas.irena.org/workspace
+"""
+
+import calendar
+import logging
+import os
+
+import dask
+import numpy as np
+import pandas as pd
+import xarray as xr
+from rex import Resource
+from scipy.interpolate import interp1d
+
+from sup3r.postprocessing import OutputHandler, RexOutputs
+from sup3r.preprocessing.utilities import log_args
+from sup3r.utilities import VERSION_RECORD
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class VortexMeanPrepper: + """Class for converting monthly vortex tif files for each height to a + single h5 files containing all monthly means for all requested output + heights. + """ + + @log_args + def __init__(self, path_pattern, in_heights, out_heights, overwrite=False): + """ + Parameters + ---------- + path_pattern : str + Pattern for input tif files. Needs to include {month} and {height} + format keys. + in_heights : list + List of heights for input files. + out_heights : list + List of output heights used for interpolation + overwrite : bool + Whether to overwrite intermediate netcdf files containing the + interpolated masked monthly means. + """ + msg = 'path_pattern needs to have {month} and {height} format keys' + assert '{month}' in path_pattern and '{height}' in path_pattern, msg + self.path_pattern = path_pattern + self.in_heights = in_heights + self.out_heights = out_heights + self.out_dir = os.path.dirname(path_pattern) + self.overwrite = overwrite + self._mask = None + self._meta = None + + @property + def in_features(self): + """List of features corresponding to input heights.""" + return [f'windspeed_{h}m' for h in self.in_heights] + + @property + def out_features(self): + """List of features corresponding to output heights""" + return [f'windspeed_{h}m' for h in self.out_heights] + +
+[docs] + def get_input_file(self, month, height): + """Get vortex tif file for given month and height.""" + return self.path_pattern.format(month=month, height=height)
+ + +
+[docs] + def get_height_files(self, month): + """Get set of netcdf files for given month""" + files = [] + for height in self.in_heights: + infile = self.get_input_file(month, height) + outfile = infile.replace('.tif', '.nc') + files.append(outfile) + return files
+ + + @property + def input_files(self): + """Get list of all input files used for h5 meta.""" + files = [] + for height in self.in_heights: + for i in range(1, 13): + month = calendar.month_name[i] + files.append(self.get_input_file(month, height)) + return files + +
+[docs] + def get_output_file(self, month): + """Get name of netcdf file for a given month.""" + return os.path.join( + self.out_dir.replace('{month}', month), f'{month}.nc' + )
+ + + @property + def output_files(self): + """List of output monthly output files each with windspeed for all + input heights + """ + files = [] + for i in range(1, 13): + month = calendar.month_name[i] + files.append(self.get_output_file(month)) + return files + +
+[docs] + def convert_month_height_tif(self, month, height): + """Get windspeed mean for the given month and hub height from the + corresponding input file and write this to a netcdf file. + """ + infile = self.get_input_file(month, height) + logger.info(f'Getting mean windspeed_{height}m for {month}.') + outfile = infile.replace('.tif', '.nc') + if os.path.exists(outfile) and self.overwrite: + os.remove(outfile) + + if not os.path.exists(outfile) or self.overwrite: + ds = xr.open_mfdataset(infile) + ds = ds.rename( + { + 'band_data': f'windspeed_{height}m', + 'x': 'longitude', + 'y': 'latitude', + } + ) + ds = ds.isel(band=0).drop_vars('band') + ds.to_netcdf(outfile, format='NETCDF4', engine='h5netcdf') + return outfile
+ + +
+[docs] + def convert_month_tif(self, month): + """Write netcdf files for all heights for the given month.""" + for height in self.in_heights: + self.convert_month_height_tif(month, height)
+ + +
+[docs] + def convert_all_tifs(self): + """Write netcdf files for all heights for all months.""" + for i in range(1, 13): + month = calendar.month_name[i] + logger.info(f'Converting tif files to netcdf files for {month}') + self.convert_month_tif(month)
+ + + @property + def mask(self): + """Mask coordinates without data""" + if self._mask is None: + with xr.open_mfdataset(self.get_height_files('January')) as res: + mask = (res[self.in_features[0]] != -999) & ( + ~np.isnan(res[self.in_features[0]]) + ) + for feat in self.in_features[1:]: + tmp = (res[feat] != -999) & (~np.isnan(res[feat])) + mask &= tmp + self._mask = np.array(mask).flatten() + return self._mask + +
+[docs] + def get_month(self, month): + """Get interpolated means for all hub heights for the given month. + + Parameters + ---------- + month : str + Name of month to get data for + + Returns + ------- + data : xarray.Dataset + xarray dataset object containing interpolated monthly windspeed + means for all input and output heights + + """ + month_file = self.get_output_file(month) + if os.path.exists(month_file) and self.overwrite: + os.remove(month_file) + + if os.path.exists(month_file) and not self.overwrite: + logger.info(f'Loading month_file {month_file}.') + data = xr.open_mfdataset(month_file) + else: + logger.info( + 'Getting mean windspeed for all heights ' + f'({self.in_heights}) for {month}' + ) + data = xr.open_mfdataset(self.get_height_files(month)) + logger.info( + 'Interpolating windspeed for all heights ' + f'({self.out_heights}) for {month}.' + ) + data = self.interp(data) + data.to_netcdf(month_file, format='NETCDF4', engine='h5netcdf') + logger.info( + 'Saved interpolated means for all heights for ' + f'{month} to {month_file}.' + ) + return data
+ + +
+[docs] + def interp(self, data): + """Interpolate data to requested output heights. + + Parameters + ---------- + data : xarray.Dataset + xarray dataset object containing windspeed for all input heights + + Returns + ------- + data : xarray.Dataset + xarray dataset object containing windspeed for all input and output + heights + """ + var_array = np.zeros( + ( + len(data.latitude) * len(data.longitude), + len(self.in_heights), + ), + dtype=np.float32, + ) + lev_array = var_array.copy() + for i, (h, feat) in enumerate(zip(self.in_heights, self.in_features)): + var_array[..., i] = data[feat].values.flatten() + lev_array[..., i] = h + + logger.info( + f'Interpolating {self.in_features} to {self.out_features} ' + f'for {var_array.shape[0]} coordinates.' + ) + tmp = [ + interp1d(h, v, fill_value='extrapolate')(self.out_heights) + for h, v in zip(lev_array[self.mask], var_array[self.mask]) + ] + out = np.full( + (len(data.latitude), len(data.longitude), len(self.out_heights)), + np.nan, + dtype=np.float32, + ) + out[self.mask.reshape((len(data.latitude), len(data.longitude)))] = tmp + for i, feat in enumerate(self.out_features): + if feat not in data: + data[feat] = (('latitude', 'longitude'), out[..., i]) + return data
+ + +
+[docs] + def get_lat_lon(self): + """Get lat lon grid""" + with xr.open_mfdataset(self.get_height_files('January')) as res: + lons, lats = np.meshgrid( + res['longitude'].values, res['latitude'].values + ) + return np.array(lats), np.array(lons)
+ + + @property + def meta(self): + """Get meta with latitude/longitude""" + if self._meta is None: + lats, lons = self.get_lat_lon() + self._meta = pd.DataFrame() + self._meta['latitude'] = lats.flatten()[self.mask] + self._meta['longitude'] = lons.flatten()[self.mask] + return self._meta + + @property + def time_index(self): + """Get time index so output conforms to standard format""" + times = [f'2000-{str(i).zfill(2)}' for i in range(1, 13)] + time_index = pd.DatetimeIndex(times) + return time_index + +
+[docs]
+    def get_all_data(self):
+        """Get interpolated monthly means for all out heights as a dictionary
+        to use for h5 writing.
+
+        Returns
+        -------
+        out : dict
+            Dictionary of arrays containing monthly means for each hub
+            height, keyed by feature name. Spatial dimensions are flattened
+            and masked, so each array has shape (n_sites, 12). Latitude and
+            longitude for these sites are available from the ``meta``
+            property.
+        """
+        data_dict = {}
+        s_num = len(self.meta)
+        for i in range(1, 13):
+            month = calendar.month_name[i]
+            out = self.get_month(month)
+            for feat in self.out_features:
+                if feat not in data_dict:
+                    data_dict[feat] = np.full((s_num, 12), np.nan)
+                data = out[feat].values.flatten()[self.mask]
+                data_dict[feat][..., i - 1] = data
+        return data_dict
+
+
+    @property
+    def global_attrs(self):
+        """Get dictionary of global attributes describing how this data was
+        prepared"""
+        attrs = {
+            'input_files': self.input_files,
+            'class': str(self.__class__),
+            'version_record': str(VERSION_RECORD),
+        }
+        return attrs
+
+
+[docs] + def write_data(self, fp_out, out): + """Write monthly means for all heights to h5 file""" + if fp_out is not None: + if not os.path.exists(os.path.dirname(fp_out)): + os.makedirs(os.path.dirname(fp_out), exist_ok=True) + + if not os.path.exists(fp_out) or self.overwrite: + OutputHandler._init_h5( + fp_out, self.time_index, self.meta, self.global_attrs + ) + with RexOutputs(fp_out, 'a') as f: + for dset, data in out.items(): + OutputHandler._ensure_dset_in_output(fp_out, dset) + f[dset] = data.T + logger.info(f'Added {dset} to {fp_out}.') + + logger.info( + f'Wrote monthly means for all out heights: {fp_out}' + ) + elif os.path.exists(fp_out): + logger.info(f'{fp_out} already exists and overwrite=False.')
+ + +
+[docs] + @classmethod + def run( + cls, path_pattern, in_heights, out_heights, fp_out, overwrite=False + ): + """Read vortex tif files, convert these to monthly netcdf files for all + input heights, interpolate this data to requested output heights, mask + fill values, and write all data to h5 file. + + Parameters + ---------- + path_pattern : str + Pattern for input tif files. Needs to include {month} and {height} + format keys. + in_heights : list + List of heights for input files. + out_heights : list + List of output heights used for interpolation + fp_out : str + Name of final h5 output file to write with means. + overwrite : bool + Whether to overwrite intermediate netcdf files containing the + interpolated masked monthly means. + """ + vprep = cls(path_pattern, in_heights, out_heights, overwrite=overwrite) + vprep.convert_all_tifs() + out = vprep.get_all_data() + vprep.write_data(fp_out, out)
+
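+
+# A minimal usage sketch (not part of the class above). The path pattern and
+# heights here are hypothetical and only illustrate the expected call
+# signature of ``VortexMeanPrepper.run``; the pattern must contain {month}
+# and {height} format keys.
+#
+# VortexMeanPrepper.run(
+#     path_pattern='./vortex/{month}/{height}m/windspeed.tif',
+#     in_heights=[10, 100, 200],
+#     out_heights=[40, 80, 120, 160],
+#     fp_out='./vortex_means.h5',
+#     overwrite=False,
+# )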
+ + + +
+[docs] +class BiasCorrectUpdate: + """Class for bias correcting existing files and writing corrected files.""" + +
+[docs] + @classmethod + def get_bc_factors(cls, bc_file, dset, month, global_scalar=1): + """Get bias correction factors for the given dset and month + + Parameters + ---------- + bc_file : str + Name of h5 file containing bias correction factors + dset : str + Name of dataset to apply bias correction factors for + month : int + Index of month to bias correct + global_scalar : float + Optional global scalar to multiply all bias correction + factors. This can be used to improve systemic bias against + observation data. + + Returns + ------- + factors : ndarray + Array of bias correction factors for the given dset and month. + """ + with Resource(bc_file) as res: + logger.info( + f'Getting {dset} bias correction factors for month {month}.' + ) + bc_factor = res[f'{dset}_scalar', :, month - 1] + factors = global_scalar * bc_factor + logger.info( + f'Retrieved {dset} bias correction factors for month {month}. ' + f'Using global_scalar={global_scalar}.' + ) + return factors
+ + + @classmethod + def _correct_month( + cls, fh_in, month, out_file, dset, bc_file, global_scalar + ): + """Bias correct data for a given month. + + Parameters + ---------- + fh_in : Resource() + Resource handler for input file being corrected + month : int + Index of month to be corrected + out_file : str + Name of h5 file containing bias corrected data + dset : str + Name of dataset to bias correct + bc_file : str + Name of file containing bias correction factors for the given dset + global_scalar : float + Optional global scalar to multiply all bias correction + factors. This can be used to improve systemic bias against + observation data. + """ + with RexOutputs(out_file, 'a') as fh: + mask = fh.time_index.month == month + mask = np.arange(len(fh.time_index))[mask] + mask = slice(mask[0], mask[-1] + 1) + bc_factors = cls.get_bc_factors( + bc_file=bc_file, + dset=dset, + month=month, + global_scalar=global_scalar, + ) + logger.info(f'Applying bias correction factors for month {month}') + fh[dset, mask, :] = bc_factors * fh_in[dset, mask, :] + +
+[docs] + @classmethod + def update_file( + cls, + in_file, + out_file, + dset, + bc_file, + global_scalar=1, + max_workers=None, + ): + """Update the in_file with bias corrected values for the given dset + and write to out_file. + + Parameters + ---------- + in_file : str + Name of h5 file containing data to bias correct + out_file : str + Name of h5 file containing bias corrected data + dset : str + Name of dataset to bias correct + bc_file : str + Name of file containing bias correction factors for the given dset + global_scalar : float + Optional global scalar to multiply all bias correction + factors. This can be used to improve systemic bias against + observation data. + max_workers : int | None + Number of workers to use for parallel processing. + """ + tmp_file = out_file.replace('.h5', '.h5.tmp') + logger.info(f'Bias correcting {dset} in {in_file} with {bc_file}.') + with Resource(in_file) as fh_in: + OutputHandler._init_h5( + tmp_file, fh_in.time_index, fh_in.meta, fh_in.global_attrs + ) + OutputHandler._ensure_dset_in_output(tmp_file, dset) + + tasks = [] + for i in range(1, 13): + task = dask.delayed(cls._correct_month)( + fh_in, + month=i, + out_file=tmp_file, + dset=dset, + bc_file=bc_file, + global_scalar=global_scalar, + ) + tasks.append(task) + + logger.info('Added %s bias correction futures', len(tasks)) + if max_workers == 1: + dask.compute(*tasks, scheduler='single-threaded') + else: + dask.compute( + *tasks, scheduler='threads', num_workers=max_workers + ) + logger.info('Finished bias correcting %s in %s', dset, in_file) + + os.replace(tmp_file, out_file) + msg = f'Saved bias corrected {dset} to: {out_file}' + logger.info(msg)
+ + +
+[docs] + @classmethod + def run( + cls, + in_file, + out_file, + dset, + bc_file, + overwrite=False, + global_scalar=1, + max_workers=None, + ): + """Run bias correction update. + + Parameters + ---------- + in_file : str + Name of h5 file containing data to bias correct + out_file : str + Name of h5 file containing bias corrected data + dset : str + Name of dataset to bias correct + bc_file : str + Name of file containing bias correction factors for the given dset + overwrite : bool + Whether to overwrite the output file if it already exists. + global_scalar : float + Optional global scalar to multiply all bias correction + factors. This can be used to improve systemic bias against + observation data. + max_workers : int | None + Number of workers to use for parallel processing. + """ + if os.path.exists(out_file) and not overwrite: + logger.info( + f'{out_file} already exists and overwrite=False. Skipping.' + ) + else: + if os.path.exists(out_file) and overwrite: + logger.info( + f'{out_file} exists but overwrite=True. ' + f'Removing {out_file}.' + ) + os.remove(out_file) + cls.update_file( + in_file, + out_file, + dset, + bc_file, + global_scalar=global_scalar, + max_workers=max_workers, + )
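+
+# A minimal usage sketch with hypothetical file names: this would apply the
+# monthly "{dset}_scalar" factors in ``bc_file`` to windspeed_100m and write
+# a bias corrected copy of the input file.
+#
+# BiasCorrectUpdate.run(
+#     in_file='./wtk_2015.h5',
+#     out_file='./wtk_2015_bc.h5',
+#     dset='windspeed_100m',
+#     bc_file='./vortex_bc_factors.h5',
+#     global_scalar=1.0,
+#     max_workers=4,
+# )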
+
\ No newline at end of file
diff --git a/_modules/sup3r/bias/bias_transforms.html b/_modules/sup3r/bias/bias_transforms.html
new file mode 100644
index 000000000..e24ef6080
--- /dev/null
+++ b/_modules/sup3r/bias/bias_transforms.html
@@ -0,0 +1,1789 @@
+sup3r.bias.bias_transforms — sup3r 0.2.1.dev62 documentation
Source code for sup3r.bias.bias_transforms

+"""Bias correction transformation functions.
+
+TODO: These methods need to be refactored to use lazy calculations. They
+currently slow down the forward pass runs when operating on full input data
+volume.
+
+We should write bc factor files in a format compatible with Loaders /
+Rasterizers so we can use those class methods to match factors with locations
+"""
+
+import logging
+from typing import Union
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+from rex.utilities.bc_utils import QuantileDeltaMapping
+from scipy.ndimage import gaussian_filter
+
+from sup3r.preprocessing import Rasterizer
+from sup3r.preprocessing.utilities import make_time_index_from_kws
+
+logger = logging.getLogger(__name__)
+
+
+def _get_factors(target, shape, var_names, bias_fp, threshold=0.1):
+    """Get bias correction factors from sup3r's standard resource
+
+    This was stripped without any change from original
+    `_get_spatial_bc_factors` to allow re-use in other `*_bc_factors`
+    functions.
+
+    Parameters
+    ----------
+    target : tuple
+        (lat, lon) lower left corner of raster. Either need target+shape or
+        raster_file.
+    shape : tuple
+        (rows, cols) grid size. Either need target+shape or raster_file.
+    var_names : dict
+        A dictionary mapping the expected variable name in the `Resource`
+        and the desired name to output. For instance the dictionary
+        `{'base': 'base_ghi_params'}` would return a `params['base']` with
+        a value equal to `base_ghi_params` from the `bias_fp` file.
+    bias_fp : str
+        Filepath to bias correction file from the bias calc module. Must have
+        datasets defined as values of the dictionary `var_names` given as an
+        input argument, such as "{feature_name}_scalar" or
+        "base_{base_dset}_params". Those are the full low-resolution shape of
+        the forward pass input that will be sliced using lr_padded_slice for
+        the current chunk.
+    threshold : float
+        Nearest neighbor euclidean distance threshold. If the coordinates are
+        more than this value away from the bias correction lat/lon, an error is
+        raised.
+
+    Returns
+    -------
+    dict :
+        A dictionary with the content from `bias_fp` as mapped by `var_names`,
+        therefore, the keys here are the same keys in `var_names`.
+        Also includes a 'cfg' key with the ``global_attrs`` from the
+        Rasterizer.
+    """
+    res = Rasterizer(
+        file_paths=bias_fp,
+        target=np.asarray(target),
+        shape=shape,
+        threshold=threshold,
+    )
+    missing = [d for d in var_names.values() if d.lower() not in res.features]
+    msg = f'Missing {" and ".join(missing)} in resource: {bias_fp}.'
+    assert missing == [], msg
+    # pylint: disable=E1136
+    out = {k: res[var_names[k].lower()][...] for k in var_names}
+    out['cfg'] = res.global_attrs
+    return out
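+
+# A hedged sketch of the ``var_names`` mapping above, with a hypothetical
+# bias file: requesting {'scalar': 'ghi_scalar'} returns the ``ghi_scalar``
+# dataset under the key 'scalar', and the file attributes are returned under
+# the extra 'cfg' key.
+#
+# factors = _get_factors(
+#     target=(39.6, -105.5),
+#     shape=(20, 20),
+#     var_names={'scalar': 'ghi_scalar', 'adder': 'ghi_adder'},
+#     bias_fp='./bc_factors.h5',
+# )
+# scalar, adder, cfg = factors['scalar'], factors['adder'], factors['cfg']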
+
+
+def _get_spatial_bc_factors(lat_lon, feature_name, bias_fp, threshold=0.1):
+    """Get bc factors (scalar/adder) for the given feature for the given
+    domain (specified by lat_lon).
+
+    Parameters
+    ----------
+    lat_lon : ndarray
+        Array of latitudes and longitudes for the domain to bias correct
+        (n_lats, n_lons, 2)
+    feature_name : str
+        Name of feature that is being corrected. Datasets with names
+        "{feature_name}_scalar" and "{feature_name}_adder" will be retrieved
+        from bias_fp.
+    bias_fp : str
+        Filepath to bias correction file from the bias calc module. Must have
+        datasets "{feature_name}_scalar" and "{feature_name}_adder" that are
+        the full low-resolution shape of the forward pass input that will be
+        sliced using lr_padded_slice for the current chunk.
+    threshold : float
+        Nearest neighbor euclidean distance threshold. If the coordinates are
+        more than this value away from the bias correction lat/lon, an error is
+        raised.
+    """
+    var_names = {
+        'scalar': f'{feature_name}_scalar',
+        'adder': f'{feature_name}_adder',
+    }
+    target = lat_lon[-1, 0, :]
+    shape = lat_lon.shape[:-1]
+    return _get_factors(
+        target=target,
+        shape=shape,
+        var_names=var_names,
+        bias_fp=bias_fp,
+        threshold=threshold,
+    )
+
+
+def _get_spatial_bc_quantiles(
+    lat_lon: Union[np.ndarray, da.core.Array],
+    base_dset: str,
+    feature_name: str,
+    bias_fp: str,
+    threshold: float = 0.1,
+):
+    """Statistical distributions previously estimated for given lat/lon points
+
+    Recover the parameters that describe the statistical distribution
+    previously estimated with
+    :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection` for three
+    datasets: ``base`` (historical reference), ``bias`` (historical biased
+    reference), and ``bias_fut`` (the future biased dataset, usually the data
+    to correct).
+
+    Parameters
+    ----------
+    lat_lon : Union[np.ndarray, da.core.Array]
+        Array of latitudes and longitudes for the domain to bias correct
+        (n_lats, n_lons, 2)
+    base_dset : str
+        Name of feature used as historical reference. A Dataset with name
+        "base_{base_dset}_params" will be retrieved from ``bias_fp``.
+    feature_name : str
+        Name of the feature that is being corrected. Datasets with names
+        "bias_{feature_name}_params" and "bias_fut_{feature_name}_params" will
+        be retrieved from ``bias_fp``.
+    bias_fp : str
+        Filepath to bias correction file from the bias calc module. Must have
+        datasets "base_{base_dset}_params", "bias_{feature_name}_params", and
+        "bias_fut_{feature_name}_params" that define the statistical
+        distributions.
+    threshold : float
+        Nearest neighbor euclidean distance threshold. If the coordinates are
+        more than this value away from the bias correction lat/lon, an error
+        is raised.
+
+    Returns
+    -------
+    params : dict
+        A dictionary collecting the following parameters:
+        - base : np.array
+          Parameters used to define the statistical distribution estimated for
+          the ``base_dset``. It has a shape of (I, J, P), where (I, J) are the
+          same first two dimensions of the given `lat_lon` and P is the number
+          of parameters and depends on the type of distribution. See
+          :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection` for more
+          details.
+        - bias : np.array
+          Parameters used to define the statistical distribution estimated for
+          (historical) ``feature_name``. It has a shape of (I, J, P), where
+          (I, J) are the same first two dimensions of the given `lat_lon` and P
+          is the number of parameters and depends on the type of distribution.
+          See :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection` for
+          more details.
+        - bias_fut : np.array
+          Parameters used to define the statistical distribution estimated for
+          (future) ``feature_name``. It has a shape of (I, J, P), where (I, J)
+          are the same first two dimensions of the given `lat_lon` and P is the
+          number of parameters used and depends on the type of distribution.
+          See :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection` for more
+          details.
+        - cfg : dict
+          Metadata used to guide how to use the previous parameters when
+          reconstructing the statistical distributions. For instance,
+          `cfg['dist']` defines the type of distribution. See
+          :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection` for more
+          details, including which metadata is saved.
+
+    Warnings
+    --------
+    Be careful selecting which `bias_fp` to use. In particular, check whether
+    "bias_fut_{feature_name}_params" is representative of the desired target
+    period.
+
+    See Also
+    --------
+    sup3r.bias.qdm.QuantileDeltaMappingCorrection
+        Estimate the statistical distributions loaded here.
+
+    Examples
+    --------
+    >>> lat_lon = np.array([
+    ...              [39.649033, -105.46875 ],
+    ...              [39.649033, -104.765625]])
+    >>> params = _get_spatial_bc_quantiles(
+    ...             lat_lon, "ghi", "rsds", "./dist_params.hdf")
+
+    """
+    var_names = {
+        'base': f'base_{base_dset}_params',
+        'bias': f'bias_{feature_name}_params',
+        'bias_fut': f'bias_fut_{feature_name}_params',
+    }
+    target = lat_lon[-1, 0, :]
+    shape = lat_lon.shape[:-1]
+    return _get_factors(
+        target=target,
+        shape=shape,
+        var_names=var_names,
+        bias_fp=bias_fp,
+        threshold=threshold,
+    )
+
+
+
+[docs] +def global_linear_bc(data, scalar, adder, out_range=None): + """Bias correct data using a simple global *scalar +adder method. + + Parameters + ---------- + data : np.ndarray + Sup3r input data to be bias corrected, assumed to be 3D with shape + (spatial, spatial, temporal) for a single feature. + scalar : float + Scalar (multiplicative) value to apply to input data. + adder : float + Adder value to apply to input data. + out_range : None | tuple + Option to set floor/ceiling values on the output data. + + Returns + ------- + out : np.ndarray + out = data * scalar + adder + """ + out = data * scalar + adder + if out_range is not None: + out = np.maximum(out, np.min(out_range)) + out = np.minimum(out, np.max(out_range)) + return out
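+
+# A quick numeric check of the global correction (values chosen arbitrarily):
+# scalar=1.1 and adder=-0.5 applied to a constant field of 10.0 gives 10.5,
+# and out_range=(0, 10) would then clip that result to 10.0. The module-level
+# numpy import is reused here.
+#
+# field = np.full((2, 2, 3), 10.0)
+# assert np.allclose(global_linear_bc(field, 1.1, -0.5), 10.5)
+# assert np.allclose(
+#     global_linear_bc(field, 1.1, -0.5, out_range=(0, 10)), 10.0)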
+ + + +
+[docs] +def local_linear_bc( + data, + lat_lon, + feature_name, + bias_fp, + lr_padded_slice=None, + out_range=None, + smoothing=0, +): + """Bias correct data using a simple annual (or multi-year) *scalar +adder + method on a site-by-site basis. + + Parameters + ---------- + data : np.ndarray + Sup3r input data to be bias corrected, assumed to be 3D with shape + (spatial, spatial, temporal) for a single feature. + lat_lon : ndarray + Array of latitudes and longitudes for the domain to bias correct + (n_lats, n_lons, 2) + feature_name : str + Name of feature that is being corrected. Datasets with names + "{feature_name}_scalar" and "{feature_name}_adder" will be retrieved + from bias_fp. + bias_fp : str + Filepath to bias correction file from the bias calc module. Must have + datasets "{feature_name}_scalar" and "{feature_name}_adder" that are + the full low-resolution shape of the forward pass input that will be + sliced using lr_padded_slice for the current chunk. + lr_padded_slice : tuple | None + Tuple of length four that slices (spatial_1, spatial_2, temporal, + features) where each tuple entry is a slice object for that axes. + Note that if this method is called as part of a sup3r forward pass, the + lr_padded_slice will be included in the kwargs for the active chunk. + If this is None, no slicing will be done and the full bias correction + source shape will be used. + out_range : None | tuple + Option to set floor/ceiling values on the output data. + smoothing : float + Value to use to smooth the scalar/adder data. This can reduce the + effect of extreme values within aggregations over large number of + pixels. This value is the standard deviation for the gaussian_filter + kernel. + + Returns + ------- + out : np.ndarray + out = data * scalar + adder + """ + + out = _get_spatial_bc_factors(lat_lon, feature_name, bias_fp) + scalar, adder = out['scalar'], out['adder'] + # 3D bias correction factors have seasonal/monthly correction in last axis + if len(scalar.shape) == 3 and len(adder.shape) == 3: + scalar = scalar.mean(axis=-1) + adder = adder.mean(axis=-1) + + if lr_padded_slice is not None: + spatial_slice = (lr_padded_slice[0], lr_padded_slice[1]) + scalar = scalar[spatial_slice] + adder = adder[spatial_slice] + + if np.isnan(scalar).any() or np.isnan(adder).any(): + msg = ( + 'Bias correction scalar/adder values had NaNs for ' + f'"{feature_name}" from: {bias_fp}' + ) + logger.warning(msg) + warn(msg) + + scalar = np.expand_dims(scalar, axis=-1) + adder = np.expand_dims(adder, axis=-1) + + scalar = np.repeat(scalar, data.shape[-1], axis=-1) + adder = np.repeat(adder, data.shape[-1], axis=-1) + + if smoothing > 0: + for idt in range(scalar.shape[-1]): + scalar[..., idt] = gaussian_filter( + scalar[..., idt], smoothing, mode='nearest' + ) + adder[..., idt] = gaussian_filter( + adder[..., idt], smoothing, mode='nearest' + ) + + out = data * scalar + adder + if out_range is not None: + out = np.maximum(out, np.min(out_range)) + out = np.minimum(out, np.max(out_range)) + + return out
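+
+# A standalone shape-level sketch of the correction above, using made-up
+# factors instead of a real ``bias_fp``: the per-site (lat, lon) scalar and
+# adder grids are expanded over the temporal axis of the 3D data.
+#
+# data = np.random.rand(4, 5, 24)    # (spatial, spatial, temporal)
+# scalar = np.full((4, 5), 1.05)     # per-site multiplicative factor
+# adder = np.full((4, 5), -0.1)      # per-site additive factor
+# out = data * scalar[..., np.newaxis] + adder[..., np.newaxis]
+# assert out.shape == data.shape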
+ + + +
+[docs] +def monthly_local_linear_bc( + data, + lat_lon, + feature_name, + bias_fp, + date_range_kwargs, + lr_padded_slice=None, + temporal_avg=True, + out_range=None, + smoothing=0, + scalar_range=None, + adder_range=None, +): + """Bias correct data using a simple monthly *scalar +adder method on a + site-by-site basis. + + Parameters + ---------- + data : np.ndarray + Sup3r input data to be bias corrected, assumed to be 3D with shape + (spatial, spatial, temporal) for a single feature. + lat_lon : ndarray + Array of latitudes and longitudes for the domain to bias correct + (n_lats, n_lons, 2) + feature_name : str + Name of feature that is being corrected. Datasets with names + "{feature_name}_scalar" and "{feature_name}_adder" will be retrieved + from bias_fp. + bias_fp : str + Filepath to bias correction file from the bias calc module. Must have + datasets "{feature_name}_scalar" and "{feature_name}_adder" that are + the full low-resolution shape of the forward pass input that will be + sliced using lr_padded_slice for the current chunk. + date_range_kwargs : dict + Keyword args for pd.date_range to produce a DatetimeIndex object + associated with the input data temporal axis (assumed 3rd axis e.g. + axis=2). Note that if this method is called as part of a sup3r + resolution forward pass, the date_range_kwargs will be included + automatically for the current chunk. + lr_padded_slice : tuple | None + Tuple of length four that slices (spatial_1, spatial_2, temporal, + features) where each tuple entry is a slice object for that axes. + Note that if this method is called as part of a sup3r forward pass, the + lr_padded_slice will be included automatically in the kwargs for the + active chunk. If this is None, no slicing will be done and the full + bias correction source shape will be used. + temporal_avg : bool + Take the average scalars and adders for the chunk's time index, this + will smooth the transition of scalars/adders from month to month if + processing small chunks. If processing the full annual time index, set + this to False. + out_range : None | tuple + Option to set floor/ceiling values on the output data. + smoothing : float + Value to use to smooth the scalar/adder data. This can reduce the + effect of extreme values within aggregations over large number of + pixels. This value is the standard deviation for the gaussian_filter + kernel. + scalar_range : tuple | None + Allowed range for the scalar term in the linear bias correction. + adder_range : tuple | None + Allowed range for the adder term in the linear bias correction. 
+ + Returns + ------- + out : np.ndarray + out = data * scalar + adder + """ + time_index = make_time_index_from_kws(date_range_kwargs) + out = _get_spatial_bc_factors(lat_lon, feature_name, bias_fp) + scalar, adder = out['scalar'], out['adder'] + + assert len(scalar.shape) == 3, 'Monthly bias correct needs 3D scalars' + assert len(adder.shape) == 3, 'Monthly bias correct needs 3D adders' + + if lr_padded_slice is not None: + spatial_slice = (lr_padded_slice[0], lr_padded_slice[1]) + scalar = scalar[spatial_slice] + adder = adder[spatial_slice] + + imonths = time_index.month.values - 1 + scalar = scalar[..., imonths] + adder = adder[..., imonths] + + if temporal_avg: + scalar = scalar.mean(axis=-1) + adder = adder.mean(axis=-1) + scalar = np.expand_dims(scalar, axis=-1) + adder = np.expand_dims(adder, axis=-1) + scalar = np.repeat(scalar, data.shape[-1], axis=-1) + adder = np.repeat(adder, data.shape[-1], axis=-1) + if len(time_index.month.unique()) > 2: + msg = ( + 'Bias correction method "monthly_local_linear_bc" was used ' + 'with temporal averaging over a time index with >2 months.' + ) + warn(msg) + logger.warning(msg) + + if np.isnan(scalar).any() or np.isnan(adder).any(): + msg = ( + 'Bias correction scalar/adder values had NaNs for ' + f'"{feature_name}" from: {bias_fp}' + ) + logger.warning(msg) + warn(msg) + + if smoothing > 0: + for idt in range(scalar.shape[-1]): + scalar[..., idt] = gaussian_filter( + scalar[..., idt], smoothing, mode='nearest' + ) + adder[..., idt] = gaussian_filter( + adder[..., idt], smoothing, mode='nearest' + ) + + if scalar_range is not None: + scalar = np.minimum(scalar, np.max(scalar_range)) + scalar = np.maximum(scalar, np.min(scalar_range)) + + if adder_range is not None: + adder = np.minimum(adder, np.max(adder_range)) + adder = np.maximum(adder, np.min(adder_range)) + + out = data * scalar + adder + if out_range is not None: + out = np.maximum(out, np.min(out_range)) + out = np.minimum(out, np.max(out_range)) + + return out
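+
+# A sketch of the ``date_range_kwargs`` input assumed by the monthly method:
+# the kwargs are forwarded to pd.date_range and each timestamp's month picks
+# the factor along the last (monthly) axis of the 3D scalar/adder arrays.
+#
+# import pandas as pd
+# date_range_kwargs = {'start': '2015-01-01', 'end': '2015-12-31 23:00:00',
+#                      'freq': 'h'}
+# time_index = pd.date_range(**date_range_kwargs)
+# imonths = time_index.month.values - 1    # 0-based month index per timestep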
+ + + +
+[docs]
+def local_qdm_bc(
+    data: np.ndarray,
+    lat_lon: np.ndarray,
+    base_dset: str,
+    feature_name: str,
+    bias_fp,
+    date_range_kwargs: dict,
+    lr_padded_slice=None,
+    threshold=0.1,
+    relative=True,
+    no_trend=False,
+    delta_denom_min=None,
+    delta_denom_zero=None,
+    delta_range=None,
+    out_range=None,
+    max_workers=1,
+):
+    """Bias correction using QDM
+
+    Apply QDM to correct bias on the given data. It assumes that the required
+    statistical distributions were previously estimated and saved in
+    ``bias_fp``.
+
+    Assume the CDF for the nearest day of year (doy) is representative.
+
+    Parameters
+    ----------
+    data : Union[np.ndarray, da.core.Array]
+        Sup3r input data to be bias corrected, assumed to be 3D with shape
+        (spatial, spatial, temporal) for a single feature.
+    lat_lon : np.ndarray
+        Array of latitudes and longitudes for the domain to bias correct
+        (n_lats, n_lons, 2)
+    base_dset : str
+        Name of feature that is used as (historical) reference. A dataset
+        with name "base_{base_dset}_params" will be retrieved.
+    feature_name : str
+        Name of feature that is being corrected. Datasets with names
+        "bias_{feature_name}_params" and "bias_fut_{feature_name}_params"
+        will be retrieved.
+    bias_fp : str
+        Filepath to statistical distributions file from the bias calc module.
+        Must have datasets "bias_{feature_name}_params",
+        "bias_fut_{feature_name}_params", and "base_{base_dset}_params" that
+        are the parameters to define the statistical distributions to be used
+        to correct the given `data`.
+    date_range_kwargs : dict
+        Keyword args for pd.date_range to produce a DatetimeIndex object
+        associated with the input data temporal axis (assumed 3rd axis e.g.
+        axis=2). Note that if this method is called as part of a sup3r
+        resolution forward pass, the date_range_kwargs will be included
+        automatically for the current chunk.
+    lr_padded_slice : tuple | None
+        Tuple of length four that slices (spatial_1, spatial_2, temporal,
+        features) where each tuple entry is a slice object for that axes.
+        Note that if this method is called as part of a sup3r forward pass,
+        the lr_padded_slice will be included automatically in the kwargs for
+        the active chunk. If this is None, no slicing will be done and the
+        full bias correction source shape will be used.
+    threshold : float
+        Nearest neighbor euclidean distance threshold. If the coordinates are
+        more than this value away from the bias correction lat/lon, an error
+        is raised.
+    relative : bool
+        Apply QDM correction as a relative factor (product), otherwise, it is
+        applied as an offset (sum).
+    no_trend : bool, default=False
+        An option to ignore the trend component of the correction, thus
+        resulting in an ordinary Quantile Mapping, i.e. corrects the bias by
+        comparing the distributions of the biased dataset with a reference
+        dataset. See ``params_mf`` of
+        :class:`rex.utilities.bc_utils.QuantileDeltaMapping`. Note that this
+        assumes that params_mh is the data distribution representative for
+        the target data.
+    delta_denom_min : float | None
+        Option to specify a minimum value for the denominator term in the
+        calculation of a relative delta value. This prevents division by a
+        very small number making delta blow up and resulting in very large
+        output bias corrected values. See equation 4 of Cannon et al., 2015
+        for the delta term.
+    delta_denom_zero : float | None
+        Option to specify a value to replace zeros in the denominator term
+        in the calculation of a relative delta value. This prevents
+        division by a very small number making delta blow up and resulting
+        in very large output bias corrected values. See equation 4 of
+        Cannon et al., 2015 for the delta term.
+    delta_range : tuple | None
+        Option to set a (min, max) on the delta term in QDM.
+        This can help prevent QDM from making non-realistic
+        increases/decreases in otherwise physical values. See equation 4 of
+        Cannon et al., 2015 for the delta term.
+    out_range : None | tuple
+        Option to set floor/ceiling values on the output data.
+    max_workers : int | None
+        Max number of workers to use for QDM process pool
+
+    Returns
+    -------
+    out : np.ndarray
+        The input data corrected by QDM. Its shape is the same as the input
+        (spatial, spatial, temporal), and the temporal axis aligns with the
+        time of the data.
+
+    See Also
+    --------
+    sup3r.bias.qdm.QuantileDeltaMappingCorrection :
+        Estimate probability distributions required by QDM method
+    rex.utilities.bc_utils.QuantileDeltaMapping :
+        Core QDM transformation.
+
+    Notes
+    -----
+    Be careful selecting `bias_fp`. Usually, the input `data` used here would
+    be related to the dataset used to estimate
+    "bias_fut_{feature_name}_params".
+
+    Arguments are kept as consistent as possible with `local_linear_bc()`,
+    thus 3D data (spatial, spatial, temporal) and lat_lon (n_lats, n_lons,
+    [lat, lon]). But `QuantileDeltaMapping()`, from the rex library, expects
+    an array of shape (time, space), so we need to re-organize our input to
+    match that and, in the end, bring it back to (spatial, spatial,
+    temporal). This is still better than maintaining the same functionality
+    in two libraries.
+
+    Also, :class:`rex.utilities.bc_utils.QuantileDeltaMapping` expects params
+    to be 2D (space, N-params).
+
+    Examples
+    --------
+    >>> unbiased = local_qdm_bc(biased_array, lat_lon_array, "ghi", "rsds",
+    ...                         "./dist_params.hdf", date_range_kwargs)
+
+    """
+    # Confirm that the given time matches the expected data size
+    msg = f'data was expected to be a 3D array but got shape {data.shape}'
+    assert data.ndim == 3, msg
+    time_index = make_time_index_from_kws(date_range_kwargs)
+    msg = (f'Time should align with data 3rd dimension but got data '
+           f'{data.shape} and time_index length '
+           f'{time_index.size}: {time_index}')
+    assert data.shape[-1] == time_index.size, msg
+
+    params = _get_spatial_bc_quantiles(
+        lat_lon=lat_lon,
+        base_dset=base_dset,
+        feature_name=feature_name,
+        bias_fp=bias_fp,
+        threshold=threshold,
+    )
+    data = np.asarray(data)
+    base = np.asarray(params['base'])
+    bias = np.asarray(params['bias'])
+    bias_fut = np.asarray(params['bias_fut'])
+    cfg = params['cfg']
+
+    if lr_padded_slice is not None:
+        spatial_slice = (lr_padded_slice[0], lr_padded_slice[1])
+        base = base[spatial_slice]
+        bias = bias[spatial_slice]
+        bias_fut = bias_fut[spatial_slice]
+
+    output = np.full_like(data, np.nan)
+    nearest_window_idx = [
+        np.argmin(abs(d - cfg['time_window_center']))
+        for d in time_index.day_of_year
+    ]
+    for window_idx in set(nearest_window_idx):
+        # Naming following the paper: observed historical
+        oh = base[:, :, window_idx]
+        # Modeled historical
+        mh = bias[:, :, window_idx]
+        # Modeled future
+        mf = bias_fut[:, :, window_idx]
+
+        # This satisfies rex's QDM design
+        mf = None if no_trend else mf.reshape(-1, mf.shape[-1])
+        # The distributions at this point, after selecting the respective
+        # time window with `window_idx`, are 3D (space, space, N-params).
+        # Collapse 3D (space, space, N) into 2D (space**2, N)
+        QDM = QuantileDeltaMapping(
+            oh.reshape(-1, oh.shape[-1]),
+            mh.reshape(-1, mh.shape[-1]),
+            mf,
+            dist=cfg['dist'],
+            relative=relative,
+            sampling=cfg['sampling'],
+            log_base=cfg['log_base'],
+            delta_denom_min=delta_denom_min,
+            delta_denom_zero=delta_denom_zero,
+            delta_range=delta_range,
+        )
+
+        subset_idx = nearest_window_idx == window_idx
+        subset = data[:, :, subset_idx]
+        # input 3D shape (spatial, spatial, temporal)
+        # QDM expects input arr with shape (time, space)
+        tmp = subset.reshape(-1, subset.shape[-1]).T
+        # Apply QDM correction
+        tmp = QDM(tmp, max_workers=max_workers)
+        # Reorganize array back from (time, space)
+        # to (spatial, spatial, temporal)
+        tmp = tmp.T.reshape(subset.shape)
+        # Position output respecting original time axis sequence
+        output[:, :, subset_idx] = tmp
+
+    if out_range is not None:
+        output = np.maximum(output, np.min(out_range))
+        output = np.minimum(output, np.max(out_range))
+
+    if np.isnan(output).any():
+        msg = ('QDM bias correction resulted in NaN values! If this is a '
+               'relative QDM, you may try setting ``delta_denom_min`` or '
+               '``delta_denom_zero``')
+        logger.error(msg)
+        raise RuntimeError(msg)
+
+    return output
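+
+# A standalone sketch of the reshape round trip used above: QDM operates on
+# (time, space) arrays, so each (spatial, spatial, temporal) chunk is
+# collapsed and then restored without reordering any values.
+#
+# chunk = np.random.rand(3, 4, 10)              # (spatial, spatial, temporal)
+# flat = chunk.reshape(-1, chunk.shape[-1]).T   # -> (time, space)
+# restored = flat.T.reshape(chunk.shape)        # back to original layout
+# assert np.array_equal(chunk, restored)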
+
+
+
+def _get_spatial_bc_presrat(
+    lat_lon: np.array,
+    base_dset: str,
+    feature_name: str,
+    bias_fp: str,
+    threshold: float = 0.1,
+):
+    """Statistical distributions previously estimated for given lat/lon points
+
+    Recover the parameters that describe the statistical distribution
+    previously estimated with :class:`~sup3r.bias.PresRat` for three datasets:
+    ``base`` (historical reference), ``bias`` (historical biased reference),
+    and ``bias_fut`` (the future biased dataset, usually the data to correct).
+
+    Parameters
+    ----------
+    lat_lon : ndarray
+        Array of latitudes and longitudes for the domain to bias correct
+        (n_lats, n_lons, 2)
+    base_dset : str
+        Name of feature used as historical reference. A Dataset with name
+        "base_{base_dset}_params" will be retrieved from ``bias_fp``.
+    feature_name : str
+        Name of the feature that is being corrected. Datasets with names
+        "bias_{feature_name}_params" and "bias_fut_{feature_name}_params"
+        will be retrieved from ``bias_fp``.
+    bias_fp : str
+        Filepath to bias correction file from the bias calc module. Must have
+        datasets "base_{base_dset}_params", "bias_{feature_name}_params", and
+        "bias_fut_{feature_name}_params" that define the statistical
+        distributions.
+    threshold : float
+        Nearest neighbor euclidean distance threshold. If the coordinates are
+        more than this value away from the bias correction lat/lon, an error
+        is raised.
+
+    Returns
+    -------
+    dict :
+        A dictionary containing:
+        - base : np.array
+          Parameters used to define the statistical distribution estimated
+          for the ``base_dset``. It has a shape of (I, J, T, P), where (I, J)
+          are the same first two dimensions of the given `lat_lon`; T is time
+          in intervals equally spaced along a year. Check
+          `cfg['time_window_center']` below to map each T to a day of the
+          year; and P is the number of parameters and depends on the type of
+          distribution. See :class:`~sup3r.bias.PresRat` for more details.
+        - bias : np.array
+          Parameters used to define the statistical distribution estimated
+          for (historical) ``feature_name``. It has a shape of (I, J, T, P),
+          where (I, J) are the same first two dimensions of the given
+          `lat_lon`; T is time in intervals equally spaced along a year.
+          Check `cfg['time_window_center']` to map each T to a day of the
+          year; and P is the number of parameters and depends on the type of
+          distribution. See :class:`~sup3r.bias.PresRat` for more details.
+        - bias_fut : np.array
+          Parameters used to define the statistical distribution estimated
+          for (future) ``feature_name``. It has a shape of (I, J, T, P),
+          where (I, J) are the same first two dimensions of the given
+          `lat_lon`; T is time in intervals equally spaced along a year.
+          Check `cfg['time_window_center']` to map each T to a day of the
+          year; and P is the number of parameters used and depends on the
+          type of distribution. See :class:`~sup3r.bias.PresRat` for more
+          details.
+        - bias_tau_fut : np.array
+          The threshold for negligible magnitudes. Any value smaller than
+          that should be replaced by zero to preserve the zero
+          (precipitation) rate. It has dimensions of (I, J, dummy), where
+          (I, J) are the same first two dimensions of the given `lat_lon`,
+          and a dummy 3rd dimension is required due to the way sup3r saves
+          data in an HDF.
+        - k_factor : np.array
+          The K factor used to preserve the mean rate of change from the
+          model (see [Pierce2015]_).
+          It has a shape of (I, J, T), where (I, J) are the same first two
+          dimensions of the given `lat_lon`; T is time in intervals equally
+          spaced along a year. Check `cfg['time_window_center']` to map each
+          T to a day of the year.
+        - cfg : dict
+          Metadata used to guide how to use the previous parameters when
+          reconstructing the statistical distributions. For instance,
+          `cfg['dist']` defines the type of distribution, and
+          `cfg['time_window_center']` maps the dimension T in days of the
+          year for the dimension T of the parameters above. See
+          :class:`~sup3r.bias.PresRat` for more details, including which
+          metadata is saved.
+
+    Warnings
+    --------
+    Be careful selecting which `bias_fp` to use. In particular, check whether
+    "bias_fut_{feature_name}_params" is representative of the desired target
+    period.
+
+    See Also
+    --------
+    sup3r.bias.PresRat
+        Estimate the statistical distributions loaded here.
+
+    References
+    ----------
+    .. [Pierce2015] Pierce, D. W., Cayan, D. R., Maurer, E. P., Abatzoglou,
+       J. T., & Hegewisch, K. C. (2015). Improved bias correction techniques
+       for hydrological simulations of climate change. Journal of
+       Hydrometeorology, 16(6), 2421-2442.
+
+    Examples
+    --------
+    >>> lat_lon = np.array([
+    ...              [39.649033, -105.46875 ],
+    ...              [39.649033, -104.765625]])
+    >>> params = _get_spatial_bc_presrat(
+    ...             lat_lon, "ghi", "rsds", "./dist_params.hdf")
+
+    """
+    var_names = {
+        'base': f'base_{base_dset}_params',
+        'bias': f'bias_{feature_name}_params',
+        'bias_fut': f'bias_fut_{feature_name}_params',
+        'bias_tau_fut': f'{feature_name}_tau_fut',
+        'k_factor': f'{feature_name}_k_factor',
+    }
+    target = lat_lon[-1, 0, :]
+    shape = lat_lon.shape[:-1]
+    return _get_factors(
+        target=target,
+        shape=shape,
+        var_names=var_names,
+        bias_fp=bias_fp,
+        threshold=threshold,
+    )
+
+
+def _apply_presrat_bc(data, time_index, base_params, bias_params,
+                      bias_fut_params, bias_tau_fut, k_factor,
+                      time_window_center, dist='empirical',
+                      sampling='invlog', log_base=10, relative=True,
+                      no_trend=False, delta_denom_min=None, delta_range=None,
+                      out_range=None, max_workers=1):
+    """Run PresRat to bias correct data from input parameters rather than
+    from a bias correction file on disk.
+
+    Parameters
+    ----------
+    data : np.ndarray
+        Sup3r input data to be bias corrected, assumed to be 3D with shape
+        (spatial, spatial, temporal) for a single feature.
+    time_index : pd.DatetimeIndex
+        A DatetimeIndex object associated with the input data temporal axis
+        (assumed 3rd axis e.g. axis=2).
+    base_params : np.ndarray
+        4D array of **observed historical** distribution parameters created
+        from a multi-year set of data where the shape is
+        (space, space, time, N). This can be the output of a parametric
+        distribution fit like ``scipy.stats.weibull_min.fit()`` where N is
+        the number of parameters for that distribution, or this can define
+        the x-values of N points from an empirical CDF that will be linearly
+        interpolated between. If this is an empirical CDF, this must include
+        the 0th and 100th percentile values and have even percentile spacing
+        between values.
+    bias_params : np.ndarray
+        Same requirements as ``base_params``. This input arg is for the
+        **modeled historical distribution**.
+    bias_fut_params : np.ndarray | None
+        Same requirements as ``base_params``. This input arg is for the
+        **modeled future distribution**.
+        If this is None, this defaults to ``bias_params`` (no future data,
+        just corrected to the modeled historical distribution)
+    bias_tau_fut : np.ndarray
+        Zero precipitation threshold for future data calculated from presrat
+        without temporal dependence with shape (spatial, spatial, 1)
+    k_factor : np.ndarray
+        K factor from the presrat method with shape (spatial, spatial, N)
+        where N is the number of time observations at which the bias
+        correction is calculated
+    time_window_center : np.ndarray
+        Sequence of days of the year equally spaced and shifted by half
+        window size, thus `ntimes`=12 results in approximately [15, 45,
+        ...]. It includes the fraction of a day, thus 15.5 is equivalent
+        to January 15th, 12:00h. Shape is (N,)
+    dist : str
+        Probability distribution name to use to model the data which
+        determines how the param args are used. This can be "empirical" or
+        any continuous distribution name from ``scipy.stats``.
+    sampling : str | np.ndarray
+        If dist="empirical", this is an option for how the quantiles were
+        sampled to produce the params inputs, e.g., how to sample the
+        y-axis of the distribution (see sampling functions in
+        ``rex.utilities.bc_utils``). "linear" will do even spacing, "log"
+        will concentrate samples near quantile=0, and "invlog" will
+        concentrate samples near quantile=1. Can also be a 1D array of dist
+        inputs if being used from reV, but they must all be the same
+        option.
+    log_base : int | float | np.ndarray
+        Log base value if sampling is "log" or "invlog". A higher value
+        will concentrate more samples at the extreme sides of the
+        distribution. Can also be a 1D array of dist inputs if being used
+        from reV, but they must all be the same option.
+    relative : bool | np.ndarray
+        Flag to preserve relative rather than absolute changes in
+        quantiles. relative=True (default) will multiply by the change in
+        quantiles while relative=False will add. See Equations 4-6 from
+        Cannon et al., 2015 for more details. Can also be a 1D array of
+        dist inputs if being used from reV, but they must all be the same
+        option.
+    no_trend : bool, default=False
+        An option to ignore the trend component of the correction, thus
+        resulting in an ordinary Quantile Mapping, i.e. corrects the bias by
+        comparing the distributions of the biased dataset with a reference
+        dataset, without reinforcing the zero rate or applying the k-factor.
+        See ``params_mf`` of
+        :class:`rex.utilities.bc_utils.QuantileDeltaMapping`. Note that this
+        assumes that params_mh is the data distribution representative for
+        the target data.
+    delta_denom_min : float | None
+        Option to specify a minimum value for the denominator term in the
+        calculation of a relative delta value. This prevents division by a
+        very small number making delta blow up and resulting in very large
+        output bias corrected values. See equation 4 of Cannon et al., 2015
+        for the delta term. If this is not set, the ``zero_rate_threshold``
+        calculated as part of the presrat bias calculation will be used
+    delta_range : tuple | None
+        Option to set a (min, max) on the delta term in QDM. This can help
+        prevent QDM from making non-realistic increases/decreases in
+        otherwise physical values. See equation 4 of Cannon et al., 2015 for
+        the delta term.
+    out_range : None | tuple
+        Option to set floor/ceiling values on the output data.
+    max_workers : int | None
+        Max number of workers to use for QDM process pool
+    """
+
+    data_unbiased = np.full_like(data, np.nan)
+    closest_time_idx = abs(time_window_center[:, np.newaxis]
+                           - np.array(time_index.day_of_year))
+    closest_time_idx = closest_time_idx.argmin(axis=0)
+
+    for nt in set(closest_time_idx):
+        subset_idx = closest_time_idx == nt
+        subset = data[:, :, subset_idx]
+        oh = base_params[:, :, nt]
+        mh = bias_params[:, :, nt]
+        mf = bias_fut_params[:, :, nt]
+
+        mf = None if no_trend else mf.reshape(-1, mf.shape[-1])
+
+        # The distributions are 3D (space, space, N-params)
+        # Collapse 3D (space, space, N) into 2D (space**2, N)
+        QDM = QuantileDeltaMapping(oh.reshape(-1, oh.shape[-1]),
+                                   mh.reshape(-1, mh.shape[-1]),
+                                   mf,
+                                   dist=dist,
+                                   relative=relative,
+                                   sampling=sampling,
+                                   log_base=log_base,
+                                   delta_denom_min=delta_denom_min,
+                                   delta_range=delta_range,
+                                   )
+
+        # input 3D shape (spatial, spatial, temporal)
+        # QDM expects input arr with shape (time, space)
+        tmp = subset.reshape(-1, subset.shape[-1]).T
+        # Apply QDM correction
+        tmp = QDM(tmp, max_workers=max_workers)
+        # Reorganize array back from (time, space)
+        # to (spatial, spatial, temporal)
+        subset = tmp.T.reshape(subset.shape)
+
+        # If no trend, it doesn't make sense to correct for the zero rate or
+        # apply the k-factor, so the correction is limited to QDM only.
+        if not no_trend:
+            subset = np.where(subset < bias_tau_fut, 0, subset)
+            subset = subset * np.asarray(k_factor[:, :, nt:nt + 1])
+
+        data_unbiased[:, :, subset_idx] = subset
+
+    if out_range is not None:
+        data_unbiased = np.maximum(data_unbiased, np.min(out_range))
+        data_unbiased = np.minimum(data_unbiased, np.max(out_range))
+
+    if np.isnan(data_unbiased).any():
+        msg = ('Presrat bias correction resulted in NaN values! If this is '
+               'a relative QDM, you may try setting ``delta_denom_min`` or '
+               '``delta_denom_zero``')
+        logger.error(msg)
+        raise RuntimeError(msg)
+
+    return data_unbiased
+
+
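+
+# A toy illustration (arbitrary values) of the two PresRat-specific steps
+# applied after QDM in the loop above: magnitudes below ``bias_tau_fut`` are
+# zeroed to preserve the dry-day rate, then the K factor rescales the result
+# to preserve the mean change ratio.
+#
+# subset = np.array([[[0.005, 0.2, 1.5]]])        # (1, 1, time)
+# tau_fut = np.array([[[0.01]]])                  # zero-precipitation threshold
+# k = np.array([[[1.1]]])                         # K factor for this window
+# subset = np.where(subset < tau_fut, 0, subset)  # -> [[[0.0, 0.2, 1.5]]]
+# subset = subset * k                             # -> [[[0.0, 0.22, 1.65]]]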
+[docs] +def local_presrat_bc(data: np.ndarray, + lat_lon: np.ndarray, + base_dset: str, + feature_name: str, + bias_fp, + date_range_kwargs: dict, + lr_padded_slice=None, + threshold=0.1, + relative=True, + no_trend=False, + delta_denom_min=None, + delta_range=None, + k_range=None, + out_range=None, + max_workers=1, + ): + """Bias correction using PresRat + + Parameters + ---------- + data : np.ndarray + Sup3r input data to be bias corrected, assumed to be 3D with shape + (spatial, spatial, temporal) for a single feature. + lat_lon : np.ndarray + Array of latitudes and longitudes for the domain to bias correct + (n_lats, n_lons, 2) + base_dset : + Name of feature that is used as (historical) reference. Dataset with + names "base_{base_dset}_params" will be retrieved. + feature_name : str + Name of feature that is being corrected. Datasets with names + "bias_{feature_name}_params" and "bias_fut_{feature_name}_params" will + be retrieved. + bias_fp : str + Filepath to statistical distributions file from the bias calc module. + Must have datasets "bias_{feature_name}_params", + "bias_fut_{feature_name}_params", and "base_{base_dset}_params" that + are the parameters to define the statistical distributions to be used + to correct the given `data`. + date_range_kwargs : dict + Keyword args for pd.date_range to produce a DatetimeIndex object + associated with the input data temporal axis (assumed 3rd axis e.g. + axis=2). Note that if this method is called as part of a sup3r + resolution forward pass, the date_range_kwargs will be included + automatically for the current chunk. + lr_padded_slice : tuple | None + Tuple of length four that slices (spatial_1, spatial_2, temporal, + features) where each tuple entry is a slice object for that axes. + Note that if this method is called as part of a sup3r forward pass, the + lr_padded_slice will be included automatically in the kwargs for the + active chunk. If this is None, no slicing will be done and the full + bias correction source shape will be used. + threshold : float + Nearest neighbor euclidean distance threshold. If the coordinates are + more than this value away from the bias correction lat/lon, an error + is raised. + relative : bool + Apply QDM correction as a relative factor (product), otherwise, it is + applied as an offset (sum). + no_trend : bool, default=False + An option to ignore the trend component of the correction, thus + resulting in an ordinary Quantile Mapping, i.e. corrects the bias by + comparing the distributions of the biased dataset with a reference + datasets, without reinforcing the zero rate or applying the k-factor. + See ``params_mf`` of + :class:`rex.utilities.bc_utils.QuantileDeltaMapping`. Note that this + assumes that params_mh is the data distribution representative for the + target data. + delta_denom_min : float | None + Option to specify a minimum value for the denominator term in the + calculation of a relative delta value. This prevents division by a + very small number making delta blow up and resulting in very large + output bias corrected values. See equation 4 of Cannon et al., 2015 + for the delta term. If this is not set, the ``zero_rate_threshold`` + calculated as part of the presrat bias calculation will be used + delta_range : tuple | None + Option to set a (min, max) on the delta term in QDM. This can help + prevent QDM from making non-realistic increases/decreases in + otherwise physical values. See equation 4 of Cannon et al., 2015 for + the delta term. 
+ k_range : tuple | None + Option to set a (min, max) value for the k-factor multiplier + out_range : None | tuple + Option to set floor/ceiling values on the output data. + max_workers : int | None + Max number of workers to use for QDM process pool + """ + time_index = make_time_index_from_kws(date_range_kwargs) + msg = f'data was expected to be a 3D array but got shape {data.shape}' + assert data.ndim == 3, msg + msg = (f'Time should align with data 3rd dimension but got data ' + f'{data.shape} and time_index length ' + f'{time_index.size}: {time_index}') + assert data.shape[-1] == time_index.size, msg + + params = _get_spatial_bc_presrat( + lat_lon, base_dset, feature_name, bias_fp, threshold + ) + cfg = params['cfg'] + time_window_center = cfg['time_window_center'] + data = np.asarray(data) + base_params = np.asarray(params['base']) + bias_params = np.asarray(params['bias']) + bias_fut_params = np.asarray(params['bias_fut']) + bias_tau_fut = np.asarray(params['bias_tau_fut']) + k_factor = params['k_factor'] + dist = cfg['dist'] + sampling = cfg['sampling'] + log_base = cfg['log_base'] + zero_rate_threshold = cfg['zero_rate_threshold'] + delta_denom_min = delta_denom_min or zero_rate_threshold + + if k_range is not None: + k_factor = np.maximum(k_factor, np.min(k_range)) + k_factor = np.minimum(k_factor, np.max(k_range)) + + logger.debug(f'Presrat K Factor has shape {k_factor.shape} and ranges ' + f'from {k_factor.min()} to {k_factor.max()}') + + if lr_padded_slice is not None: + spatial_slice = (lr_padded_slice[0], lr_padded_slice[1]) + base_params = base_params[spatial_slice] + bias_params = bias_params[spatial_slice] + bias_fut_params = bias_fut_params[spatial_slice] + + data_unbiased = _apply_presrat_bc(data, time_index, base_params, + bias_params, bias_fut_params, + bias_tau_fut, k_factor, + time_window_center, dist=dist, + sampling=sampling, log_base=log_base, + relative=relative, no_trend=no_trend, + delta_denom_min=delta_denom_min, + delta_range=delta_range, + out_range=out_range, + max_workers=max_workers) + + return data_unbiased
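+
+# A hedged call sketch for this entry point. The file name and kwargs are
+# hypothetical, ``data`` and ``lat_lon`` are assumed arrays, and the bias
+# file must come from the PresRat bias calculation.
+#
+# import pandas as pd
+# date_range_kwargs = {'start': '2060-01-01', 'end': '2060-12-31',
+#                      'freq': 'D'}
+# corrected = local_presrat_bc(
+#     data, lat_lon, base_dset='pr', feature_name='pr',
+#     bias_fp='./presrat_params.h5', date_range_kwargs=date_range_kwargs,
+#     k_range=(0.5, 2.0),
+# )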
\ No newline at end of file
diff --git a/_modules/sup3r/bias/mixins.html b/_modules/sup3r/bias/mixins.html
new file mode 100644
index 000000000..defbe4691
--- /dev/null
+++ b/_modules/sup3r/bias/mixins.html
@@ -0,0 +1,833 @@
+sup3r.bias.mixins — sup3r 0.2.1.dev62 documentation
Source code for sup3r.bias.mixins

+"""Mixin classes to support bias calculation"""
+
+import logging
+
+import numpy as np
+from scipy.ndimage import gaussian_filter
+
+from sup3r.utilities.utilities import nn_fill_array
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class FillAndSmoothMixin: + """Fill and extend parameters for calibration on missing positions + + TODO: replace nn_fill_array call with `Sup3rX.interpolate_na` method + """ + +
+[docs]
+    def fill_and_smooth(
+        self, out, fill_extend=True, smooth_extend=0, smooth_interior=0
+    ):
+        """For a given set of parameters, fill and extend missing positions
+
+        Fill data extending beyond the base meta data extent by doing a
+        nearest neighbor gap fill. Smooth the interior and extended regions
+        with the given smoothing values. Interior smoothing can reduce the
+        effect of extreme values within aggregations over a large number of
+        pixels. The interior is assumed to be defined by the region without
+        nan values. The extended region is assumed to be the region with nan
+        values.
+
+        Parameters
+        ----------
+        out : dict
+            Dictionary of values defining the mean/std of the bias + base
+            data and the scalar + adder factors to correct the biased data
+            like: bias_data * scalar + adder. Each value is of shape
+            (lat, lon, time).
+        fill_extend : bool
+            Whether to fill data extending beyond the base meta data with
+            nearest neighbor values.
+        smooth_extend : float
+            Option to smooth the scalar/adder data outside of the spatial
+            domain set by the threshold input. This alleviates the weird
+            seams far from the domain of interest. This value is the standard
+            deviation for the gaussian_filter kernel
+        smooth_interior : float
+            Value to use to smooth the scalar/adder data inside of the
+            spatial domain set by the threshold input. This can reduce the
+            effect of extreme values within aggregations over a large number
+            of pixels. This value is the standard deviation for the
+            gaussian_filter kernel.
+
+        Returns
+        -------
+        out : dict
+            Dictionary of values defining the mean/std of the bias + base
+            data and the scalar + adder factors to correct the biased data
+            like: bias_data * scalar + adder. Each value is of shape
+            (lat, lon, time).
+        """
+        if len(self.bad_bias_gids) > 0:
+            logger.info(
+                'Found {} bias gids that are out of bounds: {}'.format(
+                    len(self.bad_bias_gids), self.bad_bias_gids
+                )
+            )
+
+        for key, arr in out.items():
+            nan_mask = np.isnan(arr[..., 0])
+            for idt in range(arr.shape[-1]):
+                arr_smooth = arr[..., idt]
+
+                needs_fill = (
+                    np.isnan(arr_smooth).any() and fill_extend
+                ) or smooth_interior > 0
+
+                if needs_fill:
+                    logger.info(
+                        'Filling NaN values outside of valid spatial '
+                        'extent for dataset "{}" for timestep {}'.format(
+                            key, idt
+                        )
+                    )
+                    arr_smooth = nn_fill_array(arr_smooth)
+
+                arr_smooth_int = arr_smooth_ext = arr_smooth
+
+                if smooth_extend > 0:
+                    arr_smooth_ext = gaussian_filter(
+                        arr_smooth_ext, smooth_extend, mode='nearest'
+                    )
+
+                if smooth_interior > 0:
+                    arr_smooth_int = gaussian_filter(
+                        arr_smooth_int, smooth_interior, mode='nearest'
+                    )
+
+                out[key][nan_mask, idt] = arr_smooth_ext[nan_mask]
+                out[key][~nan_mask, idt] = arr_smooth_int[~nan_mask]
+
+        return out
+
+ + + +
+[docs] +class ZeroRateMixin: + """Estimate zero rate + + [Pierce2015]_. + + References + ---------- + .. [Pierce2015] Pierce, D. W., Cayan, D. R., Maurer, E. P., Abatzoglou, J. + T., & Hegewisch, K. C. (2015). Improved bias correction techniques for + hydrological simulations of climate change. Journal of Hydrometeorology, + 16(6), 2421-2442. + """ + +
+[docs]
+    @staticmethod
+    def zero_precipitation_rate(arr: np.ndarray, threshold: float = 0.0):
+        """Rate of (nearly) zero precipitation days
+
+        Estimate the rate of values at or below a given ``threshold``. In
+        concept the threshold would be zero (thus the name zero precipitation
+        rate) but a small threshold is often used to truncate negligible
+        values. For instance, [Pierce2015]_ uses 0.01 mm/day for PresRat
+        correction.
+
+        Parameters
+        ----------
+        arr : np.ndarray
+            An array of values to be analyzed. Usually precipitation but it
+            could be applied to other quantities.
+        threshold : float
+            Values less than or equal to this threshold are assumed to be
+            zero.
+
+        Returns
+        -------
+        rate : float
+            Rate of days with negligible precipitation (see Z_gf in
+            [Pierce2015]_).
+
+        Notes
+        -----
+        ``NaN`` values are ignored for the rate estimate. Therefore, a large
+        number of ``NaN`` might compromise the confidence of the estimator.
+
+        If the input values are all non-finite, it returns NaN.
+
+        Examples
+        --------
+        >>> ZeroRateMixin().zero_precipitation_rate(np.array([2, 3, 4]), 1)
+        0.0
+
+        >>> ZeroRateMixin().zero_precipitation_rate(np.array([0, 1, 2, 3]), 1)
+        0.5
+
+        >>> ZeroRateMixin().zero_precipitation_rate(
+        ...     np.array([0, 1, 2, 3, np.nan]), 1)
+        0.5
+        """
+        idx = np.isfinite(arr)
+
+        return np.mean((arr[idx] <= threshold).astype('i'))
+
+ +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/sup3r/bias/presrat.html b/_modules/sup3r/bias/presrat.html new file mode 100644 index 000000000..f7d3ceb48 --- /dev/null +++ b/_modules/sup3r/bias/presrat.html @@ -0,0 +1,1238 @@ + + + + + + + + + + sup3r.bias.presrat — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for sup3r.bias.presrat

+"""QDM related methods to correct and bias and trend
+
+Procedures to apply Quantile Delta Method correction and derived methods such
+as PresRat.
+"""
+
+import copy
+import json
+import logging
+import os
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from typing import Optional
+
+import h5py
+import numpy as np
+from rex.utilities.bc_utils import (
+    QuantileDeltaMapping,
+)
+
+from sup3r.preprocessing import DataHandler
+
+from .mixins import ZeroRateMixin
+from .qdm import QuantileDeltaMappingCorrection
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class PresRat(ZeroRateMixin, QuantileDeltaMappingCorrection):
+    """PresRat bias correction method (precipitation)
+
+    The PresRat correction [Pierce2015]_ is defined as the combination of
+    three steps:
+    * Use the model-predicted change ratio (with the CDFs);
+    * The treatment of zero-precipitation days (with the fraction of dry
+      days);
+    * The final correction factor (K) to preserve the mean (ratio between
+      both estimated means);
+
+    To keep consistency with the full sup3r pipeline, PresRat was implemented
+    as follows:
+
+    1) Define zero rate from observations (oh)
+
+    Using the historical observations, estimate the rate of zero
+    precipitation for each gridpoint. A long time series, such as decadal or
+    longer, is expected here. A threshold larger than zero is an option here.
+
+    The result is a 2D (space) `zero_rate` (non-dimensional).
+
+    2) Find the threshold for each gridpoint (mh)
+
+    Using the zero rate from the previous step, identify the magnitude
+    threshold for each gridpoint that satisfies that dry days rate.
+
+    Note that Pierce (2015) imposes `tau` >= 0.01 mm/day for precipitation.
+
+    The result is a 2D (space) threshold `tau` with the same units as the
+    data being corrected. For instance, it could be mm/day for precipitation.
+
+    3) Define `Z_fg` using `tau` (mf)
+
+    The `tau` that was defined with the *modeled historical* is now used as
+    a threshold on the *modeled future*, before any correction, to define
+    the equivalent zero rate in the future.
+
+    The result is a 2D (space) rate (non-dimensional)
+
+    4) Estimate `tau_fut` using `Z_fg`
+
+    Since sup3r processes data in smaller chunks, the rate `Z_fg` cannot be
+    applied directly. To address that, all *modeled future* data is
+    corrected with QDM, and `Z_fg` is applied to that corrected data to
+    define `tau_fut`.
+
+    References
+    ----------
+    .. [Pierce2015] Pierce, D. W., Cayan, D. R., Maurer, E. P., Abatzoglou,
+        J. T., & Hegewisch, K. C. (2015). Improved bias correction techniques
+        for hydrological simulations of climate change. Journal of
+        Hydrometeorology, 16(6), 2421-2442.
+    """
+
+    def _init_out(self):
+        super()._init_out()
+
+        shape = (*self.bias_gid_raster.shape, 1)
+        self.out[f'{self.base_dset}_zero_rate'] = np.full(shape,
+                                                          np.nan,
+                                                          np.float32)
+        self.out[f'{self.bias_feature}_tau_fut'] = np.full(shape,
+                                                           np.nan,
+                                                           np.float32)
+        shape = (*self.bias_gid_raster.shape, self.n_time_steps)
+        self.out[f'{self.bias_feature}_k_factor'] = np.full(
+            shape, np.nan, np.float32)
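The first two steps described above reduce to a few lines of numpy. A toy sketch at a single gridpoint with synthetic daily precipitation (made-up arrays, not real data):

import numpy as np

rng = np.random.default_rng(0)
oh = rng.gamma(0.4, 2.0, 10_000)        # observed historical (mm/day)
oh[rng.random(oh.size) < 0.3] = 0.0     # make roughly 30% of days dry
mh = rng.gamma(0.5, 2.0, 10_000)        # modeled historical (mm/day)

threshold = 0.01                        # mm/day, following Pierce (2015)

# Step 1: zero rate from observations
zero_rate = np.mean(oh <= threshold)

# Step 2: magnitude threshold tau in the modeled historical data that
# reproduces the same dry-day rate
idx = min(round(zero_rate * mh.size), mh.size - 1)
tau = np.sort(mh)[idx]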
+[docs]
+    @classmethod
+    def calc_tau_fut(cls, base_data, bias_data, bias_fut_data,
+                     corrected_fut_data, zero_rate_threshold=1.157e-7):
+        """Calculate a precipitation threshold (tau) that preserves the
+        model-predicted changes in fraction of dry days at a single spatial
+        location.
+
+        Parameters
+        ----------
+        base_data : np.ndarray
+            A 1D array of (usually) daily precipitation observations from a
+            historical dataset like Daymet
+        bias_data : np.ndarray
+            A 1D array of (usually) daily precipitation historical climate
+            simulation outputs from a GCM dataset from CMIP6
+        bias_fut_data : np.ndarray
+            A 1D array of (usually) daily precipitation future (e.g., ssp245)
+            climate simulation outputs from a GCM dataset from CMIP6
+        corrected_fut_data : np.ndarray
+            Bias corrected bias_fut_data, usually with relative QDM
+        zero_rate_threshold : float, default=1.157e-7
+            Threshold value used to determine the zero rate in the observed
+            historical dataset. For instance, 0.01 means that anything less
+            than that will be considered negligible, hence equal to zero. Dai
+            2006 defined this as 1 mm/day. Pierce 2015 used 0.01 mm/day. We
+            recommend 0.01 mm/day (1.157e-7 kg/m2/s).
+
+        Returns
+        -------
+        tau_fut : float
+            Precipitation threshold that will preserve the model-predicted
+            changes in fraction of dry days. Precipitation less than this
+            value in the modeled future data can be set to zero.
+        obs_zero_rate : float
+            Rate of dry days in the observed historical data.
+        """
+
+        # Step 1: Define zero rate from observations
+        assert base_data.ndim == 1
+        obs_zero_rate = cls.zero_precipitation_rate(
+            base_data, zero_rate_threshold)
+
+        # Step 2: Find tau for each grid point
+        # Removed NaN handling, thus reinforce finite-only data.
+        assert np.isfinite(bias_data).all(), "Unexpected invalid values"
+        assert bias_data.ndim == 1, "Assumed bias_data to be 1D"
+        n_threshold = round(obs_zero_rate * bias_data.size)
+        n_threshold = min(n_threshold, bias_data.size - 1)
+        tau = np.sort(bias_data)[n_threshold]
+        # Pierce (2015) imposes 0.01 mm/day
+        # tau = max(tau, 0.01)
+
+        # Step 3: Find Z_gf as the zero rate in mf
+        assert np.isfinite(bias_fut_data).all(), "Unexpected invalid values"
+        z_fg = (bias_fut_data < tau).astype('i').sum() / bias_fut_data.size
+
+        # Step 4: Estimate tau_fut with corrected mf
+        tau_fut = np.sort(corrected_fut_data)[round(
+            z_fg * corrected_fut_data.size)]
+
+        return tau_fut, obs_zero_rate
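A hypothetical call with synthetic 1D inputs; in practice `corrected_fut_data` comes from a relative QDM correction rather than the crude scaling used here:

import numpy as np

rng = np.random.default_rng(42)
base = rng.gamma(0.4, 2e-5, 5_000)          # observed historical
bias = rng.gamma(0.5, 2e-5, 5_000)          # modeled historical
bias_fut = rng.gamma(0.5, 2.5e-5, 5_000)    # modeled future
corrected_fut = 0.9 * bias_fut              # placeholder for QDM output

tau_fut, obs_zero_rate = PresRat.calc_tau_fut(
    base, bias, bias_fut, corrected_fut, zero_rate_threshold=1.157e-7
)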
+ + +
+[docs]
+    @classmethod
+    def calc_k_factor(cls, base_data, bias_data, bias_fut_data,
+                      corrected_fut_data, base_ti, bias_ti, bias_fut_ti,
+                      window_center, window_size, n_time_steps,
+                      zero_rate_threshold):
+        """Calculate the K factor at a single spatial location that will
+        preserve the original model-predicted mean change in precipitation
+
+        Parameters
+        ----------
+        base_data : np.ndarray
+            A 1D array of (usually) daily precipitation observations from a
+            historical dataset like Daymet
+        bias_data : np.ndarray
+            A 1D array of (usually) daily precipitation historical climate
+            simulation outputs from a GCM dataset from CMIP6
+        bias_fut_data : np.ndarray
+            A 1D array of (usually) daily precipitation future (e.g., ssp245)
+            climate simulation outputs from a GCM dataset from CMIP6
+        corrected_fut_data : np.ndarray
+            Bias corrected bias_fut_data, usually with relative QDM
+        base_ti : pd.DatetimeIndex
+            Datetime index associated with base_data and of the same length
+        bias_ti : pd.DatetimeIndex
+            Datetime index associated with bias_data and of the same length
+        bias_fut_ti : pd.DatetimeIndex
+            Datetime index associated with bias_fut_data and of the same
+            length
+        window_center : np.ndarray
+            Sequence of days of the year equally spaced and shifted by half
+            window size, thus `n_time_steps`=12 results in approximately
+            [15, 45, ...]. It includes the fraction of a day, thus 15.5 is
+            equivalent to January 15th, 12:00h. Shape is (N,)
+        window_size : int
+            Total time window period in days to be considered for each time
+            QDM is calculated. For instance, `window_size=30` with
+            `n_time_steps=12` would result in approximately monthly
+            estimates.
+        n_time_steps : int
+            Number of times to calculate QDM parameters equally distributed
+            along a year. For instance, `n_time_steps=1` results in a single
+            set of parameters while `n_time_steps=12` is approximately every
+            month.
+        zero_rate_threshold : float, default=1.157e-7
+            Threshold value used to determine the zero rate in the observed
+            historical dataset. For instance, 0.01 means that anything less
+            than that will be considered negligible, hence equal to zero. Dai
+            2006 defined this as 1 mm/day. Pierce 2015 used 0.01 mm/day. We
+            recommend 0.01 mm/day (1.157e-7 kg/m2/s).
+
+        Returns
+        -------
+        k : np.ndarray
+            K factor from the Pierce 2015 paper with shape (n_time_steps,)
+            for a single spatial location.
+        """
+
+        k = np.full(n_time_steps, np.nan, np.float32)
+        for nt, t in enumerate(window_center):
+            base_idt = cls.window_mask(base_ti.day_of_year, t, window_size)
+            bias_idt = cls.window_mask(bias_ti.day_of_year, t, window_size)
+            bias_fut_idt = cls.window_mask(bias_fut_ti.day_of_year, t,
+                                           window_size)
+
+            oh = base_data[base_idt].mean()
+            mh = bias_data[bias_idt].mean()
+            mf = bias_fut_data[bias_fut_idt].mean()
+            mf_unbiased = corrected_fut_data[bias_fut_idt].mean()
+
+            oh = np.maximum(oh, zero_rate_threshold)
+            mh = np.maximum(mh, zero_rate_threshold)
+            mf = np.maximum(mf, zero_rate_threshold)
+            mf_unbiased = np.maximum(mf_unbiased, zero_rate_threshold)
+
+            x = mf / mh
+            x_hat = mf_unbiased / oh
+            k[nt] = x / x_hat
+        return k
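In a single window the K factor is simple arithmetic. With made-up window means (same units throughout):

oh, mh, mf, mf_unbiased = 2.0, 2.5, 3.0, 2.6

x = mf / mh               # model-predicted mean change: 1.2
x_hat = mf_unbiased / oh  # mean change after bias correction: 1.3
k = x / x_hat             # ~0.923; multiplying the corrected data by k
                          # restores the model-predicted mean change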
+ + + # pylint: disable=W0613 + @classmethod + def _run_single(cls, + bias_data, + bias_fut_data, + base_fps, + bias_feature, + base_dset, + base_gid, + base_handler, + daily_reduction, + *, + bias_ti, + bias_fut_ti, + decimals, + dist, + relative, + sampling, + n_samples, + log_base, + n_time_steps, + window_size, + zero_rate_threshold, + base_dh_inst=None, + ): + """Estimate probability distributions at a single site + + TODO! This should be refactored. There is too much redundancy in + the code. Let's make it work first, and optimize later. + """ + base_data, base_ti = cls.get_base_data( + base_fps, + base_dset, + base_gid, + base_handler, + daily_reduction=daily_reduction, + decimals=decimals, + base_dh_inst=base_dh_inst, + ) + + window_size = window_size or 365 / n_time_steps + window_center = cls._window_center(n_time_steps) + + template = np.full((n_time_steps, n_samples), np.nan, np.float32) + out = {} + corrected_fut_data = np.full_like(bias_fut_data, np.nan) + for nt, t in enumerate(window_center): + # Define indices for which data goes in the current time window + base_idx = cls.window_mask(base_ti.day_of_year, t, window_size) + bias_idx = cls.window_mask(bias_ti.day_of_year, t, window_size) + bias_fut_idx = cls.window_mask(bias_fut_ti.day_of_year, + t, + window_size) + + if any(base_idx) and any(bias_idx) and any(bias_fut_idx): + tmp = cls.get_qdm_params(bias_data[bias_idx], + bias_fut_data[bias_fut_idx], + base_data[base_idx], + bias_feature, + base_dset, + sampling, + n_samples, + log_base) + for k, v in tmp.items(): + if k not in out: + out[k] = template.copy() + out[k][(nt), :] = v + + QDM = QuantileDeltaMapping( + np.asarray(out[f'base_{base_dset}_params'][nt]), + np.asarray(out[f'bias_{bias_feature}_params'][nt]), + np.asarray(out[f'bias_fut_{bias_feature}_params'][nt]), + dist=dist, + relative=relative, + sampling=sampling, + log_base=log_base, + delta_denom_min=zero_rate_threshold, + ) + subset = bias_fut_data[bias_fut_idx] + corrected_fut_data[bias_fut_idx] = QDM(subset).squeeze() + + tau_fut, obs_zero_rate = cls.calc_tau_fut(base_data, bias_data, + bias_fut_data, + corrected_fut_data, + zero_rate_threshold) + k = cls.calc_k_factor(base_data, bias_data, bias_fut_data, + corrected_fut_data, base_ti, bias_ti, + bias_fut_ti, window_center, window_size, + n_time_steps, zero_rate_threshold) + + out[f'{bias_feature}_k_factor'] = k + out[f'{base_dset}_zero_rate'] = obs_zero_rate + out[f'{bias_feature}_tau_fut'] = tau_fut + + return out + +
+[docs] + def run( + self, + fp_out=None, + max_workers=None, + daily_reduction='avg', + fill_extend=True, + smooth_extend=0, + smooth_interior=0, + zero_rate_threshold=1.157e-7, + ): + """Estimate the required information for PresRat correction + + Parameters + ---------- + fp_out : str | None + Optional .h5 output file to write scalar and adder arrays. + max_workers : int, optional + Number of workers to run in parallel. 1 is serial and None is all + available. + daily_reduction : None | str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + fill_extend : bool + Whether to fill data extending beyond the base meta data with + nearest neighbor values. + smooth_extend : float + Option to smooth the scalar/adder data outside of the spatial + domain set by the threshold input. This alleviates the weird seams + far from the domain of interest. This value is the standard + deviation for the gaussian_filter kernel + smooth_interior : float + Value to use to smooth the scalar/adder data inside of the spatial + domain set by the threshold input. This can reduce the effect of + extreme values within aggregations over large number of pixels. + This value is the standard deviation for the gaussian_filter + kernel. + zero_rate_threshold : float, default=1.157e-7 + Threshold value used to determine the zero rate in the observed + historical dataset. For instance, 0.01 means that anything less + than that will be considered negligible, hence equal to zero. Dai + 2006 defined this as 1mm/day. Pierce 2015 used 0.01mm/day. We + recommend 0.01mm/day (1.157e-7 kg/m2/s). + + Returns + ------- + out : dict + Dictionary with parameters defining the statistical distributions + for each of the three given datasets. Each value has dimensions + (lat, lon, n-parameters). + """ + logger.debug('Calculating CDF parameters for QDM') + + logger.info( + 'Initialized params with shape: {}'.format( + self.bias_gid_raster.shape + ) + ) + self.bad_bias_gids = [] + + # sup3r DataHandler opening base files will load all data in parallel + # during the init and should not be passed in parallel to workers + if isinstance(self.base_dh, DataHandler): + max_workers = 1 + + if max_workers == 1: + logger.debug('Running serial calculation.') + for i, bias_gid in enumerate(self.bias_meta.index): + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + logger.debug( + f'No base data for bias_gid: {bias_gid}. 
' + 'Adding it to bad_bias_gids' + ) + else: + bias_data = self.get_bias_data(bias_gid) + bias_fut_data = self.get_bias_data( + bias_gid, self.bias_fut_dh + ) + single_out = self._run_single( + bias_data, + bias_fut_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + bias_ti=self.bias_dh.time_index, + bias_fut_ti=self.bias_fut_dh.time_index, + decimals=self.decimals, + dist=self.dist, + relative=self.relative, + sampling=self.sampling, + n_samples=self.n_quantiles, + log_base=self.log_base, + n_time_steps=self.n_time_steps, + window_size=self.window_size, + base_dh_inst=self.base_dh, + zero_rate_threshold=zero_rate_threshold, + ) + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info( + 'Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(self.bias_meta)) + ) + + else: + logger.debug( + 'Running parallel calculation with {} workers.'.format( + max_workers + ) + ) + with ProcessPoolExecutor(max_workers=max_workers) as exe: + futures = {} + for bias_gid in self.bias_meta.index: + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + else: + bias_data = self.get_bias_data(bias_gid) + bias_fut_data = self.get_bias_data( + bias_gid, self.bias_fut_dh + ) + future = exe.submit( + self._run_single, + bias_data, + bias_fut_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + bias_ti=self.bias_dh.time_index, + bias_fut_ti=self.bias_fut_dh.time_index, + decimals=self.decimals, + dist=self.dist, + relative=self.relative, + sampling=self.sampling, + n_samples=self.n_quantiles, + log_base=self.log_base, + n_time_steps=self.n_time_steps, + window_size=self.window_size, + zero_rate_threshold=zero_rate_threshold, + ) + futures[future] = raster_loc + + logger.debug('Finished launching futures.') + for i, future in enumerate(as_completed(futures)): + raster_loc = futures[future] + single_out = future.result() + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info( + 'Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(futures)) + ) + + logger.info('Finished calculating bias correction factors.') + + self.out = self.fill_and_smooth( + self.out, fill_extend, smooth_extend, smooth_interior + ) + + extra_attrs = { + 'zero_rate_threshold': zero_rate_threshold, + 'time_window_center': self.time_window_center, + } + self.write_outputs(fp_out, + self.out, + extra_attrs=extra_attrs, + ) + + return copy.deepcopy(self.out)
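A hypothetical end-to-end invocation of the method above; every file path and feature name below is a placeholder, not a real dataset:

calc = PresRat(
    base_fps=['daymet_prcp_*.h5'],       # placeholder observation files
    bias_fps=['gcm_historical_*.nc'],    # placeholder historical GCM files
    bias_fut_fps=['gcm_ssp245_*.nc'],    # placeholder future GCM files
    base_dset='pr',
    bias_feature='pr',
)
out = calc.run(
    fp_out='presrat_params.h5',
    max_workers=4,
    zero_rate_threshold=1.157e-7,
)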
+ + +
+[docs] + def write_outputs(self, fp_out: str, + out: Optional[dict] = None, + extra_attrs: Optional[dict] = None): + """Write outputs to an .h5 file. + + Parameters + ---------- + fp_out : str | None + An HDF5 filename to write the estimated statistical distributions. + out : dict, optional + A dictionary with the three statistical distribution parameters. + If not given, it uses :attr:`.out`. + extra_attrs: dict, optional + Extra attributes to be exported together with the dataset. + + Examples + -------- + >>> mycalc = PresRat(...) + >>> mycalc.write_outputs(fp_out="myfile.h5", out=mydictdataset, + ... extra_attrs={'zero_rate_threshold': 0.01}) + """ + + out = out or self.out + + if fp_out is not None: + if not os.path.exists(os.path.dirname(fp_out)): + os.makedirs(os.path.dirname(fp_out), exist_ok=True) + + with h5py.File(fp_out, 'w') as f: + # pylint: disable=E1136 + lat = self.bias_dh.lat_lon[..., 0] + lon = self.bias_dh.lat_lon[..., 1] + f.create_dataset('latitude', data=lat) + f.create_dataset('longitude', data=lon) + for dset, data in out.items(): + f.create_dataset(dset, data=data) + + for k, v in self.meta.items(): + f.attrs[k] = json.dumps(v) + f.attrs['dist'] = self.dist + f.attrs['sampling'] = self.sampling + f.attrs['log_base'] = self.log_base + f.attrs['base_fps'] = self.base_fps + f.attrs['bias_fps'] = self.bias_fps + f.attrs['bias_fut_fps'] = self.bias_fut_fps + if extra_attrs is not None: + for a, v in extra_attrs.items(): + f.attrs[a] = v + logger.info('Wrote quantiles to file: {}'.format(fp_out))
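Reading the file back is symmetric with the writes above. A small sketch; the dataset names depend on the feature names used, so 'pr' here is a placeholder:

import h5py

with h5py.File('presrat_params.h5', 'r') as f:
    lat = f['latitude'][...]
    tau_fut = f['pr_tau_fut'][...]       # '{bias_feature}_tau_fut'
    k = f['pr_k_factor'][...]            # '{bias_feature}_k_factor'
    zr_threshold = f.attrs['zero_rate_threshold']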
+
+ +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/sup3r/bias/qdm.html b/_modules/sup3r/bias/qdm.html new file mode 100644 index 000000000..991800241 --- /dev/null +++ b/_modules/sup3r/bias/qdm.html @@ -0,0 +1,1339 @@ + + + + + + + + + + sup3r.bias.qdm — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for sup3r.bias.qdm

+"""QDM related methods to correct and bias and trend
+
+Procedures to apply Quantile Delta Method correction and derived methods such
+as PresRat.
+"""
+
+import copy
+import json
+import logging
+import os
+from concurrent.futures import ProcessPoolExecutor, as_completed
+
+import h5py
+import numpy as np
+from rex.utilities.bc_utils import (
+    sample_q_invlog,
+    sample_q_linear,
+    sample_q_log,
+)
+
+from sup3r.preprocessing import DataHandler
+from sup3r.preprocessing.utilities import expand_paths
+
+from .base import DataRetrievalBase
+from .mixins import FillAndSmoothMixin
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class QuantileDeltaMappingCorrection(FillAndSmoothMixin, DataRetrievalBase):
+    """Estimate probability distributions required by Quantile Delta Mapping
+
+    The main purpose of this class is to estimate the probability
+    distributions required by the Quantile Delta Mapping (QDM)
+    ([Cannon2015]_) technique. The name 'Correction' can therefore be
+    misleading, since this class does not apply the correction *per se*; the
+    name was kept for consistency within this module.
+
+    The QDM technique corrects bias and trend by comparing the data
+    distributions of three datasets: a historical reference, a biased
+    reference, and a biased target to correct (in Cannon et al. (2015)
+    called: historical observed, historical modeled, and future modeled
+    respectively). Those three probability distributions provided here
+    can be, for instance, used by
+    :func:`~sup3r.bias.bias_transforms.local_qdm_bc` to actually correct
+    a dataset.
+    """
+
+    def __init__(self,
+                 base_fps,
+                 bias_fps,
+                 bias_fut_fps,
+                 base_dset,
+                 bias_feature,
+                 distance_upper_bound=None,
+                 target=None,
+                 shape=None,
+                 base_handler='Resource',
+                 bias_handler='DataHandlerNCforCC',
+                 base_handler_kwargs=None,
+                 bias_handler_kwargs=None,
+                 bias_fut_handler_kwargs=None,
+                 decimals=None,
+                 match_zero_rate=False,
+                 n_quantiles=101,
+                 dist='empirical',
+                 relative=True,
+                 sampling='linear',
+                 log_base=10,
+                 n_time_steps=24,
+                 window_size=120,
+                 pre_load=True,
+                 ):
+        """
+        Parameters
+        ----------
+        base_fps : list | str
+            One or more baseline .h5 filepaths representing non-biased data
+            to use to correct the biased dataset (observed historical in
+            Cannon et al. (2015)). This is typically several years of WTK or
+            NSRDB files.
+        bias_fps : list | str
+            One or more biased .nc or .h5 filepaths representing the biased
+            data to be compared with the baseline data (modeled historical
+            in Cannon et al. (2015)). This is typically several years of GCM
+            .nc files.
+        bias_fut_fps : list | str
+            Data consistent with `bias_fps` but for a different time period
+            (modeled future in Cannon et al. (2015)). This is the dataset
+            that would be corrected, while `bias_fps` is used to provide a
+            transformation map with the baseline data.
+        base_dset : str
+            A single dataset from the base_fps to retrieve. In the case of
+            wind components, this can be U_100m or V_100m which will retrieve
+            windspeed and winddirection and derive the U/V component.
+        bias_feature : str
+            This is the biased feature from bias_fps to retrieve. This should
+            be a single feature name corresponding to base_dset
+        distance_upper_bound : float
+            Upper bound on the nearest neighbor distance in decimal degrees.
+            This should be the approximate resolution of the low-resolution
+            bias data. None (default) will calculate this based on the median
+            distance between points in bias_fps
+        target : tuple
+            (lat, lon) lower left corner of raster to retrieve from bias_fps.
+            If None then the lower left corner of the full domain will be
+            used.
+        shape : tuple
+            (rows, cols) grid size to retrieve from bias_fps. If None then
+            the full domain shape will be used.
+        base_handler : str
+            Name of rex resource handler or sup3r.preprocessing.data_handlers
+            class to be retrieved from the rex/sup3r library. If a
+            sup3r.preprocessing.data_handlers class is used, all data will be
+            loaded in this class' initialization and the subsequent bias
+            calculation will be done in serial
+        bias_handler : str
+            Name of the bias data handler class to be retrieved from the
+            sup3r.preprocessing.data_handlers library.
+        base_handler_kwargs : dict | None
+            Optional kwargs to send to the initialization of the base_handler
+            class
+        bias_handler_kwargs : dict | None
+            Optional kwargs to send to the initialization of the bias_handler
+            class with the bias_fps
+        bias_fut_handler_kwargs : dict | None
+            Optional kwargs to send to the initialization of the
+            bias_handler class with the bias_fut_fps
+        decimals : int | None
+            Option to round bias and base data to this number of
+            decimals, this gets passed to np.around(). If decimals
+            is negative, it specifies the number of positions to
+            the left of the decimal point.
+        match_zero_rate : bool
+            Option to fix the frequency of zero values in the biased data.
+            The lowest percentile of values in the biased data will be set
+            to zero to match the percentile of zeros in the base data. If
+            SkillAssessment is being run and this is True, the distributions
+            will not be mean-centered. This helps resolve the issue where
+            global climate models produce too many days with small
+            precipitation totals e.g., the "drizzle problem" [Polade2014]_.
+        dist : str, default="empirical"
+            Define the type of distribution, which can be "empirical" or any
+            parametric distribution defined in "scipy".
+        n_quantiles : int, default=101
+            Defines the number of quantiles (between 0 and 1) for an
+            empirical distribution.
+        relative : bool, default=True
+            Switcher to apply QDM as a relative (use True) or absolute (use
+            False) correction value.
+        sampling : str, default="linear"
+            Defines how the quantiles are sampled. For instance, 'linear'
+            will result in linearly spaced quantiles. Other options are:
+            'log' and 'invlog'.
+        log_base : int or float, default=10
+            Log base value if sampling is "log" or "invlog".
+        n_time_steps : int
+            Number of times to calculate QDM parameters equally distributed
+            along a year. For instance, `n_time_steps=1` results in a single
+            set of parameters while `n_time_steps=12` is approximately every
+            month.
+        window_size : int
+            Total time window period in days to be considered for each time
+            QDM is calculated. For instance, `window_size=30` with
+            `n_time_steps=12` would result in approximately monthly
+            estimates.
+        pre_load : bool
+            Flag to preload all data needed for bias correction. This is
+            currently recommended to improve performance with the new sup3r
+            data handler access patterns
+
+        See Also
+        --------
+        sup3r.bias.bias_transforms.local_qdm_bc :
+            Bias correction using QDM.
+        sup3r.preprocessing.data_handlers.DataHandler :
+            Bias correction using QDM directly from a derived handler.
+        rex.utilities.bc_utils.QuantileDeltaMapping
+            Quantile Delta Mapping method and support functions. Since
+            :mod:`rex.utilities.bc_utils` is used here, the arguments
+            ``dist``, ``n_quantiles``, ``sampling``, and ``log_base``
+            must be consistent with that package/module.
+
+        Notes
+        -----
+        One way of using this class is by saving the distribution
+        definitions obtained here with the method :meth:`.write_outputs` and
+        then using that file with
+        :func:`~sup3r.bias.bias_transforms.local_qdm_bc` or through a
+        derived :class:`~sup3r.preprocessing.data_handlers.DataHandler`.
+        **ATTENTION**, be careful handling that file of parameters. There is
+        no checking process and one could misuse the correction estimated
+        for the wrong dataset.
+
+        References
+        ----------
+        .. [Cannon2015] Cannon, A. J., Sobie, S. R., & Murdock, T. Q. (2015).
+            Bias correction of GCM precipitation by quantile mapping: how
+            well do methods preserve changes in quantiles and extremes?.
+            Journal of Climate, 28(17), 6938-6959.
+ """ + + self.n_quantiles = n_quantiles + self.dist = dist + self.relative = relative + self.sampling = sampling + self.log_base = log_base + self.n_time_steps = n_time_steps + self.window_size = window_size + + super().__init__(base_fps=base_fps, + bias_fps=bias_fps, + base_dset=base_dset, + bias_feature=bias_feature, + distance_upper_bound=distance_upper_bound, + target=target, + shape=shape, + base_handler=base_handler, + bias_handler=bias_handler, + base_handler_kwargs=base_handler_kwargs, + bias_handler_kwargs=bias_handler_kwargs, + decimals=decimals, + match_zero_rate=match_zero_rate, + pre_load=False, + ) + + self.bias_fut_fps = bias_fut_fps + self.bias_fut_fps = expand_paths(self.bias_fut_fps) + + self.bias_fut_handler_kwargs = bias_fut_handler_kwargs or {} + self.bias_fut_dh = self.bias_handler(self.bias_fut_fps, + [self.bias_feature], + target=self.target, + shape=self.shape, + **self.bias_fut_handler_kwargs) + + if pre_load: + self.pre_load() + +
+[docs] + def pre_load(self): + """Preload all data needed for bias correction. This is currently + recommended to improve performance with the new sup3r data handler + access patterns""" + super().pre_load() + if hasattr(self.bias_fut_dh, 'compute'): + logger.info('Pre loading future biased data into memory...') + self.bias_fut_dh.data.compute() + logger.info('Finished pre loading future biased data.')
+ + + def _init_out(self): + """Initialize output arrays `self.out` + + Three datasets are saved here with information to reconstruct the + probability distributions for the three datasets (see class + documentation). + """ + keys = [f'bias_{self.bias_feature}_params', + f'bias_fut_{self.bias_feature}_params', + f'base_{self.base_dset}_params', + ] + shape = (*self.bias_gid_raster.shape, self.n_time_steps, + self.n_quantiles) + arr = np.full(shape, np.nan, np.float32) + self.out = {k: arr.copy() for k in keys} + + self.time_window_center = self._window_center(self.n_time_steps) + + @staticmethod + def _window_center(ntimes: int): + """A sequence of equally spaced `ntimes` day of year + + This is used to identify the center of moving windows to apply filters + and masks. For instance, if `ntimes` equal to 12, it would return + 12 equal periods, i.e. approximately the months' center time. + + This is conveniently shifted one half time interval so that December + 31st would be closest to the last interval of the year instead of the + first. + + Leap years are neglected, but if processed here, the doy 366 is + included in the relevant windows as an extra data, which would + cause a negligible error. + + Parameters + ---------- + ntimes : int + Number of intervals in one year. Choose 12 for approximately + monthly time scales. + + Returns + ------- + np.ndarray : + Sequence of days of the year equally spaced and shifted by half + window size, thus `ntimes`=12 results in approximately [15, 45, + ...]. It includes the fraction of a day, thus 15.5 is equivalent + to January 15th, 12:00h. + """ + assert ntimes > 0, "Requires a positive number of intervals" + + dt = 365 / ntimes + return np.arange(dt / 2, 366, dt) + + # pylint: disable=W0613 + @classmethod + def _run_single(cls, + bias_data, + bias_fut_data, + base_fps, + bias_feature, + base_dset, + base_gid, + base_handler, + daily_reduction, + *, + bias_ti, + bias_fut_ti, + decimals, + dist, + relative, + sampling, + n_samples, + log_base, + n_time_steps, + window_size, + base_dh_inst=None, + ): + """Estimate probability distributions at a single site""" + + base_data, base_ti = cls.get_base_data(base_fps, + base_dset, + base_gid, + base_handler, + daily_reduction=daily_reduction, + decimals=decimals, + base_dh_inst=base_dh_inst) + + window_size = window_size or 365 / n_time_steps + window_center = cls._window_center(n_time_steps) + + template = np.full((n_time_steps, n_samples), np.nan, np.float32) + out = {} + + for nt, idt in enumerate(window_center): + base_idx = cls.window_mask(base_ti.day_of_year, idt, window_size) + bias_idx = cls.window_mask(bias_ti.day_of_year, idt, window_size) + bias_fut_idx = cls.window_mask(bias_fut_ti.day_of_year, + idt, + window_size) + + if any(base_idx) and any(bias_idx) and any(bias_fut_idx): + tmp = cls.get_qdm_params(bias_data[bias_idx], + bias_fut_data[bias_fut_idx], + base_data[base_idx], + bias_feature, + base_dset, + sampling, + n_samples, + log_base) + for k, v in tmp.items(): + if k not in out: + out[k] = template.copy() + out[k][(nt), :] = v + + return out + +
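For intuition, the window centers produced by `_window_center` for monthly-scale estimates can be reproduced directly:

import numpy as np

ntimes = 12
dt = 365 / ntimes                  # ~30.42 days per window
centers = np.arange(dt / 2, 366, dt)
# approximately [15.2, 45.6, 76.0, ..., 349.8]: one center per window,
# shifted by half an interval so Dec 31 is closest to the last window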
+[docs] + @staticmethod + def get_qdm_params(bias_data, + bias_fut_data, + base_data, + bias_feature, + base_dset, + sampling, + n_samples, + log_base, + ): + """Get quantiles' cut point for given datasets + + Estimate the quantiles' cut points for each of the three given + datasets. Lacking a good analytical approximation, such as one of + the parametric distributions, those quantiles can be used to + approximate the statistical distribution of those datasets. + + Parameters + ---------- + bias_data : np.ndarray + 1D array of biased data observations. + bias_fut_data : np.ndarray + 1D array of biased data observations. + base_data : np.ndarray + 1D array of base data observations. + bias_feature : str + This is the biased feature from bias_fps to retrieve. This should + be a single feature name corresponding to base_dset. + base_dset : str + A single dataset from the base_fps to retrieve. In the case of wind + components, this can be U_100m or V_100m which will retrieve + windspeed and winddirection and derive the U/V component. + sampling : str + Defines how the quantiles are sampled. For instance, 'linear' will + result in a linearly spaced quantiles. Other options are: 'log' + and 'invlog'. + n_samples : int + Number of points to sample between 0 and 1, i.e. number of + quantiles. + log_base : int | float + Log base value. + + Returns + ------- + out : dict + Dictionary of the quantiles' cut points. Note that to make sense + of those cut point values, one need to know the given arguments + such as `log_base`. For instance, the sequence [-1, 0, 2] are, + if sampling was linear, the minimum, median, and maximum values + respectively. The expected keys are "bias_{bias_feature}_params", + "bias_fut_{bias_feature}_params", and "base_{base_dset}_params". + + See Also + -------- + rex.utilities.bc_utils : Sampling scales, such as `sample_q_linear()` + """ + + if sampling == 'linear': + quantiles = sample_q_linear(n_samples) + elif sampling == 'log': + quantiles = sample_q_log(n_samples, log_base) + elif sampling == 'invlog': + quantiles = sample_q_invlog(n_samples, log_base) + else: + msg = ('sampling option must be linear, log, or invlog, but ' + 'received: {}'.format(sampling) + ) + logger.error(msg) + raise KeyError(msg) + + out = { + f'bias_{bias_feature}_params': np.quantile(bias_data, quantiles), + f'bias_fut_{bias_feature}_params': np.quantile(bias_fut_data, + quantiles), + f'base_{base_dset}_params': np.quantile(base_data, quantiles), + } + + return out
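With linear sampling the cut points are plain quantiles. A numpy-only sketch of the 'linear' branch above, assuming `sample_q_linear(n)` yields a linear spacing over [0, 1]:

import numpy as np

bias_data = np.random.default_rng(0).gamma(2.0, 1.0, 1_000)
quantiles = np.linspace(0, 1, 101)           # stand-in for sample_q_linear
params = np.quantile(bias_data, quantiles)   # one 'bias_{feature}_params' row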
+ + +
+[docs] + def write_outputs(self, fp_out, out=None): + """Write outputs to an .h5 file. + + Parameters + ---------- + fp_out : str | None + An HDF5 filename to write the estimated statistical distributions. + out : dict, optional + A dictionary with the three statistical distribution parameters. + If not given, it uses :attr:`.out`. + """ + + out = out or self.out + + if fp_out is not None: + if not os.path.exists(os.path.dirname(fp_out)): + os.makedirs(os.path.dirname(fp_out), exist_ok=True) + + with h5py.File(fp_out, 'w') as f: + # pylint: disable=E1136 + lat = self.bias_dh.lat_lon[..., 0] + lon = self.bias_dh.lat_lon[..., 1] + f.create_dataset('latitude', data=lat) + f.create_dataset('longitude', data=lon) + for dset, data in out.items(): + f.create_dataset(dset, data=data) + + for k, v in self.meta.items(): + f.attrs[k] = json.dumps(v) + f.attrs["dist"] = self.dist + f.attrs["sampling"] = self.sampling + f.attrs["log_base"] = self.log_base + f.attrs["base_fps"] = self.base_fps + f.attrs["bias_fps"] = self.bias_fps + f.attrs["bias_fut_fps"] = self.bias_fut_fps + f.attrs["time_window_center"] = self.time_window_center + logger.info( + 'Wrote quantiles to file: {}'.format(fp_out))
+ + +
+[docs] + def run(self, + fp_out=None, + max_workers=None, + daily_reduction='avg', + fill_extend=True, + smooth_extend=0, + smooth_interior=0, + ): + """Estimate the statistical distributions for each location + + Parameters + ---------- + fp_out : str | None + Optional .h5 output file to write scalar and adder arrays. + max_workers : int, optional + Number of workers to run in parallel. 1 is serial and None is all + available. + daily_reduction : None | str + Option to do a reduction of the hourly+ source base data to daily + data. Can be None (no reduction, keep source time frequency), "avg" + (daily average), "max" (daily max), "min" (daily min), + "sum" (daily sum/total) + + Returns + ------- + out : dict + Dictionary with parameters defining the statistical distributions + for each of the three given datasets. Each value has dimensions + (lat, lon, n-parameters). + """ + + logger.debug('Calculating CDF parameters for QDM') + + logger.info('Initialized params with shape: {}' + .format(self.bias_gid_raster.shape)) + self.bad_bias_gids = [] + + # sup3r DataHandler opening base files will load all data in parallel + # during the init and should not be passed in parallel to workers + if isinstance(self.base_dh, DataHandler): + max_workers = 1 + + if max_workers == 1: + logger.debug('Running serial calculation.') + for i, bias_gid in enumerate(self.bias_meta.index): + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + logger.debug(f'No base data for bias_gid: {bias_gid}. ' + 'Adding it to bad_bias_gids') + else: + bias_data = self.get_bias_data(bias_gid) + bias_fut_data = self.get_bias_data(bias_gid, + self.bias_fut_dh) + single_out = self._run_single( + bias_data, + bias_fut_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + bias_ti=self.bias_dh.time_index, + bias_fut_ti=self.bias_fut_dh.time_index, + decimals=self.decimals, + dist=self.dist, + relative=self.relative, + sampling=self.sampling, + n_samples=self.n_quantiles, + log_base=self.log_base, + n_time_steps=self.n_time_steps, + window_size=self.window_size, + base_dh_inst=self.base_dh, + ) + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info('Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(self.bias_meta))) + + else: + logger.debug( + 'Running parallel calculation with {} workers.'.format( + max_workers)) + with ProcessPoolExecutor(max_workers=max_workers) as exe: + futures = {} + for bias_gid in self.bias_meta.index: + raster_loc = np.where(self.bias_gid_raster == bias_gid) + _, base_gid = self.get_base_gid(bias_gid) + + if not base_gid.any(): + self.bad_bias_gids.append(bias_gid) + else: + bias_data = self.get_bias_data(bias_gid) + bias_fut_data = self.get_bias_data(bias_gid, + self.bias_fut_dh) + future = exe.submit( + self._run_single, + bias_data, + bias_fut_data, + self.base_fps, + self.bias_feature, + self.base_dset, + base_gid, + self.base_handler, + daily_reduction, + bias_ti=self.bias_dh.time_index, + bias_fut_ti=self.bias_fut_dh.time_index, + decimals=self.decimals, + dist=self.dist, + relative=self.relative, + sampling=self.sampling, + n_samples=self.n_quantiles, + log_base=self.log_base, + n_time_steps=self.n_time_steps, + window_size=self.window_size, + ) + futures[future] = raster_loc + + logger.debug('Finished launching futures.') + for i, future in 
enumerate(as_completed(futures)): + raster_loc = futures[future] + single_out = future.result() + for key, arr in single_out.items(): + self.out[key][raster_loc] = arr + + logger.info('Completed bias calculations for {} out of {} ' + 'sites'.format(i + 1, len(futures))) + + logger.info('Finished calculating bias correction factors.') + + self.out = self.fill_and_smooth(self.out, fill_extend, smooth_extend, + smooth_interior) + + self.write_outputs(fp_out, self.out) + + return copy.deepcopy(self.out)
+ + +
+[docs]
+    @staticmethod
+    def window_mask(doy, d0, window_size):
+        """An index of elements within a given time window
+
+        Create an index of days of the year within the target time window.
+        It only considers the day of the year (doy), hence, it is limited to
+        time scales smaller than annual.
+
+        Parameters
+        ----------
+        doy : np.ndarray
+            An unordered array of days of year, i.e. January 1st is 1.
+        d0 : int
+            Center point of the target window [day of year].
+        window_size : float
+            Total size of the target window, i.e. the window covers half
+            this value on each side of d0. Note that it has the same units
+            as doy, thus it is equal to the number of points only if doy is
+            daily.
+
+        Returns
+        -------
+        np.array
+            A boolean array with the same shape as the given `doy`, where
+            True means that position is within the target window.
+
+        Notes
+        -----
+        Leap years have the day 366 included in the output index, but a
+        precise shift is not calculated, resulting in a negligible error
+        in large datasets.
+        """
+        d_start = d0 - window_size / 2
+        d_end = d0 + window_size / 2
+
+        if d_start < 0:
+            idx = (doy > 365 + d_start) | (doy < d_end)
+        elif d_end > 365:
+            idx = (doy > d_start) | (doy < d_end - 365)
+        else:
+            idx = (doy > d_start) & (doy < d_end)
+
+        return idx
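The wraparound branches are easiest to see with a window that straddles the new year:

import numpy as np

doy = np.arange(1, 366)
mask = QuantileDeltaMappingCorrection.window_mask(doy, d0=5, window_size=30)
# d_start = -10 < 0, so mask is True for doy > 355 or doy < 20,
# i.e. late December plus early January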
+
+ +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/sup3r/bias/utilities.html b/_modules/sup3r/bias/utilities.html new file mode 100644 index 000000000..c3555993f --- /dev/null +++ b/_modules/sup3r/bias/utilities.html @@ -0,0 +1,967 @@ + + + + + + + + + + sup3r.bias.utilities — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for sup3r.bias.utilities

+"""Bias correction methods which can be applied to data handler data."""
+
+import logging
+import os
+from inspect import signature
+from warnings import warn
+
+import numpy as np
+from rex import Resource
+
+import sup3r.bias.bias_transforms
+from sup3r.bias.bias_transforms import _get_spatial_bc_factors, local_qdm_bc
+from sup3r.preprocessing.utilities import (
+    _parse_time_slice,
+    get_date_range_kwargs,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +def lin_bc(handler, bc_files, bias_feature=None, threshold=0.1): + """Bias correct the data in this DataHandler in place using linear bias + correction factors from files output by MonthlyLinearCorrection or + LinearCorrection from sup3r.bias.bias_calc + + Parameters + ---------- + handler : DataHandler + DataHandler instance with `.data` attribute containing data to + bias correct + bc_files : list | tuple | str + One or more filepaths to .h5 files output by + MonthlyLinearCorrection or LinearCorrection. These should contain + datasets named "{feature}_scalar" and "{feature}_adder" where + {feature} is one of the features contained by this DataHandler and + the data is a 3D array of shape (lat, lon, time) where time is + length 1 for annual correction or 12 for monthly correction. + bias_feature : str | None + Name of the feature used as a reference. Dataset with + name "base_{bias_feature}_scalar" and + "base_{bias_feature}_adder" will be retrieved from ``bc_files``. + threshold : float + Nearest neighbor euclidean distance threshold. If the DataHandler + coordinates are more than this value away from the bias correction + lat/lon, an error is raised. + """ + + if isinstance(bc_files, str): + bc_files = [bc_files] + + completed = [] + for feature in handler.features: + for fp in bc_files: + ref_feature = ( + feature if bias_feature is None else bias_feature + ) + dset_scalar = f'{ref_feature}_scalar' + dset_adder = f'{ref_feature}_adder' + with Resource(fp) as res: + dsets = [dset.lower() for dset in res.dsets] + check = ( + dset_scalar.lower() in dsets + and dset_adder.lower() in dsets + ) + if feature not in completed and check: + out = _get_spatial_bc_factors( + lat_lon=handler.lat_lon, + feature_name=ref_feature, + bias_fp=fp, + threshold=threshold, + ) + scalar, adder = out['scalar'], out['adder'] + + if scalar.shape[-1] == 1: + scalar = np.repeat(scalar, handler.shape[2], axis=2) + adder = np.repeat(adder, handler.shape[2], axis=2) + elif scalar.shape[-1] == 12: + idm = handler.time_index.month.values - 1 + scalar = scalar[..., idm] + adder = adder[..., idm] + else: + msg = ( + 'Can only accept bias correction factors ' + 'with last dim equal to 1 or 12 but ' + 'received bias correction factors with ' + 'shape {}'.format(scalar.shape) + ) + logger.error(msg) + raise RuntimeError(msg) + + logger.info( + 'Bias correcting "{}" with linear ' + 'correction from "{}"'.format( + feature, os.path.basename(fp) + ) + ) + handler.data[feature] = ( + scalar * handler.data[feature][...] + adder + ) + completed.append(feature)
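The correction applied above is elementwise linear. A minimal sketch of the arithmetic with made-up annual factors broadcast along the time axis, mirroring the repeat logic in the function:

import numpy as np

data = np.random.default_rng(0).normal(10, 2, (4, 4, 24))  # (lat, lon, time)
scalar = np.full((4, 4, 1), 1.1)   # '{feature}_scalar' with length-1 time
adder = np.full((4, 4, 1), -0.5)   # '{feature}_adder' with length-1 time

scalar = np.repeat(scalar, data.shape[2], axis=2)
adder = np.repeat(adder, data.shape[2], axis=2)
corrected = scalar * data + adder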
+ + + +
+[docs]
+def qdm_bc(
+    handler,
+    bc_files,
+    bias_feature,
+    relative=True,
+    threshold=0.1,
+    no_trend=False,
+):
+    """Bias Correction using Quantile Delta Mapping
+
+    Bias correct this DataHandler's data with Quantile Delta Mapping. The
+    required statistical distributions should be pre-calculated using
+    :class:`sup3r.bias.qdm.QuantileDeltaMappingCorrection`.
+
+    Warning: There is no guarantee that the coefficients from ``bc_files``
+    match the resource processed here. Be careful choosing ``bc_files``.
+
+    Parameters
+    ----------
+    handler : DataHandler
+        DataHandler instance with `.data` attribute containing data to
+        bias correct
+    bc_files : list | tuple | str
+        One or more filepaths to .h5 files output by
+        :class:`~sup3r.bias.qdm.QuantileDeltaMappingCorrection`. These
+        should contain datasets named "base_{bias_feature}_params",
+        "bias_{feature}_params", and "bias_fut_{feature}_params" where
+        {feature} is one of the features contained by this DataHandler and
+        the data is an array of shape (lat, lon, time, n-quantiles), where
+        the time dimension corresponds to the time windows used to estimate
+        the parameters.
+    bias_feature : str
+        Name of the feature used as (historical) reference. Dataset with
+        name "base_{bias_feature}_params" will be retrieved from
+        ``bc_files``.
+    relative : bool, default=True
+        Switcher to apply QDM as a relative (use True) or absolute (use
+        False) correction value.
+    threshold : float, default=0.1
+        Nearest neighbor euclidean distance threshold. If the DataHandler
+        coordinates are more than this value away from the bias correction
+        lat/lon, an error is raised.
+    no_trend : bool, default=False
+        An option to ignore the trend component of the correction, thus
+        resulting in an ordinary Quantile Mapping, i.e. corrects the bias
+        by comparing the distributions of the biased dataset with a
+        reference dataset. See ``params_mf`` of
+        :class:`rex.utilities.bc_utils.QuantileDeltaMapping`.
+        Note that this assumes that "bias_{feature}_params"
+        (``params_mh``) is the data distribution representative for the
+        target data.
+    """
+
+    if isinstance(bc_files, str):
+        bc_files = [bc_files]
+
+    completed = []
+    dr_kwargs = get_date_range_kwargs(handler.time_index)
+    for feature in handler.features:
+        dset_bc_hist = f'bias_{feature}_params'
+        dset_bc_fut = f'bias_fut_{feature}_params'
+
+        for fp in bc_files:
+            with Resource(fp) as res:
+                check = dset_bc_hist in res.dsets and dset_bc_fut in res.dsets
+
+            if feature not in completed and check:
+                logger.info(
+                    'Bias correcting "{}" with QDM '
+                    'correction from "{}"'.format(
+                        feature, os.path.basename(fp)
+                    )
+                )
+                handler.data[feature] = local_qdm_bc(
+                    handler.data[feature][...],
+                    handler.lat_lon,
+                    bias_feature,
+                    feature,
+                    bias_fp=fp,
+                    date_range_kwargs=dr_kwargs,
+                    threshold=threshold,
+                    relative=relative,
+                    no_trend=no_trend,
+                )
+                completed.append(feature)
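A hypothetical call; `handler` would be a previously initialized DataHandler and the parameter file below is a placeholder output of QuantileDeltaMappingCorrection:

qdm_bc(
    handler,                      # existing DataHandler instance
    bc_files='qdm_params.h5',     # placeholder parameter file
    bias_feature='rsds',
    relative=True,
    no_trend=False,
)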
+ + + +
+[docs] +def bias_correct_feature( + source_feature, + input_handler, + bc_method, + bc_kwargs, + time_slice=None, +): + """Bias correct data using a method defined by the bias_correct_method + input to :class:`ForwardPassStrategy` + + Parameters + ---------- + source_feature : str + The source feature name corresponding to the output feature name + input_handler : DataHandler + DataHandler storing raw input data previously used as input for + forward passes. This is assumed to have data with shape (lats, lons, + time, features), which can be accessed through the handler with + handler[feature, lat_slice, lon_slice, time_slice] + bc_method : Callable + Bias correction method from `bias_transforms.py` + bc_kwargs : dict + Dictionary of keyword arguments for bc_method + time_slice : slice | None + Optional time slice to restrict bias correction domain + + Returns + ------- + data : Union[np.ndarray, da.core.Array] + Data corrected by the bias_correct_method ready for input to the + forward pass through the generative model. + """ + time_slice = _parse_time_slice(time_slice) + data = input_handler[source_feature][..., time_slice] + lat_lon = input_handler.lat_lon + if bc_method is not None: + bc_method = getattr(sup3r.bias.bias_transforms, bc_method) + logger.info(f'Running bias correction with: {bc_method}.') + feature_kwargs = bc_kwargs[source_feature] + + if 'date_range_kwargs' in signature(bc_method).parameters: + ti = input_handler.time_index[time_slice] + feature_kwargs['date_range_kwargs'] = get_date_range_kwargs(ti) + if ( + 'lr_padded_slice' in signature(bc_method).parameters + and 'lr_padded_slice' not in feature_kwargs + ): + feature_kwargs['lr_padded_slice'] = None + if ( + 'temporal_avg' in signature(bc_method).parameters + and 'temporal_avg' not in feature_kwargs + ): + msg = ( + 'The kwarg "temporal_avg" was not provided in the bias ' + 'correction kwargs but is present in the bias ' + 'correction function "{}". If this is not set ' + 'appropriately, especially for monthly bias ' + 'correction, it could result in QA results that look ' + 'worse than they actually are.'.format(bc_method) + ) + logger.warning(msg) + warn(msg) + + msg = ( + f'Bias correcting source_feature "{source_feature}" using ' + f'function: {bc_method} with kwargs: {feature_kwargs}' + ) + logger.debug(msg) + + data = bc_method(data, lat_lon, **feature_kwargs) + return data
+ + + +
+[docs] +def bias_correct_features( + features, + input_handler, + bc_method, + bc_kwargs, + time_slice=None, +): + """Bias correct all feature data using a method defined by the + bias_correct_method input to :class:`ForwardPassStrategy` + + See Also + -------- + :func:`bias_correct_feature` + """ + + time_slice = _parse_time_slice(time_slice) + for feat in features: + try: + input_handler[feat][..., time_slice] = bias_correct_feature( + source_feature=feat, + input_handler=input_handler, + time_slice=time_slice, + bc_method=bc_method, + bc_kwargs=bc_kwargs, + ) + except Exception as e: + msg = ( + f'Could not run bias correction method {bc_method} on ' + f'feature {feat} time slice {time_slice} with input ' + f'handler of class {type(input_handler)} with shape ' + f'{input_handler.shape}. Received error: {e}' + ) + logger.exception(msg) + raise RuntimeError(msg) from e + return input_handler
+ +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/sup3r/models/abstract.html b/_modules/sup3r/models/abstract.html new file mode 100644 index 000000000..14455949a --- /dev/null +++ b/_modules/sup3r/models/abstract.html @@ -0,0 +1,2383 @@ + + + + + + + + + + sup3r.models.abstract — sup3r 0.2.1.dev62 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + + +
+
+ + + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for sup3r.models.abstract

+"""Abstract class defining the required interface for Sup3r model subclasses"""
+
+import json
+import locale
+import logging
+import os
+import pprint
+import re
+import time
+from abc import ABC, abstractmethod
+from concurrent.futures import ThreadPoolExecutor
+from inspect import signature
+from warnings import warn
+
+import numpy as np
+import tensorflow as tf
+from phygnn import CustomNetwork
+from phygnn.layers.custom_layers import Sup3rAdder, Sup3rConcat
+from rex.utilities.utilities import safe_json_load
+from tensorflow.keras import optimizers
+
+import sup3r.utilities.loss_metrics
+from sup3r.preprocessing.data_handlers import ExoData
+from sup3r.preprocessing.utilities import numpy_if_tensor
+from sup3r.utilities import VERSION_RECORD
+from sup3r.utilities.utilities import Timer, safe_cast
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class TensorboardMixIn:
+    """MixIn class for tensorboard logging and profiling."""
+
+    def __init__(self):
+        self._tb_writer = None
+        self._tb_log_dir = None
+        self._write_tb_profile = False
+        self._total_batches = None
+        self._history = None
+        self.timer = Timer()
+
+    @property
+    def total_batches(self):
+        """Record of total number of batches for logging."""
+        if self._total_batches is None and self._history is None:
+            self._total_batches = 0
+        elif self._history is not None and 'total_batches' in self._history:
+            self._total_batches = self._history['total_batches'].values[-1]
+        elif self._total_batches is None and self._history is not None:
+            self._total_batches = 0
+        return self._total_batches
+
+    @total_batches.setter
+    def total_batches(self, value):
+        """Set total number of batches."""
+        self._total_batches = value
+[docs] + def dict_to_tensorboard(self, entry): + """Write data to tensorboard log file. This is usually a loss_details + dictionary. + + Parameters + ---------- + entry: dict + Dictionary of values to write to tensorboard log file + """ + if self._tb_writer is not None: + with self._tb_writer.as_default(): + for name, value in entry.items(): + if isinstance(value, str): + tf.summary.text(name, value, self.total_batches) + else: + tf.summary.scalar(name, value, self.total_batches)
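A standalone illustration of what the writer receives in the method above; the log directory is a placeholder:

import tensorflow as tf

writer = tf.summary.create_file_writer('./logs')     # placeholder dir
loss_details = {'train_loss_gen': 0.42, 'train_loss_disc': 0.61}
with writer.as_default():
    for name, value in loss_details.items():
        tf.summary.scalar(name, value, step=0)       # step ~ total_batches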
+ + +
+[docs] + def profile_to_tensorboard(self, name): + """Write profile data to tensorboard log file. + + Parameters + ---------- + name : str + Tag name to use for profile info + """ + if self._tb_writer is not None and self._write_tb_profile: + with self._tb_writer.as_default(): + tf.summary.trace_export( + name=name, + step=self.total_batches, + profiler_outdir=self._tb_log_dir, + )
+ + + def _init_tensorboard_writer(self, out_dir): + """Initialize the ``tf.summary.SummaryWriter`` to use for writing + tensorboard compatible log files. + + Parameters + ---------- + out_dir : str + Standard out_dir where model epochs are saved. e.g. './gan_{epoch}' + """ + tb_log_pardir = os.path.abspath(os.path.join(out_dir, os.pardir)) + self._tb_log_dir = os.path.join(tb_log_pardir, 'logs') + os.makedirs(self._tb_log_dir, exist_ok=True) + self._tb_writer = tf.summary.create_file_writer(self._tb_log_dir)
+ + + +
+[docs] +class AbstractInterface(ABC): + """ + Abstract class to define the required interface for Sup3r model subclasses + + Note that this only sets the required interfaces for a GAN that can be + loaded from disk and used to predict synthetic outputs. The interface for + models that can be trained will be set in another class. + """ + +
+[docs] + @classmethod + @abstractmethod + def load(cls, model_dir, verbose=True): + """Load the GAN with its sub-networks from a previously saved-to output + directory. + + Parameters + ---------- + model_dir + Directory to load GAN model files from. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : BaseModel + Returns a pretrained gan model that was previously saved to + model_dir + """
+ + +
+[docs] + @abstractmethod + def generate( + self, low_res, norm_in=True, un_norm_out=True, exogenous_data=None + ): + """Use the generator model to generate high res data from low res + input. This is the public generate function."""
+ + +
+[docs] + @staticmethod + def seed(s=0): + """ + Set the random seed for reproducible results. + + Parameters + ---------- + s : int + Random seed + """ + CustomNetwork.seed(s=s)
+ + + @property + def input_dims(self): + """Get dimension of model generator input. This is usually 4D for + spatial models and 5D for spatiotemporal models. This gives the input + to the first step if the model is multi-step. Returns 5 for linear + models. + + Returns + ------- + int + """ + # pylint: disable=E1101 + if hasattr(self, '_gen'): + return self._gen.layers[0].rank + if hasattr(self, 'models'): + return self.models[0].input_dims + return 5 + + @property + def is_5d(self): + """Check if model expects spatiotemporal input""" + return self.input_dims == 5 + + @property + def is_4d(self): + """Check if model expects spatial only input""" + return self.input_dims == 4 + + # pylint: disable=E1101 +
+[docs] + def get_s_enhance_from_layers(self): + """Compute factor by which model will enhance spatial resolution from + layer attributes. Used in model training during high res coarsening""" + s_enhance = None + if hasattr(self, '_gen'): + s_enhancements = [ + getattr(layer, '_spatial_mult', 1) + for layer in self._gen.layers + ] + s_enhance = int(np.prod(s_enhancements)) + return s_enhance
+ + + # pylint: disable=E1101 +
+[docs] + def get_t_enhance_from_layers(self): + """Compute factor by which model will enhance temporal resolution from + layer attributes. Used in model training during high res coarsening""" + t_enhance = None + if hasattr(self, '_gen'): + t_enhancements = [ + getattr(layer, '_temporal_mult', 1) + for layer in self._gen.layers + ] + t_enhance = int(np.prod(t_enhancements)) + return t_enhance
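The two helpers above just take a product of per-layer expansion attributes. A self-contained sketch with stand-in layer objects (``_FakeLayer`` is purely illustrative; real generators expose ``_spatial_mult``/``_temporal_mult`` on their expansion layers):

import numpy as np

class _FakeLayer:
    def __init__(self, s_mult=1, t_mult=1):
        self._spatial_mult = s_mult
        self._temporal_mult = t_mult

# two 2x spatial expansions and one 3x temporal expansion
layers = [_FakeLayer(s_mult=2), _FakeLayer(s_mult=2), _FakeLayer(t_mult=3)]
s_enhance = int(np.prod([getattr(lyr, '_spatial_mult', 1) for lyr in layers]))
t_enhance = int(np.prod([getattr(lyr, '_temporal_mult', 1) for lyr in layers]))
assert (s_enhance, t_enhance) == (4, 3)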
+ + + @property + def s_enhance(self): + """Factor by which model will enhance spatial resolution. Used in + model training during high res coarsening and also in forward pass + routine to determine shape of needed exogenous data""" + models = getattr(self, 'models', [self]) + s_enhances = [m.meta.get('s_enhance', None) for m in models] + s_enhance = ( + self.get_s_enhance_from_layers() + if any(s is None for s in s_enhances) + else int(np.prod(s_enhances)) + ) + if len(models) == 1: + self.meta['s_enhance'] = s_enhance + return s_enhance + + @property + def t_enhance(self): + """Factor by which model will enhance temporal resolution. Used in + model training during high res coarsening and also in forward pass + routine to determine shape of needed exogenous data""" + models = getattr(self, 'models', [self]) + t_enhances = [m.meta.get('t_enhance', None) for m in models] + t_enhance = ( + self.get_t_enhance_from_layers() + if any(t is None for t in t_enhances) + else int(np.prod(t_enhances)) + ) + if len(models) == 1: + self.meta['t_enhance'] = t_enhance + return t_enhance + + @property + def s_enhancements(self): + """List of spatial enhancement factors. In the case of a single step + model this is just ``[self.s_enhance]``. This is used to determine + shapes of needed exogenous data in forward pass routine""" + if hasattr(self, 'models'): + return [model.s_enhance for model in self.models] + return [self.s_enhance] + + @property + def t_enhancements(self): + """List of temporal enhancement factors. In the case of a single step + model this is just ``[self.t_enhance]``. This is used to determine + shapes of needed exogenous data in forward pass routine""" + if hasattr(self, 'models'): + return [model.t_enhance for model in self.models] + return [self.t_enhance] + + @property + def input_resolution(self): + """Resolution of input data. Given as a dictionary + ``{'spatial': '...km', 'temporal': '...min'}``. The numbers are + required to be integers in the units specified. The units are not + strict as long as the resolution of the exogenous data, when extracting + exogenous data, is specified in the same units.""" + input_resolution = self.meta.get('input_resolution', None) + msg = 'model.input_resolution is None. This needs to be set.' + assert input_resolution is not None, msg + return input_resolution + + def _get_numerical_resolutions(self): + """Get the input and output resolutions without units. e.g. 
for + ``{"spatial": "30km", "temporal": "60min"}`` this returns + ``{"spatial": 30, "temporal": 60}``""" + ires_num = { + k: int(re.search(r'\d+', v).group(0)) + for k, v in self.input_resolution.items() + } + enhancements = {'spatial': self.s_enhance, 'temporal': self.t_enhance} + ores_num = {k: v // enhancements[k] for k, v in ires_num.items()} + return ires_num, ores_num + + def _ensure_valid_input_resolution(self): + """Ensure enhancement factors evenly divide input_resolution""" + + if self.input_resolution is None: + return + + ires_num, ores_num = self._get_numerical_resolutions() + s_enhance = self.meta['s_enhance'] + t_enhance = self.meta['t_enhance'] + check = ( + ires_num['temporal'] / ores_num['temporal'] == t_enhance + and ires_num['spatial'] / ores_num['spatial'] == s_enhance + ) + msg = ( + f'Enhancement factors (s_enhance={s_enhance}, ' + f't_enhance={t_enhance}) do not evenly divide ' + f'input resolution ({self.input_resolution})' + ) + if not check: + logger.error(msg) + raise RuntimeError(msg) + + def _ensure_valid_enhancement_factors(self): + """Ensure user provided enhancement factors are the same as those + computed from layer attributes""" + t_enhance = self.meta.get('t_enhance', None) + s_enhance = self.meta.get('s_enhance', None) + if s_enhance is None or t_enhance is None: + return + + layer_se = self.get_s_enhance_from_layers() + layer_te = self.get_t_enhance_from_layers() + layer_se = layer_se if layer_se is not None else self.meta['s_enhance'] + layer_te = layer_te if layer_te is not None else self.meta['t_enhance'] + msg = ( + f'Enhancement factors computed from layer attributes ' + f'(s_enhance={layer_se}, t_enhance={layer_te}) ' + f'conflict with user provided values (s_enhance={s_enhance}, ' + f't_enhance={t_enhance})' + ) + check = layer_se == s_enhance and layer_te == t_enhance + if not check: + logger.error(msg) + raise RuntimeError(msg) + + @property + def output_resolution(self): + """Resolution of output data. Given as a dictionary + {'spatial': '...km', 'temporal': '...min'}. This is computed from the + input resolution and the enhancement factors.""" + output_res = self.meta.get('output_resolution', None) + if self.input_resolution is not None and output_res is None: + ires_num, ores_num = self._get_numerical_resolutions() + output_res = { + k: v.replace(str(ires_num[k]), str(ores_num[k])) + for k, v in self.input_resolution.items() + } + self.meta['output_resolution'] = output_res + return output_res + + def _combine_fwp_input(self, low_res, exogenous_data=None): + """Combine exogenous_data at input resolution with low_res data prior + to forward pass through generator + + Parameters + ---------- + low_res : np.ndarray + Low-resolution input data, usually a 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + exogenous_data : dict | ExoData | None + Special dictionary (class:`ExoData`) of exogenous feature data with + entries describing whether features should be combined at input, a + mid network layer, or with output. This doesn't have to include + the 'model' key since this data is for a single step model.
+ + Returns + ------- + low_res : np.ndarray + Low-resolution input data combined with exogenous_data, usually a + 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + """ + if exogenous_data is None: + return low_res + + if ( + not isinstance(exogenous_data, ExoData) + and exogenous_data is not None + ): + exogenous_data = ExoData(exogenous_data) + + fnum_diff = len(self.lr_features) - low_res.shape[-1] + exo_feats = [] if fnum_diff <= 0 else self.lr_features[-fnum_diff:] + msg = ( + f'Provided exogenous_data: {exogenous_data} is missing some ' + f'required features ({exo_feats})' + ) + assert all(feature in exogenous_data for feature in exo_feats), msg + if exogenous_data is not None and fnum_diff > 0: + for feature in exo_feats: + exo_input = exogenous_data.get_combine_type_data( + feature, 'input' + ) + if exo_input is not None: + low_res = np.concatenate((low_res, exo_input), axis=-1) + + return low_res + + def _combine_fwp_output(self, hi_res, exogenous_data=None): + """Combine exogenous_data at output resolution with generated hi_res + data following forward pass output. + + Parameters + ---------- + hi_res : np.ndarray + High-resolution output data, usually a 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + exogenous_data : dict | ExoData | None + Special dictionary (class:`ExoData`) of exogenous feature data with + entries describing whether features should be combined at input, a + mid network layer, or with output. This doesn't have to include + the 'model' key since this data is for a single step model. + + Returns + ------- + hi_res : np.ndarray + High-resolution output data combined with exogenous_data, usually a + 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + """ + if exogenous_data is None: + return hi_res + + if ( + not isinstance(exogenous_data, ExoData) + and exogenous_data is not None + ): + exogenous_data = ExoData(exogenous_data) + + fnum_diff = len(self.hr_out_features) - hi_res.shape[-1] + exo_feats = [] if fnum_diff <= 0 else self.hr_out_features[-fnum_diff:] + msg = ( + 'Provided exogenous_data is missing some required features ' + f'({exo_feats})' + ) + assert all(feature in exogenous_data for feature in exo_feats), msg + if exogenous_data is not None and fnum_diff > 0: + for feature in exo_feats: + exo_output = exogenous_data.get_combine_type_data( + feature, 'output' + ) + if exo_output is not None: + hi_res = np.concatenate((hi_res, exo_output), axis=-1) + return hi_res + + @tf.function + def _combine_loss_input(self, high_res_true, high_res_gen): + """Combine exogenous feature data from high_res_true with high_res_gen + for loss calculation + + Parameters + ---------- + high_res_true : tf.Tensor + Ground truth high resolution spatiotemporal data. + high_res_gen : tf.Tensor + Superresolved high resolution spatiotemporal data generated by the + generative model. 
+ + Returns + ------- + high_res_gen : tf.Tensor + Same as input with exogenous data combined with high_res input + """ + if high_res_true.shape[-1] > high_res_gen.shape[-1]: + for feature in self.hr_exo_features: + f_idx = self.hr_exo_features.index(feature) + f_idx += len(self.hr_out_features) + exo_data = high_res_true[..., f_idx : f_idx + 1] + high_res_gen = tf.concat((high_res_gen, exo_data), axis=-1) + return high_res_gen + + @property + @abstractmethod + def meta(self): + """Get meta data dictionary that defines how the model was created""" + + @property + def lr_features(self): + """Get a list of low-resolution features input to the generative model. + This includes low-resolution features that might be supplied + exogenously at inference time but that were in the low-res batches + during training""" + return self.meta.get('lr_features', []) + + @property + def hr_out_features(self): + """Get the list of high-resolution output feature names that the + generative model outputs.""" + return self.meta.get('hr_out_features', []) + + @property + def hr_exo_features(self): + """Get list of high-resolution exogenous feature names the model uses. + If the model has N concat or add layers this list will be the last N + features in the training features list. The ordering is assumed to be + the same as the order of concat or add layers. If the training features + are [..., topo, sza] and the model has 2 concat or add layers, exo + features will be [topo, sza]. Topo will then be used in the first + concat layer and sza will be used in the second""" + # pylint: disable=E1101 + features = [] + if hasattr(self, '_gen'): + features = [ + layer.name + for layer in self._gen.layers + if isinstance(layer, (Sup3rAdder, Sup3rConcat)) + ] + return features + + @property + def smoothing(self): + """Value of smoothing parameter used in gaussian filtering of coarsened + high res data.""" + return self.meta.get('smoothing', None) + + @property + def smoothed_features(self): + """Get the list of smoothed input feature names that the generative + model was trained on.""" + return self.meta.get('smoothed_features', []) + + @property + def model_params(self): + """ + Model parameters, used to save the model to disk + + Returns + ------- + dict + """ + return {'meta': self.meta} + + @property + def version_record(self): + """A record of important versions that this model was built with. + + Returns + ------- + dict + """ + return VERSION_RECORD + +
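A worked example of the resolution bookkeeping in ``_get_numerical_resolutions`` and ``output_resolution`` above, as a standalone sketch: the numeric part is parsed out of each unit string and divided by the matching enhancement factor.

import re

input_resolution = {'spatial': '30km', 'temporal': '60min'}
enhancements = {'spatial': 3, 'temporal': 4}  # s_enhance, t_enhance

ires_num = {k: int(re.search(r'\d+', v).group(0))
            for k, v in input_resolution.items()}
ores_num = {k: v // enhancements[k] for k, v in ires_num.items()}
output_resolution = {k: v.replace(str(ires_num[k]), str(ores_num[k]))
                     for k, v in input_resolution.items()}
assert output_resolution == {'spatial': '10km', 'temporal': '15min'}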
+[docs] + def set_model_params(self, **kwargs): + """Set parameters used for training the model + + Parameters + ---------- + kwargs : dict + Keyword arguments including 'input_resolution', + 'lr_features', 'hr_exo_features', 'hr_out_features', + 'smoothed_features', 's_enhance', 't_enhance', 'smoothing' + """ + + keys = ( + 'input_resolution', + 'lr_features', + 'hr_exo_features', + 'hr_out_features', + 'smoothed_features', + 's_enhance', + 't_enhance', + 'smoothing', + ) + keys = [k for k in keys if k in kwargs] + + hr_exo_feat = kwargs.get('hr_exo_features', []) + msg = ( + f'Expected high-res exo features {self.hr_exo_features} ' + f'based on model architecture but received "hr_exo_features" ' + f'from data handler: {hr_exo_feat}' + ) + assert list(self.hr_exo_features) == list(hr_exo_feat), msg + + for var in keys: + val = self.meta.get(var, None) + if val is None: + self.meta[var] = kwargs[var] + elif val != kwargs[var]: + msg = ( + 'Model was previously trained with {var}={} but ' + 'received new {var}={}'.format(val, kwargs[var], var=var) + ) + logger.warning(msg) + warn(msg) + + self._ensure_valid_enhancement_factors() + self._ensure_valid_input_resolution()
+ + +
+[docs] + def save_params(self, out_dir): + """ + Parameters + ---------- + out_dir : str + Directory to save model params to. This directory will be + created if it does not already exist. + """ + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + fp_params = os.path.join(out_dir, 'model_params.json') + with open( + fp_params, 'w', encoding=locale.getpreferredencoding(False) + ) as f: + params = self.model_params + json.dump(params, f, sort_keys=True, indent=2, default=safe_cast)
+
+ + + +# pylint: disable=E1101,W0201,E0203 +
+[docs] +class AbstractSingleModel(ABC, TensorboardMixIn): + """ + Abstract class to define the required training interface + for Sup3r model subclasses + """ + + def __init__(self): + super().__init__() + self.gpu_list = tf.config.list_physical_devices('GPU') + self.default_device = '/cpu:0' if len(self.gpu_list) == 0 else '/gpu:0' + self._version_record = VERSION_RECORD + self.name = None + self._meta = None + self.loss_name = None + self.loss_fun = None + self._history = None + self._optimizer = None + self._gen = None + self._means = None + self._stdevs = None + +
+[docs] + def load_network(self, model, name): + """Load a CustomNetwork object from hidden layers config, .json file + config, or .pkl file saved pre-trained model. + + Parameters + ---------- + model : str | dict + Model hidden layers config, a .json with "hidden_layers" key, or a + .pkl for a saved pre-trained model. + name : str + Name of the model to be loaded + + Returns + ------- + model : phygnn.CustomNetwork + CustomNetwork object initialized from the model input. + """ + + if isinstance(model, str) and model.endswith('.json'): + model = safe_json_load(model) + self._meta[f'config_{name}'] = model + if 'hidden_layers' in model: + model = model['hidden_layers'] + elif ( + 'meta' in model + and f'config_{name}' in model['meta'] + and 'hidden_layers' in model['meta'][f'config_{name}'] + ): + model = model['meta'][f'config_{name}']['hidden_layers'] + else: + msg = ( + 'Could not load model from json config, need ' + '"hidden_layers" key or ' + f'"meta/config_{name}/hidden_layers" ' + ' at top level but only found: {}'.format(model.keys()) + ) + logger.error(msg) + raise KeyError(msg) + + elif isinstance(model, str) and model.endswith('.pkl'): + with tf.device(self.default_device): + model = CustomNetwork.load(model) + + if isinstance(model, list): + model = CustomNetwork(hidden_layers=model, name=name) + + if not isinstance(model, CustomNetwork): + msg = ( + 'Something went wrong. Tried to load a custom network ' + 'but ended up with a model of type "{}"'.format(type(model)) + ) + logger.error(msg) + raise TypeError(msg) + + return model
+ + + @property + def means(self): + """Get the data normalization mean values. + + Returns + ------- + np.ndarray + """ + return self._means + + @property + def stdevs(self): + """Get the data normalization standard deviation values. + + Returns + ------- + np.ndarray + """ + return self._stdevs + +
+[docs] + def set_norm_stats(self, new_means, new_stdevs): + """Set the normalization statistics associated with a data batch + handler to model attributes. + + Parameters + ---------- + new_means : dict | None + Set of mean values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + new_stdevs : dict | None + Set of stdev values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + """ + + if new_means is not None and new_stdevs is not None: + logger.info('Setting new normalization statistics...') + logger.info( + "Model's previous data mean values:\n%s", + pprint.pformat(self._means, indent=2), + ) + logger.info( + "Model's previous data stdev values:\n%s", + pprint.pformat(self._stdevs, indent=2), + ) + + self._means = {k: np.float32(v) for k, v in new_means.items()} + self._stdevs = {k: np.float32(v) for k, v in new_stdevs.items()} + + if not isinstance(self._means, dict) or not isinstance( + self._stdevs, dict + ): + msg = ( + 'Means and stdevs need to be dictionaries with keys as ' + 'feature names but received means of type ' + f'{type(self._means)} and ' + f'stdevs of type {type(self._stdevs)}' + ) + logger.error(msg) + raise TypeError(msg) + + missing = [f for f in self.lr_features if f not in self._means] + missing += [ + f for f in self.hr_exo_features if f not in self._means + ] + missing += [ + f for f in self.hr_out_features if f not in self._means + ] + if any(missing): + msg = ( + f'Need means for features "{missing}" but did not find ' + f'in new means array: {self._means}' + ) + logger.error(msg) + raise KeyError(msg) + + logger.info( + 'Set data normalization mean values:\n%s', + pprint.pformat(self._means, indent=2), + ) + logger.info( + 'Set data normalization stdev values:\n%s', + pprint.pformat(self._stdevs, indent=2), + )
+ + +
+[docs] + def norm_input(self, low_res): + """Normalize low resolution data being input to the generator. + + Parameters + ---------- + low_res : np.ndarray + Un-normalized low-resolution input data in physical units, usually + a 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + + Returns + ------- + low_res : np.ndarray + Normalized low-resolution input data, usually a 4D or 5D array of + shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + """ + if self._means is not None: + if isinstance(low_res, tf.Tensor): + low_res = low_res.numpy() + + missing = [fn for fn in self.lr_features if fn not in self._means] + if any(missing): + msg = ( + f'Could not find low-res input features {missing} in ' + f'means/stdevs: {self._means}/{self._stdevs}' + ) + logger.error(msg) + raise KeyError(msg) + + means = np.array([self._means[fn] for fn in self.lr_features]) + stdevs = np.array([self._stdevs[fn] for fn in self.lr_features]) + if any(stdevs == 0): + stdevs = np.where(stdevs == 0, 1, stdevs) + msg = 'Some standard deviations are zero.' + logger.warning(msg) + warn(msg) + low_res = (low_res.copy() - means) / stdevs + + return low_res
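The transform above is a plain feature-wise standardization broadcast over the trailing feature axis. A small numeric sketch (the feature names are illustrative):

import numpy as np

lr_features = ['u_10m', 'v_10m']
means = {'u_10m': 2.0, 'v_10m': -1.0}
stdevs = {'u_10m': 4.0, 'v_10m': 2.0}

low_res = np.ones((1, 4, 4, len(lr_features)), dtype=np.float32)  # 4D batch
m = np.array([means[f] for f in lr_features])
s = np.array([stdevs[f] for f in lr_features])
norm = (low_res - m) / s  # broadcasts over the last (feature) axis
assert np.allclose(norm[..., 0], -0.25) and np.allclose(norm[..., 1], 1.0)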
+ + +
+[docs] + def un_norm_output(self, output): + """Un-normalize synthetically generated output data to physical units + + Parameters + ---------- + output : tf.Tensor | np.ndarray + Synthetically generated high-resolution data + + Returns + ------- + output : np.ndarray + Synthetically generated high-resolution data + """ + if self._means is not None: + if isinstance(output, tf.Tensor): + output = output.numpy() + + missing = [ + fn for fn in self.hr_out_features if fn not in self._means + ] + if any(missing): + msg = ( + f'Could not find high-res output features {missing} in ' + f'means/stdevs: {self._means}/{self._stdevs}' + ) + logger.error(msg) + raise KeyError(msg) + + means = [self._means[fn] for fn in self.hr_out_features] + stdevs = [self._stdevs[fn] for fn in self.hr_out_features] + means = np.array(means) + stdevs = np.array(stdevs) + + output = (output * stdevs) + means + + return output
+ + + @property + def optimizer(self): + """Get the tensorflow optimizer to perform gradient descent + calculations for the generative network. This is functionally identical + to optimizer_disc if no special optimizer or learning rate was + specified for the disc. + + Returns + ------- + tf.keras.optimizers.Optimizer + """ + return self._optimizer + + @property + def history(self): + """ + Model training history DataFrame (None if not yet trained) + + Returns + ------- + pandas.DataFrame | None + """ + return self._history + + @property + def generator(self): + """Get the generative model. + + Returns + ------- + phygnn.base.CustomNetwork + """ + return self._gen + + @property + def generator_weights(self): + """Get a list of layer weights and bias terms for the generator model. + + Returns + ------- + list + """ + return self.generator.weights + +
+[docs] + @staticmethod + def init_optimizer(optimizer, learning_rate): + """Initialize keras optimizer object. + + Parameters + ---------- + optimizer : tf.keras.optimizers.Optimizer | dict | None | str + Instantiated tf.keras.optimizers object or a dict optimizer config + from tf.keras.optimizers.get_config(). None defaults to Adam. + learning_rate : float, optional + Optimizer learning rate. Not used if optimizer input arg is a + pre-initialized object or if optimizer input arg is a config dict. + + Returns + ------- + optimizer : tf.keras.optimizers.Optimizer + Initialized optimizer object. + """ + if isinstance(optimizer, dict): + class_name = optimizer['name'] + optimizer_class = getattr(optimizers, class_name) + sig = signature(optimizer_class) + optimizer_kwargs = { + k: v for k, v in optimizer.items() if k in sig.parameters + } + optimizer = optimizer_class.from_config(optimizer_kwargs) + elif optimizer is None: + optimizer = optimizers.Adam(learning_rate=learning_rate) + + return optimizer
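A hedged round-trip sketch of the dict path through ``init_optimizer``: a ``get_config()`` dict carries the class name plus keyword arguments, which is also how a saved model rebuilds its optimizer.

import tensorflow as tf

conf = tf.keras.optimizers.Adam(learning_rate=1e-4).get_config()
rebuilt = tf.keras.optimizers.Adam.from_config(conf)
assert abs(float(rebuilt.get_config()['learning_rate']) - 1e-4) < 1e-12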
+ + +
+[docs] + @staticmethod + def load_saved_params(out_dir, verbose=True): + """Load saved model_params (you need this and the gen+disc models + to load a full model). + + Parameters + ---------- + out_dir : str + Directory to load model files from. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + params : dict + Model parameters loaded from disk json file. This should be the + same as self.model_params with an additional 'history' entry. + Should be all the kwargs you need to init a model. + """ + + fp_params = os.path.join(out_dir, 'model_params.json') + with open(fp_params) as f: + params = json.load(f) + + # using the saved model dir makes this more portable + fp_history = os.path.join(out_dir, 'history.csv') + if os.path.exists(fp_history): + params['history'] = fp_history + else: + params['history'] = None + + if 'version_record' in params: + version_record = params.pop('version_record') + if verbose: + logger.info( + 'Loading model from disk ' + 'that was created with the ' + 'following package versions: \n{}'.format( + pprint.pformat(version_record, indent=2) + ) + ) + + means = params.get('means', None) + stdevs = params.get('stdevs', None) + if means is not None and stdevs is not None: + means = {k: np.float32(v) for k, v in means.items()} + stdevs = {k: np.float32(v) for k, v in stdevs.items()} + params['means'] = means + params['stdevs'] = stdevs + + return params
+ + +
+[docs] + def get_high_res_exo_input(self, high_res): + """Get exogenous feature data from high_res + + Parameters + ---------- + high_res : tf.Tensor + Ground truth high resolution spatiotemporal data. + + Returns + ------- + exo_data : dict + Dictionary of exogenous feature data used as input to tf_generate. + e.g. ``{'topography': tf.Tensor(...)}`` + """ + exo_data = {} + for feature in self.hr_exo_features: + f_idx = self.hr_exo_features.index(feature) + f_idx += len(self.hr_out_features) + exo_fdata = high_res[..., f_idx : f_idx + 1] + exo_data[feature] = exo_fdata + return exo_data
+ + +
+[docs] + @staticmethod + def get_loss_fun(loss): + """Get the initialized loss function class from the sup3r loss library + or the tensorflow losses. + + Parameters + ---------- + loss : str | dict + Loss function class name from sup3r.utilities.loss_metrics + (prioritized) or tensorflow.keras.losses. Defaults to + tf.keras.losses.MeanSquaredError. This can be provided as a dict + with kwargs for loss functions with extra parameters. + e.g. {'SpatialExtremesLoss': {'weight': 0.5}} + + Returns + ------- + out : tf.keras.losses.Loss + Initialized loss function class that is callable, e.g. if + "MeanSquaredError" is requested, this will return + an instance of tf.keras.losses.MeanSquaredError() + """ + kwargs = {} + if isinstance(loss, dict): + loss, kwargs = next(iter(loss.items())) + + out = getattr(sup3r.utilities.loss_metrics, loss, None) + if out is None: + out = getattr(tf.keras.losses, loss, None) + + if out is None: + msg = ( + 'Could not find requested loss function "{}" in ' + 'sup3r.utilities.loss_metrics or tf.keras.losses.'.format(loss) + ) + logger.error(msg) + raise KeyError(msg) + + return out(**kwargs)
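Usage sketch of the lookup above: sup3r loss names are tried first, then ``tf.keras.losses``. The dict form unpacks to a (name, kwargs) pair.

import tensorflow as tf

name, kwargs = next(iter({'MeanSquaredError': {}}.items()))
loss_fun = getattr(tf.keras.losses, name)(**kwargs)
mse = loss_fun(tf.constant([[0.0, 1.0]]), tf.constant([[1.0, 1.0]]))
assert abs(float(mse) - 0.5) < 1e-6  # mean of (1-0)^2 and (1-1)^2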
+ + +
+[docs] + @staticmethod + def get_optimizer_config(optimizer): + """Get a config that defines the current model optimizer + + Parameters + ---------- + optimizer : tf.keras.optimizers.Optimizer + TF-Keras optimizer object (e.g., Adam) + + Returns + ------- + config : dict + Optimizer config + """ + conf = optimizer.get_config() + for k, v in conf.items(): + # need to convert numpy dtypes to float/int for json.dump() + if np.issubdtype(type(v), np.floating): + conf[k] = float(v) + elif np.issubdtype(type(v), np.integer): + conf[k] = int(v) + return conf
+ + +
+[docs] + @classmethod + def get_optimizer_state(cls, optimizer): + """Get a set of state variables for the optimizer + + Parameters + ---------- + optimizer : tf.keras.optimizers.Optimizer + TF-Keras optimizer object (e.g., Adam) + + Returns + ------- + state : dict + Optimizer state variables + """ + lr = cls.get_optimizer_config(optimizer)['learning_rate'] + state = {'learning_rate': lr} + for var in optimizer.variables: + name = var.name + var = var.numpy().flatten() + var = np.abs(var).mean() # collapse ndarrays into mean absolute + state[name] = float(var) + return state
+ + +
+[docs] + @staticmethod + def update_loss_details(loss_details, new_data, batch_len, prefix=None): + """Update a dictionary of loss_details with loss information from a new + batch. + + Parameters + ---------- + loss_details : dict + Namespace of the breakdown of loss components where each value is a + running average at the current state in the epoch. + new_data : dict + Namespace of the breakdown of loss components for a single new + batch. + batch_len : int + Length of the incoming batch. + prefix : None | str + Option to prefix the names of the loss data when saving to the + loss_details dictionary. + + Returns + ------- + loss_details : dict + Same as input loss_details but with running averages updated. + """ + assert 'n_obs' in loss_details, 'loss_details must have n_obs to start' + prior_n_obs = loss_details['n_obs'] + new_n_obs = prior_n_obs + batch_len + + for k, v in new_data.items(): + key = k if prefix is None else prefix + k + new_value = numpy_if_tensor(v) + + if key in loss_details: + saved_value = loss_details[key] + saved_value *= prior_n_obs + saved_value += batch_len * new_value + saved_value /= new_n_obs + loss_details[key] = saved_value + else: + loss_details[key] = new_value + + loss_details['n_obs'] = new_n_obs + + return loss_details
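The update above is an exact weighted running mean over observations. Worked example: a prior average of 0.8 over ``n_obs=10`` folded together with a new batch mean of 0.2 over 5 observations gives 0.6.

prior_mean, prior_n = 0.8, 10
batch_mean, batch_len = 0.2, 5
new_mean = (prior_mean * prior_n + batch_mean * batch_len) / (prior_n + batch_len)
assert abs(new_mean - 0.6) < 1e-12  # (8 + 1) / 15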
+ + +
+[docs] + @staticmethod + def log_loss_details(loss_details, level='INFO'): + """Log the loss details to the module logger. + + Parameters + ---------- + loss_details : dict + Namespace of the breakdown of loss components where each value is a + running average at the current state in the epoch. + level : str + Log level (e.g. INFO, DEBUG) + """ + for k, v in sorted(loss_details.items()): + if k != 'n_obs': + if isinstance(v, str): + msg_format = '\t{}: {}' + else: + msg_format = '\t{}: {:.2e}' + if level.lower() == 'info': + logger.info(msg_format.format(k, v)) + else: + logger.debug(msg_format.format(k, v))
+ + +
+[docs] + @staticmethod + def early_stop(history, column, threshold=0.005, n_epoch=5): + """Determine whether to stop training early based on nearly no change + to validation loss for a certain number of consecutive epochs. + + Parameters + ---------- + history : pd.DataFrame | None + Model training history + column : str + Column from the model training history to evaluate for early + termination. + threshold : float + The absolute relative fractional difference in validation loss + between subsequent epochs below which an early termination is + warranted. E.g. if val losses were 0.1 and 0.0998 the relative + diff would be calculated as 0.0002 / 0.1 = 0.002 which would be + less than the default threshold of 0.005 and would satisfy the + condition for early termination. + n_epoch : int + The number of consecutive epochs that satisfy the threshold that + warrants an early stop. + + Returns + ------- + stop : bool + Flag to stop training (True) or keep going (False). + """ + stop = False + + if history is not None and len(history) > n_epoch + 1: + diffs = np.abs(np.diff(history[column])) + if all(diffs[-n_epoch:] < threshold): + stop = True + logger.info( + 'Found early stop condition, loss values "{}" ' + 'have absolute relative differences less than ' + 'threshold {}: {}'.format( + column, threshold, diffs[-n_epoch:] + ) + ) + + return stop
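A toy check of the stopping rule above; note the implementation compares absolute epoch-to-epoch differences of the history column against the threshold.

import numpy as np
import pandas as pd

history = pd.DataFrame({'validation_loss_gen':
                        [1.0, 0.5, 0.4, 0.399, 0.398, 0.3985, 0.3983, 0.3982]})
diffs = np.abs(np.diff(history['validation_loss_gen']))
# len(history) > n_epoch + 1 and the last n_epoch=5 diffs are all < 0.005
assert len(history) > 6 and all(diffs[-5:] < 0.005)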
+ + +
+[docs] + @abstractmethod + def save(self, out_dir): + """Save the model with its sub-networks to a directory. + + Parameters + ---------- + out_dir : str + Directory to save model files. This directory will be created + if it does not already exist. + """
+ + +
+[docs] + def finish_epoch( + self, + epoch, + epochs, + t0, + loss_details, + checkpoint_int, + out_dir, + early_stop_on, + early_stop_threshold, + early_stop_n_epoch, + extras=None, + ): + """Perform finishing checks after an epoch is done training + + Parameters + ---------- + epoch : int + Epoch number that is finishing + epochs : list + List of epochs being iterated through + t0 : float + Starting time of training. + loss_details : dict + Namespace of the breakdown of loss components + checkpoint_int : int | None + Epoch interval at which to save checkpoint models. + out_dir : str + Directory to save checkpoint models. Should have {epoch} in + the directory name. This directory will be created if it does not + already exist. + early_stop_on : str | None + If not None, this should be a column in the training history to + evaluate for early stopping (e.g. validation_loss_gen, + validation_loss_disc). If the value in this history column + decreases by an absolute fractional relative difference of less + than 0.01 for more than 5 epochs in a row, the training will stop + early. + early_stop_threshold : float + The absolute relative fractional difference in validation loss + between subsequent epochs below which an early termination is + warranted. E.g. if val losses were 0.1 and 0.0998 the relative + diff would be calculated as 0.0002 / 0.1 = 0.002 which would be + less than a threshold of 0.01 and would satisfy the + condition for early termination. + early_stop_n_epoch : int + The number of consecutive epochs that satisfy the threshold that + warrants an early stop. + extras : dict | None + Extra kwargs/parameters to save in the epoch history. + + Returns + ------- + stop : bool + Flag to early stop training. + """ + self.log_loss_details(loss_details) + self._history.at[epoch, 'elapsed_time'] = time.time() - t0 + for key, value in loss_details.items(): + if key != 'n_obs': + self._history.at[epoch, key] = value + + last_epoch = epoch == epochs[-1] + chp = checkpoint_int is not None and (epoch % checkpoint_int) == 0 + if last_epoch or chp: + msg = ( + 'Model output dir for checkpoint models should have ' + f'{"{epoch}"} but did not: {out_dir}' + ) + assert '{epoch}' in out_dir, msg + self.save(out_dir.format(epoch=epoch)) + + stop = False + if early_stop_on is not None and early_stop_on in self._history: + stop = self.early_stop( + self._history, + early_stop_on, + threshold=early_stop_threshold, + n_epoch=early_stop_n_epoch, + ) + if stop: + self.save(out_dir.format(epoch=epoch)) + + if extras is not None: + for k, v in extras.items(): + self._history.at[epoch, k] = safe_cast(v) + + return stop
+ + +
+[docs] + def run_gradient_descent( + self, + low_res, + hi_res_true, + training_weights, + optimizer=None, + multi_gpu=False, + **calc_loss_kwargs, + ): + # pylint: disable=E0602 + """Run gradient descent for one mini-batch of (low_res, hi_res_true) + and update weights + + Parameters + ---------- + low_res : np.ndarray + Real low-resolution data in a 4D or 5D array: + (n_observations, spatial_1, spatial_2, features) + (n_observations, spatial_1, spatial_2, temporal, features) + hi_res_true : np.ndarray + Real high-resolution data in a 4D or 5D array: + (n_observations, spatial_1, spatial_2, features) + (n_observations, spatial_1, spatial_2, temporal, features) + training_weights : list + A list of layer weights that are to-be-trained based on the + current loss weight values. + optimizer : tf.keras.optimizers.Optimizer + Optimizer class to use to update weights. This can be different if + you're training just the generator or one of the discriminator + models. Defaults to the generator optimizer. + multi_gpu : bool + Flag to break up the batch for parallel gradient descent + calculations on multiple gpus. If True and multiple GPUs are + present, each batch from the batch_handler will be divided up + between the GPUs and the resulting gradient from each GPU will + constitute a single gradient descent step with the nominal learning + rate that the model was initialized with. + calc_loss_kwargs : dict + Kwargs to pass to the self.calc_loss() method + + Returns + ------- + loss_details : dict + Namespace of the breakdown of loss components + """ + self.timer.start() + if optimizer is None: + optimizer = self.optimizer + + if not multi_gpu or len(self.gpu_list) < 2: + grad, loss_details = self.get_single_grad( + low_res, + hi_res_true, + training_weights, + device_name=self.default_device, + **calc_loss_kwargs, + ) + optimizer.apply_gradients(zip(grad, training_weights)) + self.timer.stop() + logger.debug( + 'Finished single gradient descent step in %s', + self.timer.elapsed_str, + ) + else: + futures = [] + lr_chunks = np.array_split(low_res, len(self.gpu_list)) + hr_true_chunks = np.array_split(hi_res_true, len(self.gpu_list)) + split_mask = False + mask_chunks = None + if 'mask' in calc_loss_kwargs: + split_mask = True + mask_chunks = np.array_split( + calc_loss_kwargs['mask'], len(self.gpu_list) + ) + + with ThreadPoolExecutor(max_workers=len(self.gpu_list)) as exe: + for i in range(len(self.gpu_list)): + if split_mask: + calc_loss_kwargs['mask'] = mask_chunks[i] + futures.append( + exe.submit( + self.get_single_grad, + lr_chunks[i], + hr_true_chunks[i], + training_weights, + device_name=f'/gpu:{i}', + **calc_loss_kwargs, + ) + ) + for _, future in enumerate(futures): + grad, loss_details = future.result() + optimizer.apply_gradients(zip(grad, training_weights)) + + self.timer.stop() + logger.debug( + 'Finished %s gradient descent steps on %s GPUs in %s', + len(futures), + len(self.gpu_list), + self.timer.elapsed_str, + ) + return loss_details
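In the multi-GPU branch above the observation axis is simply split across devices; ``np.array_split`` tolerates batch sizes that do not divide evenly.

import numpy as np

low_res = np.zeros((7, 10, 10, 3))  # 7 observations
chunks = np.array_split(low_res, 2)  # e.g. two GPUs
assert [c.shape[0] for c in chunks] == [4, 3]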
+ + + def _reshape_norm_exo(self, hi_res, hi_res_exo, exo_name, norm_in=True): + """Reshape the hi_res_exo to match the hi_res tensor (if necessary) + and normalize (if requested). + + Parameters + ---------- + hi_res : ndarray + Synthetically generated high-resolution data, usually a 4D or 5D + array with shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + hi_res_exo : np.ndarray + This should be a 4D array for spatial enhancement model or 5D array + for a spatiotemporal enhancement model (obs, spatial_1, spatial_2, + (temporal), features) corresponding to the high-resolution + spatial_1, spatial_2, temporal. This data will be input to the + custom phygnn Sup3rAdder or Sup3rConcat layer if found in the + generative network. This differs from the exogenous_data input in + that exogenous_data always matches the low-res input. For this + function, hi_res_exo can also be a 3D array (spatial_1, spatial_2, + 1). Note that this input gets normalized if norm_in=True. + exo_name : str + Name of feature corresponding to hi_res_exo data. + norm_in : bool + Flag to normalize low_res input data if the self._means, + self._stdevs attributes are available. The generator should always + receive normalized data with mean=0 stdev=1. This also normalizes + hi_res_exo. + + Returns + ------- + hi_res_exo : np.ndarray + Same as input but reshaped to match hi_res (if necessary) and + normalized (if requested) + """ + if hi_res_exo is None: + return hi_res_exo + + if norm_in and self._means is not None: + hi_res_exo = ( + hi_res_exo.copy() - self._means[exo_name] + ) / self._stdevs[exo_name] + + if len(hi_res_exo.shape) == 3: + hi_res_exo = np.expand_dims(hi_res_exo, axis=0) + hi_res_exo = np.repeat(hi_res_exo, hi_res.shape[0], axis=0) + if len(hi_res_exo.shape) == 4 and len(hi_res.shape) == 5: + hi_res_exo = np.expand_dims(hi_res_exo, axis=3) + hi_res_exo = np.repeat(hi_res_exo, hi_res.shape[3], axis=3) + + if len(hi_res_exo.shape) != len(hi_res.shape): + msg = ( + 'hi_res and hi_res_exo arrays are not of the same rank: ' + '{} and {}'.format(hi_res.shape, hi_res_exo.shape) + ) + logger.error(msg) + raise RuntimeError(msg) + + return hi_res_exo + +
+[docs] + def generate( + self, low_res, norm_in=True, un_norm_out=True, exogenous_data=None + ): + """Use the generator model to generate high res data from low res + input. This is the public generate function. + + Parameters + ---------- + low_res : np.ndarray + Low-resolution input data, usually a 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + norm_in : bool + Flag to normalize low_res input data if the self._means, + self._stdevs attributes are available. The generator should always + receive normalized data with mean=0 stdev=1. This also normalizes + hi_res_exo. + un_norm_out : bool + Flag to un-normalize synthetically generated output data to physical + units + exogenous_data : dict | ExoData | None + Special dictionary (class:`ExoData`) of exogenous feature data with + entries describing whether features should be combined at input, a + mid network layer, or with output. This doesn't have to include + the 'model' key since this data is for a single step model. + + Returns + ------- + hi_res : ndarray + Synthetically generated high-resolution data, usually a 4D or 5D + array with shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + """ + if ( + not isinstance(exogenous_data, ExoData) + and exogenous_data is not None + ): + exogenous_data = ExoData(exogenous_data) + + low_res = self._combine_fwp_input(low_res, exogenous_data) + if norm_in and self._means is not None: + low_res = self.norm_input(low_res) + + hi_res = self.generator.layers[0](low_res) + layer_num = 1 + try: + for i, layer in enumerate(self.generator.layers[1:]): + layer_num = i + 1 + if isinstance(layer, (Sup3rAdder, Sup3rConcat)): + msg = ( + f'layer.name = {layer.name} does not match any ' + 'features in exogenous_data ' + f'({list(exogenous_data)})' + ) + assert layer.name in exogenous_data, msg + hi_res_exo = exogenous_data.get_combine_type_data( + layer.name, 'layer' + ) + hi_res_exo = self._reshape_norm_exo( + hi_res, hi_res_exo, layer.name, norm_in=norm_in + ) + hi_res = layer(hi_res, hi_res_exo) + else: + hi_res = layer(hi_res) + except Exception as e: + msg = 'Could not run layer #{} "{}" on tensor of shape {}'.format( + layer_num, layer, hi_res.shape + ) + logger.error(msg) + raise RuntimeError(msg) from e + + hi_res = hi_res.numpy() + + if un_norm_out and self._means is not None: + hi_res = self.un_norm_output(hi_res) + + return self._combine_fwp_output(hi_res, exogenous_data)
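A hedged end-to-end sketch of the public call, commented out because it needs a trained model on disk (the directory name is a placeholder):

# model = Sup3rGan.load('./model_dir')
# low_res = np.zeros((1, 10, 10, len(model.lr_features)), dtype=np.float32)
# hi_res = model.generate(low_res)  # norm_in / un_norm_out default to True
# hi_res.shape == (1, 10 * model.s_enhance, 10 * model.s_enhance,
#                  len(model.hr_out_features))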
+ + + @tf.function + def _tf_generate(self, low_res, hi_res_exo=None): + """Use the generator model to generate high res data from low res input + + Parameters + ---------- + low_res : np.ndarray + Real low-resolution data. The generator should always + receive normalized data with mean=0 stdev=1. + hi_res_exo : dict + Dictionary of exogenous_data with same resolution as high_res data + e.g. ``{'topography': np.array}`` + The arrays in this dictionary should be a 4D array for spatial + enhancement model or 5D array for a spatiotemporal enhancement + model ``(obs, spatial_1, spatial_2, (temporal), features)`` + corresponding to the high-resolution spatial_1 and spatial_2. This + data will be input to the custom phygnn Sup3rAdder or Sup3rConcat + layer if found in the generative network. This differs from the + exogenous_data input in that exogenous_data always matches the + low-res input. + + Returns + ------- + hi_res : tf.Tensor + Synthetically generated high-resolution data + """ + hi_res = self.generator.layers[0](low_res) + layer_num = 1 + try: + for i, layer in enumerate(self.generator.layers[1:]): + layer_num = i + 1 + if isinstance(layer, (Sup3rAdder, Sup3rConcat)): + msg = ( + f'layer.name = {layer.name} does not match any ' + f'features in exogenous_data ({list(hi_res_exo)})' + ) + assert layer.name in hi_res_exo, msg + hr_exo = hi_res_exo[layer.name] + hi_res = layer(hi_res, hr_exo) + else: + hi_res = layer(hi_res) + except Exception as e: + msg = 'Could not run layer #{} "{}" on tensor of shape {}'.format( + layer_num, layer, hi_res.shape + ) + logger.error(msg) + raise RuntimeError(msg) from e + + return hi_res +
+[docs] + @tf.function + def get_single_grad( + self, + low_res, + hi_res_true, + training_weights, + device_name=None, + **calc_loss_kwargs, + ): + """Run gradient descent for one mini-batch of (low_res, hi_res_true), + do not update weights, just return gradient details. + + Parameters + ---------- + low_res : np.ndarray + Real low-resolution data in a 4D or 5D array: + (n_observations, spatial_1, spatial_2, features) + (n_observations, spatial_1, spatial_2, temporal, features) + hi_res_true : np.ndarray + Real high-resolution data in a 4D or 5D array: + (n_observations, spatial_1, spatial_2, features) + (n_observations, spatial_1, spatial_2, temporal, features) + training_weights : list + A list of layer weights that are to-be-trained based on the + current loss weight values. + device_name : None | str + Optional tensorflow device name for GPU placement. Note that if a + GPU is available, variables will be placed on that GPU even if + device_name=None. + calc_loss_kwargs : dict + Kwargs to pass to the self.calc_loss() method + + Returns + ------- + grad : list + a list or nested structure of Tensors (or IndexedSlices, or None, + or CompositeTensor) representing the gradients for the + training_weights + loss_details : dict + Namespace of the breakdown of loss components + """ + with tf.device(device_name), tf.GradientTape( + watch_accessed_variables=False + ) as tape: + tape.watch(training_weights) + hi_res_exo = self.get_high_res_exo_input(hi_res_true) + hi_res_gen = self._tf_generate(low_res, hi_res_exo) + loss_out = self.calc_loss( + hi_res_true, hi_res_gen, **calc_loss_kwargs + ) + loss, loss_details = loss_out + grad = tape.gradient(loss, training_weights) + return grad, loss_details
+
+ +
\ No newline at end of file
diff --git a/_modules/sup3r/models/base.html b/_modules/sup3r/models/base.html
new file mode 100644
index 000000000..c9a24e75b
--- /dev/null
+++ b/_modules/sup3r/models/base.html
@@ -0,0 +1,1760 @@
+sup3r.models.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.models.base

+"""Sup3r model software"""
+
+import copy
+import logging
+import os
+import pprint
+import time
+from warnings import warn
+
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+
+from sup3r.preprocessing.utilities import get_class_kwargs
+from sup3r.utilities import VERSION_RECORD
+
+from .abstract import AbstractInterface, AbstractSingleModel
+from .utilities import get_optimizer_class
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Sup3rGan(AbstractSingleModel, AbstractInterface): + """Basic sup3r GAN model.""" + + def __init__( + self, + gen_layers, + disc_layers, + loss='MeanSquaredError', + optimizer=None, + learning_rate=1e-4, + optimizer_disc=None, + learning_rate_disc=None, + history=None, + meta=None, + means=None, + stdevs=None, + default_device=None, + name=None, + ): + """ + Parameters + ---------- + gen_layers : list | str + Hidden layers input argument to phygnn.base.CustomNetwork for the + generative super resolving model. Can also be a str filepath to a + .json config file containing the input layers argument or a .pkl + for a saved pre-trained model. + disc_layers : list | str + Hidden layers input argument to phygnn.base.CustomNetwork for the + discriminative model (spatial or spatiotemporal discriminator). Can + also be a str filepath to a .json config file containing the input + layers argument or a .pkl for a saved pre-trained model. + loss : str | dict + Loss function class name from sup3r.utilities.loss_metrics + (prioritized) or tensorflow.keras.losses. Defaults to + tf.keras.losses.MeanSquaredError. This can be provided as a dict + with kwargs for loss functions with extra parameters. + e.g. {'SpatialExtremesLoss': {'weight': 0.5}} + optimizer : tf.keras.optimizers.Optimizer | dict | None | str + Instantiated tf.keras.optimizers object or a dict optimizer config + from tf.keras.optimizers.get_config(). None defaults to Adam. + learning_rate : float, optional + Optimizer learning rate. Not used if optimizer input arg is a + pre-initialized object or if optimizer input arg is a config dict. + optimizer_disc : tf.keras.optimizers.Optimizer | dict | None + Same as optimizer input, but if specified this makes a different + optimizer just for the discriminator network (spatial or + spatiotemporal disc). + learning_rate_disc : float, optional + Same as learning_rate input, but if specified this makes a + different learning_rate just for the discriminator network (spatial + or spatiotemporal disc). + history : pd.DataFrame | str | None + Model training history with "epoch" index, str pointing to a saved + history csv file with "epoch" as first column, or None for clean + history + meta : dict | None + Model meta data that describes how the model was created. + means : dict | None + Set of mean values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + stdevs : dict | None + Set of stdev values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + default_device : str | None + Option for default device placement of model weights. If None and a + single GPU exists, that GPU will be the default device. If None and + multiple GPUs exist, the CPU will be the default device (this was + tested as most efficient given the custom multi-gpu strategy + developed in self.run_gradient_descent()). Examples: "/gpu:0" or + "/cpu:0" + name : str | None + Optional name for the GAN. 
+ """ + super().__init__() + + self.default_device = default_device + if self.default_device is None and len(self.gpu_list) >= 1: + self.default_device = '/gpu:0' + + self.name = name if name is not None else self.__class__.__name__ + self._meta = meta if meta is not None else {} + + self.loss_name = loss + self.loss_fun = self.get_loss_fun(loss) + + self._history = history + if isinstance(self._history, str): + self._history = pd.read_csv(self._history, index_col=0) + + optimizer_disc = optimizer_disc or copy.deepcopy(optimizer) + learning_rate_disc = learning_rate_disc or learning_rate + self._optimizer = self.init_optimizer(optimizer, learning_rate) + self._optimizer_disc = self.init_optimizer( + optimizer_disc, learning_rate_disc + ) + + self._gen = self.load_network(gen_layers, 'generator') + self._disc = self.load_network(disc_layers, 'discriminator') + + self._means = means + self._stdevs = stdevs + +
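A hedged construction sketch; the file names are placeholders and each .json config is expected to hold a ``hidden_layers`` list as described in ``load_network``:

# model = Sup3rGan('./gen_config.json', './disc_config.json',
#                  loss='MeanSquaredError', learning_rate=1e-4,
#                  name='sup3r_gan')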
+[docs] + def save(self, out_dir): + """Save the GAN with its sub-networks to a directory. + + Parameters + ---------- + out_dir : str + Directory to save GAN model files. This directory will be created + if it does not already exist. + """ + + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + fp_gen = os.path.join(out_dir, 'model_gen.pkl') + self.generator.save(fp_gen) + + fp_disc = os.path.join(out_dir, 'model_disc.pkl') + self.discriminator.save(fp_disc) + + fp_history = None + if isinstance(self.history, pd.DataFrame): + fp_history = os.path.join(out_dir, 'history.csv') + self.history.to_csv(fp_history) + + self.save_params(out_dir) + + logger.info('Saved GAN to disk in directory: {}'.format(out_dir))
+ + + @classmethod + def _load(cls, model_dir, verbose=True): + """Get gen, disc, and params for given model_dir. + + Parameters + ---------- + model_dir : str + Directory to load GAN model files from. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + fp_gen : str + Path to generator model + fp_disc : str + Path to discriminator model + params : dict + Dictionary of model params to be used in model initialization + """ + if verbose: + logger.info( + 'Loading GAN from disk in directory: {}'.format(model_dir) + ) + msg = 'Active python environment versions: \n{}'.format( + pprint.pformat(VERSION_RECORD, indent=4) + ) + logger.info(msg) + + fp_gen = os.path.join(model_dir, 'model_gen.pkl') + fp_disc = os.path.join(model_dir, 'model_disc.pkl') + params = cls.load_saved_params(model_dir, verbose=verbose) + + return fp_gen, fp_disc, params + +
+[docs] + @classmethod + def load(cls, model_dir, verbose=True): + """Load the GAN with its sub-networks from a previously saved-to output + directory. + + Parameters + ---------- + model_dir : str + Directory to load GAN model files from. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : BaseModel + Returns a pretrained gan model that was previously saved to + model_dir + """ + fp_gen, fp_disc, params = cls._load(model_dir, verbose=verbose) + return cls(fp_gen, fp_disc, **params)
+ + + @property + def discriminator(self): + """Get the discriminator model. + + Returns + ------- + phygnn.base.CustomNetwork + """ + return self._disc + + @property + def discriminator_weights(self): + """Get a list of layer weights and bias terms for the discriminator + model. + + Returns + ------- + list + """ + return self.discriminator.weights + +
+[docs] + def discriminate(self, hi_res, norm_in=False): + """Run the discriminator model on a hi resolution input field. + + Parameters + ---------- + hi_res : np.ndarray + Real or fake high res data in a 4D or 5D tensor: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + norm_in : bool + Flag to normalize low_res input data if the self._means, + self._stdevs attributes are available. The disc should always + receive normalized data with mean=0 stdev=1. + + Returns + ------- + out : np.ndarray + Discriminator output logits + """ + + if isinstance(hi_res, tf.Tensor): + hi_res = hi_res.numpy() + + if norm_in and self._means is not None: + mean_arr = [self._means[fn] for fn in self.hr_out_features] + std_arr = [self._stdevs[fn] for fn in self.hr_out_features] + mean_arr = np.array(mean_arr, dtype=np.float32) + std_arr = np.array(std_arr, dtype=np.float32) + hi_res = hi_res if isinstance(hi_res, tf.Tensor) else hi_res.copy() + hi_res = (hi_res - mean_arr) / std_arr + + out = self.discriminator.layers[0](hi_res) + layer_num = 1 + try: + for i, layer in enumerate(self.discriminator.layers[1:]): + layer_num = i + 1 + out = layer(out) + except Exception as e: + msg = 'Could not run layer #{} "{}" on tensor of shape {}'.format( + layer_num, layer, out.shape + ) + logger.error(msg) + raise RuntimeError(msg) from e + + return out.numpy()
+ + + @tf.function + def _tf_discriminate(self, hi_res): + """Run the discriminator model on a hi resolution input field. + + Parameters + ---------- + hi_res : np.ndarray + Real or fake high res data in a 4D or 5D tensor: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + This input should always be normalized with mean=0 and stdev=1 + + Returns + ------- + out : np.ndarray + Discriminator output logits + """ + out = self.discriminator.layers[0](hi_res) + layer_num = 1 + try: + for i, layer in enumerate(self.discriminator.layers[1:]): + layer_num = i + 1 + out = layer(out) + except Exception as e: + msg = 'Could not run layer #{} "{}" on tensor of shape {}'.format( + layer_num, layer, out.shape + ) + logger.error(msg) + raise RuntimeError(msg) from e + + return out + + @property + def optimizer_disc(self): + """Get the tensorflow optimizer to perform gradient descent + calculations for the discriminator network. + + Returns + ------- + tf.keras.optimizers.Optimizer + """ + return self._optimizer_disc + +
+[docs] + def update_optimizer(self, option='generator', **kwargs): + """Update optimizer by changing current configuration + + Parameters + ---------- + option : str + Which optimizer to update. Can be "generator", "discriminator", or + "all" + kwargs : dict + kwargs to use for optimizer configuration update + """ + + if 'gen' in option.lower() or 'all' in option.lower(): + conf = self.get_optimizer_config(self.optimizer) + conf.update(**kwargs) + optimizer_class = get_optimizer_class(conf) + self._optimizer = optimizer_class.from_config( + get_class_kwargs(optimizer_class, conf) + ) + + if 'disc' in option.lower() or 'all' in option.lower(): + conf = self.get_optimizer_config(self.optimizer_disc) + conf.update(**kwargs) + optimizer_class = get_optimizer_class(conf) + self._optimizer_disc = optimizer_class.from_config( + get_class_kwargs(optimizer_class, conf) + )
+ + + @property + def meta(self): + """Get meta data dictionary that defines how the model was created""" + + if 'class' not in self._meta: + self._meta['class'] = self.__class__.__name__ + + return self._meta + + @property + def model_params(self): + """ + Model parameters, used to save the model to disk + + Returns + ------- + dict + """ + + means = self._means + stdevs = self._stdevs + if means is not None and stdevs is not None: + means = {k: float(v) for k, v in means.items()} + stdevs = {k: float(v) for k, v in stdevs.items()} + + return { + 'name': self.name, + 'loss': self.loss_name, + 'version_record': self.version_record, + 'optimizer': self.get_optimizer_config(self.optimizer), + 'optimizer_disc': self.get_optimizer_config(self.optimizer_disc), + 'means': means, + 'stdevs': stdevs, + 'meta': self.meta, + 'default_device': self.default_device, + } + + @property + def weights(self): + """Get a list of all the layer weights and bias terms for the + generator and discriminator networks + """ + return self.generator_weights + self.discriminator_weights + +
+[docs] + def init_weights(self, lr_shape, hr_shape, device=None): + """Initialize the generator and discriminator weights with device + placement. + + Parameters + ---------- + lr_shape : tuple + Shape of one batch of low res input data for sup3r resolution. Note + that the batch size (axis=0) must be included, but the actual batch + size doesn't really matter. + hr_shape : tuple + Shape of one batch of high res input data for sup3r resolution. + Note that the batch size (axis=0) must be included, but the actual + batch size doesn't really matter. + device : str | None + Option to place model weights on a device. If None, + self.default_device will be used. + """ + + if device is None: + device = self.default_device + + logger.info('Initializing model weights on device "{}"'.format(device)) + low_res = np.ones(lr_shape).astype(np.float32) + hi_res = np.ones(hr_shape).astype(np.float32) + + hr_exo_shape = hr_shape[:-1] + (1,) + hr_exo = np.ones(hr_exo_shape).astype(np.float32) + + with tf.device(device): + hr_exo_data = {} + for feature in self.hr_exo_features: + hr_exo_data[feature] = hr_exo + _ = self._tf_generate(low_res, hr_exo_data) + _ = self._tf_discriminate(hi_res)
+ + +
+[docs] + @staticmethod + def get_weight_update_fraction( + history, comparison_key, update_bounds=(0.5, 0.95), update_frac=0.0 + ): + """Get the factor by which to multiply previous adversarial loss + weight + + Parameters + ---------- + history : dict + Dictionary with information on how often discriminators + were trained during previous epoch. + comparison_key : str + history key to use for update check + update_bounds : tuple + Tuple specifying allowed range for history[comparison_key]. If + history[comparison_key] < update_bounds[0] then the weight will be + increased by (1 + update_frac). If history[comparison_key] > + update_bounds[1] then the weight will be decreased by 1 / (1 + + update_frac). + update_frac : float + Fraction by which to increase/decrease adversarial loss weight + + Returns + ------- + float + Factor by which to multiply old weight to get updated weight + """ + + val = history[comparison_key] + if isinstance(val, (list, tuple, np.ndarray)): + val = val[-1] + + if val < update_bounds[0]: + return 1 + update_frac + if val > update_bounds[1]: + return 1 / (1 + update_frac) + return 1
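+# Worked example (standalone, illustrative values): with
+# update_bounds=(0.5, 0.95) and update_frac=0.05, a discriminator that
+# trained on only 30% of batches gets its adversarial weight increased.
+demo_history = {'train_disc_trained_frac': [0.3]}
+demo_factor = Sup3rGan.get_weight_update_fraction(
+    demo_history, 'train_disc_trained_frac',
+    update_bounds=(0.5, 0.95), update_frac=0.05)
+assert abs(demo_factor - 1.05) < 1e-9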
+ + +
+[docs] + @tf.function + def calc_loss_gen_content(self, hi_res_true, hi_res_gen): + """Calculate the content loss term for the generator model. + + Parameters + ---------- + hi_res_true : tf.Tensor + Ground truth high resolution spatiotemporal data. + hi_res_gen : tf.Tensor + Superresolved high resolution spatiotemporal data generated by the + generative model. + + Returns + ------- + loss_gen_s : tf.Tensor + 0D tensor generator model loss for the content loss comparing the + hi res ground truth to the hi res synthetically generated output. + """ + hi_res_gen = self._combine_loss_input(hi_res_true, hi_res_gen) + return self.loss_fun(hi_res_true, hi_res_gen)
+ + +
+[docs] + @staticmethod + @tf.function + def calc_loss_gen_advers(disc_out_gen): + """Calculate the adversarial component of the loss term for the + generator model. + + Parameters + ---------- + disc_out_gen : tf.Tensor + Raw discriminator outputs from the discriminator model + predicting only on hi_res_gen (not on hi_res_true). + + Returns + ------- + loss_gen_advers : tf.Tensor + 0D tensor generator model loss for the adversarial component of the + generator loss term. + """ + + # note that these have flipped labels from the discriminator + # loss because of the opposite optimization goal + loss_gen_advers = tf.nn.sigmoid_cross_entropy_with_logits( + logits=disc_out_gen, labels=tf.ones_like(disc_out_gen) + ) + return tf.reduce_mean(loss_gen_advers)
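+# Minimal numeric check (standalone sketch): the generator's adversarial
+# labels are all ones, so logits that the discriminator scores as "real"
+# give a near-zero loss.
+import tensorflow as tf
+demo_logits = tf.constant([[4.0], [5.0]])
+demo_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
+    logits=demo_logits, labels=tf.ones_like(demo_logits)))
+# demo_loss ~= 0.012: the generator is fooling the discriminator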
+ + +
+[docs] + @staticmethod + @tf.function + def calc_loss_disc(disc_out_true, disc_out_gen): + """Calculate the loss term for the discriminator model (either the + spatial or temporal discriminator). + + Parameters + ---------- + disc_out_true : tf.Tensor + Raw discriminator outputs from the discriminator model predicting + only on ground truth data hi_res_true (not on hi_res_gen). + disc_out_gen : tf.Tensor + Raw discriminator outputs from the discriminator model predicting + only on synthetic data hi_res_gen (not on hi_res_true). + + Returns + ------- + loss_disc : tf.Tensor + 0D tensor discriminator model loss for either the spatial or + temporal component of the super resolution generated output. + """ + + # note that these have flipped labels from the generator + # loss because of the opposite optimization goal + logits = tf.concat([disc_out_true, disc_out_gen], axis=0) + labels = tf.concat( + [tf.ones_like(disc_out_true), tf.zeros_like(disc_out_gen)], axis=0 + ) + + loss_disc = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits, labels=labels + ) + return tf.reduce_mean(loss_disc)
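+# Minimal numeric check (standalone sketch): the discriminator labels real
+# samples 1 and generated samples 0, the reverse of the generator's labels.
+import tensorflow as tf
+demo_true = tf.constant([[3.0]])   # confident "real" logit on real data
+demo_gen = tf.constant([[-3.0]])   # confident "fake" logit on synthetic data
+demo_logits = tf.concat([demo_true, demo_gen], axis=0)
+demo_labels = tf.concat(
+    [tf.ones_like(demo_true), tf.zeros_like(demo_gen)], axis=0)
+demo_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
+    logits=demo_logits, labels=demo_labels))
+# demo_loss ~= 0.049: a discriminator that separates real from fake well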
+ + +
+[docs] + @tf.function + def calc_loss( + self, + hi_res_true, + hi_res_gen, + weight_gen_advers=0.001, + train_gen=True, + train_disc=False, + ): + """Calculate the GAN loss function using generated and true high + resolution data. + + Parameters + ---------- + hi_res_true : tf.Tensor + Ground truth high resolution spatiotemporal data. + hi_res_gen : tf.Tensor + Superresolved high resolution spatiotemporal data generated by the + generative model. + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + train_gen : bool + True if generator is being trained, then loss=loss_gen + train_disc : bool + True if disc is being trained, then loss=loss_disc + + Returns + ------- + loss : tf.Tensor + 0D tensor representing the loss value for the network being trained + (either generator or one of the discriminators) + loss_details : dict + Namespace of the breakdown of loss components + """ + hi_res_gen = self._combine_loss_input(hi_res_true, hi_res_gen) + + if hi_res_gen.shape != hi_res_true.shape: + msg = ( + 'The tensor shapes of the synthetic output {} and ' + 'true high res {} did not have matching shape! ' + 'Check the spatiotemporal enhancement multipliers in ' + 'your model config and data handlers.'.format( + hi_res_gen.shape, hi_res_true.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + disc_out_true = self._tf_discriminate(hi_res_true) + disc_out_gen = self._tf_discriminate(hi_res_gen) + + loss_gen_content = self.calc_loss_gen_content(hi_res_true, hi_res_gen) + loss_gen_advers = self.calc_loss_gen_advers(disc_out_gen) + loss_gen = loss_gen_content + weight_gen_advers * loss_gen_advers + + loss_disc = self.calc_loss_disc(disc_out_true, disc_out_gen) + + loss = None + if train_gen: + loss = loss_gen + elif train_disc: + loss = loss_disc + + loss_details = { + 'loss_gen': loss_gen, + 'loss_gen_content': loss_gen_content, + 'loss_gen_advers': loss_gen_advers, + 'loss_disc': loss_disc, + } + + return loss, loss_details
+ + +
+[docs] + def calc_val_loss(self, batch_handler, weight_gen_advers, loss_details): + """Calculate the validation loss at the current state of model training + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + loss_details : dict + Namespace of the breakdown of loss components + + Returns + ------- + loss_details : dict + Same as input but now includes val_* loss info + """ + logger.debug('Starting end-of-epoch validation loss calculation...') + loss_details['n_obs'] = 0 + for val_batch in batch_handler.val_data: + val_exo_data = self.get_high_res_exo_input(val_batch.high_res) + high_res_gen = self._tf_generate(val_batch.low_res, val_exo_data) + _, v_loss_details = self.calc_loss( + val_batch.high_res, + high_res_gen, + weight_gen_advers=weight_gen_advers, + train_gen=False, + train_disc=False, + ) + + loss_details = self.update_loss_details( + loss_details, v_loss_details, len(val_batch), prefix='val_' + ) + return loss_details
+ + +
+[docs] + def train_epoch( + self, + batch_handler, + weight_gen_advers, + train_gen, + train_disc, + disc_loss_bounds, + multi_gpu=False, + ): + """Train the GAN for one epoch. + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + train_gen : bool + Flag whether to train the generator for this set of epochs + train_disc : bool + Flag whether to train the discriminator for this set of epochs + disc_loss_bounds : tuple + Lower and upper bounds for the discriminator loss outside of which + the discriminator will not train unless train_disc=True and + train_gen=False. + multi_gpu : bool + Flag to break up the batch for parallel gradient descent + calculations on multiple gpus. If True and multiple GPUs are + present, each batch from the batch_handler will be divided up + between the GPUs and the resulting gradient from each GPU will + constitute a single gradient descent step with the nominal learning + rate that the model was initialized with. If True and multiple GPUs + are found, default_device should be set to /cpu:0 + + Returns + ------- + loss_details : dict + Namespace of the breakdown of loss components + """ + + disc_th_low = np.min(disc_loss_bounds) + disc_th_high = np.max(disc_loss_bounds) + loss_details = {'n_obs': 0, 'train_loss_disc': 0} + + only_gen = train_gen and not train_disc + only_disc = train_disc and not train_gen + + if self._write_tb_profile: + tf.summary.trace_on(graph=True, profiler=True) + + for ib, batch in enumerate(batch_handler): + trained_gen = False + trained_disc = False + b_loss_details = {} + loss_disc = loss_details['train_loss_disc'] + disc_too_good = loss_disc <= disc_th_low + disc_too_bad = (loss_disc > disc_th_high) and train_disc + gen_too_good = disc_too_bad + + if not self.generator_weights: + self.init_weights(batch.low_res.shape, batch.high_res.shape) + + if only_gen or (train_gen and not gen_too_good): + trained_gen = True + b_loss_details = self.timer(self.run_gradient_descent)( + batch.low_res, + batch.high_res, + self.generator_weights, + weight_gen_advers=weight_gen_advers, + optimizer=self.optimizer, + train_gen=True, + train_disc=False, + multi_gpu=multi_gpu, + ) + + if only_disc or (train_disc and not disc_too_good): + trained_disc = True + b_loss_details = self.timer(self.run_gradient_descent)( + batch.low_res, + batch.high_res, + self.discriminator_weights, + weight_gen_advers=weight_gen_advers, + optimizer=self.optimizer_disc, + train_gen=False, + train_disc=True, + multi_gpu=multi_gpu, + ) + + b_loss_details['gen_trained_frac'] = float(trained_gen) + b_loss_details['disc_trained_frac'] = float(trained_disc) + + self.dict_to_tensorboard(b_loss_details) + self.dict_to_tensorboard(self.timer.log) + + loss_details = self.update_loss_details( + loss_details, + b_loss_details, + batch_handler.batch_size, + prefix='train_', + ) + logger.debug( + 'Batch {} out of {} has epoch-average ' + '(gen / disc) loss of: ({:.2e} / {:.2e}). ' + 'Trained (gen / disc): ({} / {})'.format( + ib + 1, + len(batch_handler), + loss_details['train_loss_gen'], + loss_details['train_loss_disc'], + trained_gen, + trained_disc, + ) + ) + if all([not trained_gen, not trained_disc]): + msg = ( + 'For some reason none of the GAN networks trained ' + 'during batch {} out of {}!'.format(ib, len(batch_handler)) + ) + logger.warning(msg) + warn(msg) + self.total_batches += 1 + + loss_details['total_batches'] = int(self.total_batches) + self.profile_to_tensorboard('training_epoch') + return loss_details
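+# Standalone sketch (not library source) of the train/rest gating used in
+# train_epoch above: with disc_loss_bounds=(0.45, 0.6), a dominant
+# discriminator (loss <= 0.45) is rested, and a losing one (loss > 0.6)
+# trains while the generator rests.
+def demo_should_train(loss_disc, bounds, train_gen=True, train_disc=True):
+    disc_too_good = loss_disc <= min(bounds)
+    disc_too_bad = (loss_disc > max(bounds)) and train_disc
+    gen_too_good = disc_too_bad
+    return (train_gen and not gen_too_good,
+            train_disc and not disc_too_good)
+
+assert demo_should_train(0.40, (0.45, 0.6)) == (True, False)
+assert demo_should_train(0.70, (0.45, 0.6)) == (False, True)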
+ + +
+[docs] + def update_adversarial_weights( + self, + history, + adaptive_update_fraction, + adaptive_update_bounds, + weight_gen_advers, + train_disc, + ): + """Update spatial / temporal adversarial loss weights based on training + fraction history. + + Parameters + ---------- + history : dict + Dictionary with information on how often discriminators + were trained during current and previous epochs. + adaptive_update_fraction : float + Amount by which to increase or decrease adversarial loss weights + for adaptive updates + adaptive_update_bounds : tuple + Tuple specifying allowed range for history[comparison_key]. If + history[comparison_key] < update_bounds[0] then the weight will be + increased by (1 + update_frac). If history[comparison_key] > + update_bounds[1] then the weight will be decreased by 1 / (1 + + update_frac). + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + train_disc : bool + Whether the discriminator was set to be trained during the + previous epoch + + Returns + ------- + weight_gen_advers : float + Updated weight factor for the adversarial loss component of the + generator vs. the discriminator. + """ + + if adaptive_update_fraction > 0: + update_frac = 1 + if train_disc: + update_frac = self.get_weight_update_fraction( + history, + 'train_disc_trained_frac', + update_frac=adaptive_update_fraction, + update_bounds=adaptive_update_bounds, + ) + weight_gen_advers *= update_frac + + if update_frac != 1: + logger.debug( + f'New discriminator weight: {weight_gen_advers:.4e}' + ) + + return weight_gen_advers
+ + +
+[docs] + @staticmethod + def check_batch_handler_attrs(batch_handler): + """Not all batch handlers have the following attributes. So we perform + some sanitization before sending to `set_model_params`""" + return { + k: getattr(batch_handler, k, None) + for k in [ + 'smoothing', + 'lr_features', + 'hr_exo_features', + 'hr_out_features', + 'smoothed_features', + ] + if hasattr(batch_handler, k) + }
+ + +
+[docs] + def train( + self, + batch_handler, + input_resolution, + n_epoch, + weight_gen_advers=0.001, + train_gen=True, + train_disc=True, + disc_loss_bounds=(0.45, 0.6), + checkpoint_int=None, + out_dir='./gan_{epoch}', + early_stop_on=None, + early_stop_threshold=0.005, + early_stop_n_epoch=5, + adaptive_update_bounds=(0.9, 0.99), + adaptive_update_fraction=0.0, + multi_gpu=False, + tensorboard_log=True, + tensorboard_profile=False, + ): + """Train the GAN model on real low res data and real high res data + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + input_resolution : dict + Dictionary specifying spatiotemporal input resolution. e.g. + {'temporal': '60min', 'spatial': '30km'} + n_epoch : int + Number of epochs to train on + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + train_gen : bool + Flag whether to train the generator for this set of epochs + train_disc : bool + Flag whether to train the discriminator for this set of epochs + disc_loss_bounds : tuple + Lower and upper bounds for the discriminator loss outside of which + the discriminator will not train unless train_disc=True and + train_gen=False. + checkpoint_int : int | None + Epoch interval at which to save checkpoint models. + out_dir : str + Directory to save checkpoint GAN models. Should have {epoch} in + the directory name. This directory will be created if it does not + already exist. + early_stop_on : str | None + If not None, this should be a column in the training history to + evaluate for early stopping (e.g. validation_loss_gen, + validation_loss_disc). If this value in the history decreases by + an absolute fractional relative difference of less than + early_stop_threshold for more than early_stop_n_epoch epochs in a + row, the training will stop early. + early_stop_threshold : float + The absolute relative fractional difference in validation loss + between subsequent epochs below which an early termination is + warranted. E.g. if val losses were 0.1 and 0.0998 the relative + diff would be calculated as 0.0002 / 0.1 = 0.002 which would be + less than the default threshold of 0.005 and would satisfy the + condition for early termination. + early_stop_n_epoch : int + The number of consecutive epochs that satisfy the threshold that + warrants an early stop. + adaptive_update_bounds : tuple + Tuple specifying allowed range for loss_details[comparison_key]. If + history[comparison_key] < adaptive_update_bounds[0] then the weight + will be increased by (1 + adaptive_update_fraction). If + history[comparison_key] > adaptive_update_bounds[1] then the weight + will be decreased by 1 / (1 + adaptive_update_fraction). + adaptive_update_fraction : float + Amount by which to increase or decrease adversarial weights for + adaptive updates + multi_gpu : bool + Flag to break up the batch for parallel gradient descent + calculations on multiple gpus. If True and multiple GPUs are + present, each batch from the batch_handler will be divided up + between the GPUs and the resulting gradient from each GPU will + constitute a single gradient descent step with the nominal learning + rate that the model was initialized with. If True and multiple GPUs + are found, default_device should be set to /cpu:0 + tensorboard_log : bool + Whether to write log file for use with tensorboard. Log data can + be viewed with ``tensorboard --logdir <logdir>`` where ``<logdir>`` + is the parent directory of ``out_dir``, and pointing the browser to + the printed address. 
+ tensorboard_profile : bool + Whether to export profiling information to tensorboard. This can + then be viewed in the tensorboard dashboard under the profile tab + + TODO: (1) args here are getting excessive. Might be time for some + refactoring. + (2) calc_val_loss should be done in a separate thread from train_epoch + so they can be done concurrently. This would be especially important + for batch handlers which require val data, like dc handlers. + (3) Would like an automatic way to exit the batch handler thread + instead of manually calling .stop() here. + """ + if tensorboard_log: + self._init_tensorboard_writer(out_dir) + if tensorboard_profile: + self._write_tb_profile = True + + self.set_norm_stats(batch_handler.means, batch_handler.stds) + params = self.check_batch_handler_attrs(batch_handler) + self.set_model_params( + input_resolution=input_resolution, + s_enhance=batch_handler.s_enhance, + t_enhance=batch_handler.t_enhance, + **params, + ) + + epochs = list(range(n_epoch)) + + if self._history is None: + self._history = pd.DataFrame(columns=['elapsed_time']) + self._history.index.name = 'epoch' + else: + epochs += self._history.index.values[-1] + 1 + + t0 = time.time() + logger.info( + 'Training model with adversarial weight: {} ' + 'for {} epochs starting at epoch {}'.format( + weight_gen_advers, n_epoch, epochs[0] + ) + ) + + for epoch in epochs: + loss_details = self.train_epoch( + batch_handler, + weight_gen_advers, + train_gen, + train_disc, + disc_loss_bounds, + multi_gpu=multi_gpu, + ) + train_n_obs = loss_details['n_obs'] + loss_details = self.calc_val_loss( + batch_handler, weight_gen_advers, loss_details + ) + val_n_obs = loss_details['n_obs'] + + msg = f'Epoch {epoch} of {epochs[-1]} ' + msg += 'gen/disc train loss: {:.2e}/{:.2e} '.format( + loss_details['train_loss_gen'], loss_details['train_loss_disc'] + ) + + check1 = 'val_loss_gen' in loss_details + check2 = 'val_loss_disc' in loss_details + if check1 and check2: + msg += 'gen/disc val loss: {:.2e}/{:.2e} '.format( + loss_details['val_loss_gen'], loss_details['val_loss_disc'] + ) + + logger.info(msg) + + extras = { + 'train_n_obs': train_n_obs, + 'val_n_obs': val_n_obs, + 'weight_gen_advers': weight_gen_advers, + 'disc_loss_bound_0': disc_loss_bounds[0], + 'disc_loss_bound_1': disc_loss_bounds[1], + } + + opt_g = self.get_optimizer_state(self.optimizer) + opt_d = self.get_optimizer_state(self.optimizer_disc) + opt_g = {f'OptmGen/{key}': val for key, val in opt_g.items()} + opt_d = {f'OptmDisc/{key}': val for key, val in opt_d.items()} + extras.update(opt_g) + extras.update(opt_d) + + weight_gen_advers = self.update_adversarial_weights( + loss_details, + adaptive_update_fraction, + adaptive_update_bounds, + weight_gen_advers, + train_disc, + ) + + stop = self.finish_epoch( + epoch, + epochs, + t0, + loss_details, + checkpoint_int, + out_dir, + early_stop_on, + early_stop_threshold, + early_stop_n_epoch, + extras=extras, + ) + if stop: + break + + batch_handler.stop()
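+# Standalone check of the early-stopping arithmetic described in the
+# docstring above: consecutive validation losses of 0.1 and 0.0998 give a
+# relative difference of 0.002, below the default threshold of 0.005, so
+# the epoch counts toward the early_stop_n_epoch streak.
+demo_prev, demo_curr = 0.1, 0.0998
+demo_rel_diff = abs(demo_prev - demo_curr) / demo_prev
+assert abs(demo_rel_diff - 0.002) < 1e-12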
+
\ No newline at end of file
diff --git a/_modules/sup3r/models/conditional.html b/_modules/sup3r/models/conditional.html
new file mode 100644
index 000000000..806615107
--- /dev/null
+++ b/_modules/sup3r/models/conditional.html
@@ -0,0 +1,1175 @@
+sup3r.models.conditional — sup3r 0.2.1.dev62 documentation

Source code for sup3r.models.conditional

+"""Sup3r conditional moment model software"""
+
+import logging
+import os
+import pprint
+import time
+
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from tensorflow.keras import optimizers
+
+from sup3r.utilities import VERSION_RECORD
+
+from .abstract import AbstractInterface, AbstractSingleModel
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Sup3rCondMom(AbstractSingleModel, AbstractInterface): + """Basic Sup3r conditional moments model.""" + + def __init__( + self, + gen_layers, + optimizer=None, + learning_rate=1e-4, + num_par=None, + history=None, + meta=None, + means=None, + stdevs=None, + default_device=None, + name=None, + ): + """ + Parameters + ---------- + gen_layers : list | str + Hidden layers input argument to phygnn.base.CustomNetwork for the + generative super resolving model. Can also be a str filepath to a + .json config file containing the input layers argument or a .pkl + for a saved pre-trained model. + optimizer : tf.keras.optimizers.Optimizer | dict | None | str + Instantiated tf.keras.optimizers object or a dict optimizer config + from tf.keras.optimizers.get_config(). None defaults to Adam. + learning_rate : float, optional + Optimizer learning rate. Not used if optimizer input arg is a + pre-initialized object or if optimizer input arg is a config dict. + num_par : int | None + Number of trainable parameters in the model + history : pd.DataFrame | str | None + Model training history with "epoch" index, str pointing to a saved + history csv file with "epoch" as first column, or None for clean + history + meta : dict | None + Model meta data that describes how the model was created. + means : dict | None + Set of mean values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + stdevs : dict | None + Set of stdev values for data normalization keyed by feature name. + Can be used to maintain a consistent normalization scheme between + transfer learning domains. + default_device : str | None + Option for default device placement of model weights. If None and a + single GPU exists, that GPU will be the default device. If None and + multiple GPUs exist, the CPU will be the default device (this was + tested as most efficient given the custom multi-gpu strategy + developed in self.run_gradient_descent()) + name : str | None + Optional name for the model. + """ + super().__init__() + + self.default_device = default_device + if self.default_device is None and len(self.gpu_list) == 1: + self.default_device = '/gpu:0' + elif self.default_device is None and len(self.gpu_list) > 1: + self.default_device = '/cpu:0' + + self.name = name if name is not None else self.__class__.__name__ + self._meta = meta if meta is not None else {} + self._num_par = num_par if num_par is not None else 0 + self.loss_name = 'MeanSquaredError' + self.loss_fun = self.get_loss_fun(self.loss_name) + + self._history = history + if isinstance(self._history, str): + self._history = pd.read_csv(self._history, index_col=0) + + self._optimizer = self.init_optimizer(optimizer, learning_rate) + + self._gen = self.load_network(gen_layers, 'generator') + + self._means = means + self._stdevs = stdevs + +
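+# Hedged usage sketch (not library source): instantiating the
+# conditional-moment model from a generator layer config file, as described
+# in the ``gen_layers`` docstring above. The file path is a placeholder.
+#
+#     model = Sup3rCondMom('gen_config.json', learning_rate=1e-4,
+#                          name='cond_mom_demo')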
+[docs] + def save(self, out_dir): + """Save the model with its sub-networks to a directory. + + Parameters + ---------- + out_dir : str + Directory to save model files. This directory will be created + if it does not already exist. + """ + + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + fp_gen = os.path.join(out_dir, 'model_gen.pkl') + self.generator.save(fp_gen) + + fp_history = None + if isinstance(self.history, pd.DataFrame): + fp_history = os.path.join(out_dir, 'history.csv') + self.history.to_csv(fp_history) + + self.save_params(out_dir) + + logger.info('Saved model to disk in directory: {}'.format(out_dir))
+ + +
+[docs] + @classmethod + def load(cls, model_dir, verbose=True): + """Load the model with its sub-networks from a previously saved-to + output directory. + + Parameters + ---------- + model_dir : str + Directory to load model files from. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : BaseModel + Returns a pretrained conditional moment model that was previously + saved to model_dir + """ + if verbose: + logger.info( + 'Loading model from disk in directory: {}'.format(model_dir) + ) + msg = 'Active python environment versions: \n{}'.format( + pprint.pformat(VERSION_RECORD, indent=4) + ) + logger.info(msg) + + fp_gen = os.path.join(model_dir, 'model_gen.pkl') + params = cls.load_saved_params(model_dir, verbose=verbose) + return cls(fp_gen, **params)
+ + +
+[docs] + def update_optimizer(self, **kwargs): + """Update optimizer by changing current configuration + + Parameters + ---------- + kwargs : dict + kwargs to use for optimizer configuration update + """ + + conf = self.get_optimizer_config(self.optimizer) + conf.update(**kwargs) + OptimizerClass = getattr(optimizers, conf['name']) + self._optimizer = OptimizerClass.from_config(conf)
+ + + @property + def meta(self): + """Get meta data dictionary that defines how the model was created""" + + if 'class' not in self._meta: + self._meta['class'] = self.__class__.__name__ + + return self._meta + + @property + def model_params(self): + """ + Model parameters, used to save model to disk + + Returns + ------- + dict + """ + + config_optm_g = self.get_optimizer_config(self.optimizer) + + num_par = int( + np.sum([np.prod(v.get_shape().as_list()) for v in self.weights]) + ) + + means = self._means + stdevs = self._stdevs + if means is not None and stdevs is not None: + means = {k: float(v) for k, v in means.items()} + stdevs = {k: float(v) for k, v in stdevs.items()} + + model_params = { + 'name': self.name, + 'num_par': num_par, + 'version_record': self.version_record, + 'optimizer': config_optm_g, + 'means': means, + 'stdevs': stdevs, + 'meta': self.meta, + } + + return model_params + + @property + def weights(self): + """Get a list of all the layer weights and bias terms for the + generator network + """ + return self.generator_weights + +
+[docs] + @tf.function + def calc_loss_cond_mom(self, output_true, output_gen, mask): + """Calculate the loss of the moment predictor + + Parameters + ---------- + output_true : tf.Tensor + True realization output + output_gen : tf.Tensor + Predicted realization output + mask : tf.Tensor + Mask to apply + + Returns + ------- + loss : tf.Tensor + 0D tensor generator model loss for the MSE loss of the + moment predictor + """ + + loss = self.loss_fun(output_true * mask, output_gen * mask) + + return loss
+ + +
+[docs] + def calc_loss(self, output_true, output_gen, mask): + """Calculate the total moment predictor loss + + Parameters + ---------- + output_true : tf.Tensor + True realization output + output_gen : tf.Tensor + Predicted realization output + mask : tf.Tensor + Mask to apply + + Returns + ------- + loss : tf.Tensor + 0D tensor representing the loss value for the + moment predictor + loss_details : dict + Namespace of the breakdown of loss components + """ + output_gen = self._combine_loss_input(output_true, output_gen) + + if output_gen.shape != output_true.shape: + msg = ( + 'The tensor shapes of the synthetic output {} and ' + 'true output {} did not have matching shape! ' + 'Check the spatiotemporal enhancement multipliers in ' + 'your model config and data handlers.'.format( + output_gen.shape, output_true.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + loss = self.calc_loss_cond_mom(output_true, output_gen, mask) + + loss_details = {'loss_gen': loss} + + return loss, loss_details
+ + +
+[docs] + def calc_val_loss(self, batch_handler, loss_details): + """Calculate the validation loss at the current state of model training + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + loss_details : dict + Namespace of the breakdown of loss components + + Returns + ------- + loss_details : dict + Same as input but now includes val_* loss info + """ + logger.debug('Starting end-of-epoch validation loss calculation...') + loss_details['n_obs'] = 0 + for val_batch in batch_handler.val_data: + val_exo_data = self.get_high_res_exo_input(val_batch.high_res) + output_gen = self._tf_generate(val_batch.low_res, val_exo_data) + _, v_loss_details = self.calc_loss( + val_batch.output, output_gen, val_batch.mask + ) + + loss_details = self.update_loss_details( + loss_details, v_loss_details, len(val_batch), prefix='val_' + ) + + return loss_details
+ + +
+[docs] + def train_epoch(self, batch_handler, multi_gpu=False): + """Train the model for one epoch. + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + multi_gpu : bool + Flag to break up the batch for parallel gradient descent + calculations on multiple gpus. If True and multiple GPUs are + present, each batch from the batch_handler will be divided up + between the GPUs and the resulting gradient from each GPU will + constitute a single gradient descent step with the nominal learning + rate that the model was initialized with. + + Returns + ------- + loss_details : dict + Namespace of the breakdown of loss components + """ + + loss_details = {'n_obs': 0} + + for ib, batch in enumerate(batch_handler): + b_loss_details = {} + b_loss_details = self.run_gradient_descent( + batch.low_res, + batch.output, + self.generator_weights, + optimizer=self.optimizer, + multi_gpu=multi_gpu, + mask=batch.mask, + ) + + loss_details = self.update_loss_details( + loss_details, + b_loss_details, + batch_handler.batch_size, + prefix='train_', + ) + + logger.debug( + 'Batch {} out of {} has epoch-average ' + 'gen loss of: {:.2e}. '.format( + ib, len(batch_handler), loss_details['train_loss_gen'] + ) + ) + + return loss_details
+ + +
+[docs] + def train( + self, + batch_handler, + input_resolution, + n_epoch, + checkpoint_int=None, + out_dir='./condMom_{epoch}', + early_stop_on=None, + early_stop_threshold=0.005, + early_stop_n_epoch=5, + multi_gpu=False, + tensorboard_log=True, + ): + """Train the model on real low res data and real high res data + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + input_resolution : dict + Dictionary specifying spatiotemporal input resolution. e.g. + {'temporal': '60min', 'spatial': '30km'} + n_epoch : int + Number of epochs to train on + checkpoint_int : int | None + Epoch interval at which to save checkpoint models. + out_dir : str + Directory to save checkpoint models. Should have {epoch} in + the directory name. This directory will be created if it does not + already exist. + early_stop_on : str | None + If not None, this should be a column in the training history to + evaluate for early stopping (e.g. validation_loss_gen). + If this value in the history decreases by + an absolute fractional relative difference of less than + early_stop_threshold for more than early_stop_n_epoch epochs in a + row, the training will stop early. + early_stop_threshold : float + The absolute relative fractional difference in validation loss + between subsequent epochs below which an early termination is + warranted. E.g. if val losses were 0.1 and 0.0998 the relative + diff would be calculated as 0.0002 / 0.1 = 0.002 which would be + less than the default threshold of 0.005 and would satisfy the + condition for early termination. + early_stop_n_epoch : int + The number of consecutive epochs that satisfy the threshold that + warrants an early stop. + multi_gpu : bool + Flag to break up the batch for parallel gradient descent + calculations on multiple gpus. If True and multiple GPUs are + present, each batch from the batch_handler will be divided up + between the GPUs and the resulting gradient from each GPU will + constitute a single gradient descent step with the nominal learning + rate that the model was initialized with. + tensorboard_log : bool + Whether to write log file for use with tensorboard. Log data can + be viewed with ``tensorboard --logdir <logdir>`` where ``<logdir>`` + is the parent directory of ``out_dir``, and pointing the browser to + the printed address. 
+ """ + if tensorboard_log: + self._init_tensorboard_writer(out_dir) + + self.set_norm_stats(batch_handler.means, batch_handler.stds) + self.set_model_params( + input_resolution=input_resolution, + s_enhance=batch_handler.s_enhance, + t_enhance=batch_handler.t_enhance, + smoothing=batch_handler.smoothing, + lr_features=batch_handler.lr_features, + hr_exo_features=batch_handler.hr_exo_features, + hr_out_features=batch_handler.hr_out_features, + smoothed_features=batch_handler.smoothed_features, + ) + + epochs = list(range(n_epoch)) + + if self._history is None: + self._history = pd.DataFrame(columns=['elapsed_time']) + self._history.index.name = 'epoch' + else: + epochs += self._history.index.values[-1] + 1 + + t0 = time.time() + logger.info( + 'Training model ' 'for {} epochs starting at epoch {}'.format( + n_epoch, epochs[0] + ) + ) + + for epoch in epochs: + loss_details = self.train_epoch(batch_handler, multi_gpu=multi_gpu) + + loss_details = self.calc_val_loss(batch_handler, loss_details) + + msg = f'Epoch {epoch} of {epochs[-1]} ' + msg += 'gen train loss: {:.2e} '.format( + loss_details['train_loss_gen'] + ) + + if all(loss in loss_details for loss in ['val_loss_gen']): + msg += 'gen val loss: {:.2e} '.format( + loss_details['val_loss_gen'] + ) + + logger.info(msg) + + lr_g = self.get_optimizer_config(self.optimizer)['learning_rate'] + + extras = {'learning_rate_gen': lr_g} + + stop = self.finish_epoch( + epoch, + epochs, + t0, + loss_details, + checkpoint_int, + out_dir, + early_stop_on, + early_stop_threshold, + early_stop_n_epoch, + extras=extras, + ) + + if stop: + break + batch_handler.stop()
+
\ No newline at end of file
diff --git a/_modules/sup3r/models/dc.html b/_modules/sup3r/models/dc.html
new file mode 100644
index 000000000..9533a8066
--- /dev/null
+++ b/_modules/sup3r/models/dc.html
@@ -0,0 +1,814 @@
+sup3r.models.dc — sup3r 0.2.1.dev62 documentation

Source code for sup3r.models.dc

+"""Sup3r data-centric model software"""
+
+import logging
+
+import numpy as np
+
+from .base import Sup3rGan
+
+np.set_printoptions(precision=3)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Sup3rGanDC(Sup3rGan): + """Data-centric model using loss across space and time bins to select + training observations""" + +
+[docs] + def calc_val_loss_gen(self, batch_handler, weight_gen_advers): + """Calculate the validation total loss across the validation + samples. e.g. If the sample domain has 100 steps and the + validation set has 10 bins then this will get a list of losses across + step 0 to 10, 10 to 20, etc. Use this to determine performance + within bins and to update how observations are selected from these + bins. + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandlerDC + BatchHandler object to iterate through + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + + Returns + ------- + array + Array of total losses for all sample bins, with shape + (n_space_bins, n_time_bins) + """ + losses = np.zeros( + (batch_handler.n_space_bins, batch_handler.n_time_bins), + dtype=np.float32, + ) + for i, batch in enumerate(batch_handler.val_data): + exo_data = self.get_high_res_exo_input(batch.high_res) + loss, _ = self.calc_loss( + hi_res_true=batch.high_res, + hi_res_gen=self._tf_generate(batch.low_res, exo_data), + weight_gen_advers=weight_gen_advers, + train_gen=True, + train_disc=True, + ) + row = i // batch_handler.n_time_bins + col = i % batch_handler.n_time_bins + losses[row, col] = loss + return losses
+ + +
+[docs] + def calc_val_loss_gen_content(self, batch_handler): + """Calculate the validation content loss across the validation + samples. e.g. If the sample domain has 100 steps and the + validation set has 10 bins then this will get a list of losses across + step 0 to 10, 10 to 20, etc. Use this to determine performance + within bins and to update how observations are selected from these + bins. + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandlerDC + BatchHandler object to iterate through + + Returns + ------- + array + Array of content losses for all sample bins, with shape + (n_space_bins, n_time_bins) + """ + losses = np.zeros( + (batch_handler.n_space_bins, batch_handler.n_time_bins), + dtype=np.float32, + ) + for i, batch in enumerate(batch_handler.val_data): + exo_data = self.get_high_res_exo_input(batch.high_res) + loss = self.calc_loss_gen_content( + hi_res_true=batch.high_res, + hi_res_gen=self._tf_generate(batch.low_res, exo_data), + ) + row = i // batch_handler.n_time_bins + col = i % batch_handler.n_time_bins + losses[row, col] = loss + return losses
+ + +
+[docs] + def calc_val_loss(self, batch_handler, weight_gen_advers, loss_details): + """Overloading the base calc_val_loss method. Method updates the + spatial and temporal weights for the batch handler based on the losses + across the space and time bins + + Parameters + ---------- + batch_handler : sup3r.preprocessing.BatchHandler + BatchHandler object to iterate through + weight_gen_advers : float + Weight factor for the adversarial loss component of the generator + vs. the discriminator. + loss_details : dict + Namespace of the breakdown of loss components where each value is a + running average at the current state in the epoch. + + Returns + ------- + dict + Updated loss_details with mean validation loss calculated using + the validation samples across the space and time bins + """ + total_losses = self.calc_val_loss_gen(batch_handler, weight_gen_advers) + content_losses = self.calc_val_loss_gen_content(batch_handler) + + t_weights = total_losses.mean(axis=0) + t_weights /= t_weights.sum() + + s_weights = total_losses.mean(axis=1) + s_weights /= s_weights.sum() + + logger.debug( + f'Previous spatial weights: {batch_handler.spatial_weights}' + ) + logger.debug( + f'Previous temporal weights: {batch_handler.temporal_weights}' + ) + batch_handler.update_weights( + spatial_weights=s_weights, temporal_weights=t_weights + ) + logger.debug( + 'New spatiotemporal weights (space, time):\n' + f'{total_losses / total_losses.sum()}' + ) + logger.debug(f'New spatial weights: {s_weights}') + logger.debug(f'New temporal weights: {t_weights}') + + loss_details['mean_val_loss_gen'] = round(np.mean(total_losses), 3) + loss_details['mean_val_loss_gen_content'] = round( + np.mean(content_losses), 3 + ) + return loss_details
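+# Standalone sketch (not library source) of the bin-weight update performed
+# in calc_val_loss above: mean losses across the space axis become temporal
+# sampling weights, and vice versa. Values are illustrative only.
+import numpy as np
+demo_losses = np.array([[0.2, 0.4],
+                        [0.4, 0.6]])  # (n_space_bins, n_time_bins)
+demo_t_weights = demo_losses.mean(axis=0)
+demo_t_weights /= demo_t_weights.sum()  # [0.375, 0.625]: harder bins favored
+demo_s_weights = demo_losses.mean(axis=1)
+demo_s_weights /= demo_s_weights.sum()  # [0.375, 0.625]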
+
\ No newline at end of file
diff --git a/_modules/sup3r/models/linear.html b/_modules/sup3r/models/linear.html
new file mode 100644
index 000000000..f83f1bb65
--- /dev/null
+++ b/_modules/sup3r/models/linear.html
@@ -0,0 +1,843 @@
+sup3r.models.linear — sup3r 0.2.1.dev62 documentation

Source code for sup3r.models.linear

+"""Simple models for super resolution such as linear interp models."""
+import json
+import logging
+import os
+from inspect import signature
+
+import numpy as np
+
+from .abstract import AbstractInterface
+from .utilities import st_interp
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class LinearInterp(AbstractInterface): + """Simple model to do linear interpolation on the spatial and temporal axes + """ + + def __init__(self, lr_features, s_enhance, t_enhance, t_centered=False, + input_resolution=None): + """ + Parameters + ---------- + lr_features : list + List of feature names that this model will operate on for both + input and output. This must match the feature axis ordering in the + array input to generate(). + s_enhance : int + Integer factor by which the spatial axes are to be enhanced. + t_enhance : int + Integer factor by which the temporal axis is to be enhanced. + t_centered : bool + Flag to switch time axis from time-beginning (Default, e.g. + interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to + time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15) + input_resolution : dict | None + Resolution of the input data. e.g. {'spatial': '30km', 'temporal': + '60min'}. This is used to determine how to aggregate + high-resolution topography data. + """ + + self._lr_features = lr_features + self._s_enhance = s_enhance + self._t_enhance = t_enhance + self._t_centered = t_centered + self._input_resolution = input_resolution +
+[docs] + @classmethod + def load(cls, model_dir, verbose=False): + """Load the LinearInterp model with its params saved to the model_dir + created with LinearInterp.save(model_dir) + + Parameters + ---------- + model_dir : str + Directory to load LinearInterp model files from. Must + have a model_params.json file containing "meta" key with all of the + class init args. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : LinearInterp + Returns an initialized LinearInterp model + """ + fp_params = os.path.join(model_dir, 'model_params.json') + assert os.path.exists(fp_params), f'Could not find: {fp_params}' + with open(fp_params) as f: + params = json.load(f) + + meta = params['meta'] + args = signature(cls.__init__).parameters + kwargs = {k: v for k, v in meta.items() if k in args} + model = cls(**kwargs) + + if verbose: + logger.info('Loading LinearInterp with meta data: {}' + .format(model.meta)) + + return model
+ + + @property + def meta(self): + """Get meta data dictionary that defines the model params""" + return {'input_resolution': self._input_resolution, + 'lr_features': self._lr_features, + 's_enhance': self._s_enhance, + 't_enhance': self._t_enhance, + 't_centered': self._t_centered, + 'hr_out_features': self.hr_out_features, + 'class': self.__class__.__name__, + } + + @property + def lr_features(self): + """Get the list of input feature names that the generative model was + trained on. + """ + return self._lr_features + + @property + def hr_out_features(self): + """Get the list of output feature names that the generative model + outputs""" + return self._lr_features + + @property + def hr_exo_features(self): + """Returns an empty list for LinearInterp model""" + return [] + +
+[docs] + def save(self, out_dir): + """ + Parameters + ---------- + out_dir : str + Directory to save linear model params. This directory will be + created if it does not already exist. + """ + self.save_params(out_dir)
+ + + # pylint: disable=unused-argument +
+[docs] + def generate(self, low_res, norm_in=False, un_norm_out=False, + exogenous_data=None): + """Use the generator model to generate high res data from low res + input. This is the public generate function. + + Parameters + ---------- + low_res : np.ndarray + Low-resolution spatiotemporal input data, a 5D array of shape: + (n_obs, spatial_1, spatial_2, temporal, n_features) + norm_in : bool + This doesn't do anything for this LinearInterp, but is + kept to preserve the same interface as Sup3rGan + un_norm_out : bool + This doesn't do anything for this LinearInterp, but is + kept to preserve the same interface as Sup3rGan + exogenous_data : list + This doesn't do anything for this LinearInterp, but is + kept to preserve the same interface as Sup3rGan + + Returns + ------- + hi_res : ndarray + high-resolution spatial output data, a 5D array of shape: + (n_obs, spatial_1, spatial_2, temporal, n_features) + """ + + hr_shape = (len(low_res), + int(low_res.shape[1] * self._s_enhance), + int(low_res.shape[2] * self._s_enhance), + int(low_res.shape[3] * self._t_enhance), + len(self.hr_out_features)) + logger.debug('LinearInterp model with s_enhance of {} ' + 'and t_enhance of {} ' + 'downscaling low-res shape {} to high-res shape {}' + .format(self._s_enhance, self._t_enhance, + low_res.shape, hr_shape)) + + hi_res = np.zeros(hr_shape, dtype=np.float32) + + for iobs in range(len(low_res)): + for idf in range(low_res.shape[-1]): + hi_res[iobs, ..., idf] = st_interp(low_res[iobs, ..., idf], + self.s_enhance, + self.t_enhance, + t_centered=self._t_centered) + + return hi_res
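+# Hedged usage sketch (not library source): a 2x spatial / 4x temporal
+# enhancement with illustrative shapes. The feature name 'u_100m' is a
+# placeholder; any feature list matching the input's last axis works.
+import numpy as np
+demo_model = LinearInterp(lr_features=['u_100m'], s_enhance=2, t_enhance=4)
+demo_low_res = np.random.rand(1, 10, 10, 6, 1).astype(np.float32)
+demo_hi_res = demo_model.generate(demo_low_res)
+assert demo_hi_res.shape == (1, 20, 20, 24, 1)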
+
\ No newline at end of file
diff --git a/_modules/sup3r/models/multi_step.html b/_modules/sup3r/models/multi_step.html
new file mode 100644
index 000000000..dbe692e8a
--- /dev/null
+++ b/_modules/sup3r/models/multi_step.html
@@ -0,0 +1,1577 @@
+sup3r.models.multi_step — sup3r 0.2.1.dev62 documentation

Source code for sup3r.models.multi_step

+"""Sup3r multi step model frameworks
+
+TODO: SolarMultiStepGan can be cleaned up a little with the output padding and
+t_enhance argument moved to SolarCC.
+"""
+
+import json
+import logging
+import os
+
+import numpy as np
+
+# pylint: disable=cyclic-import
+import sup3r.models
+from sup3r.preprocessing.data_handlers import ExoData
+
+from .abstract import AbstractInterface
+from .base import Sup3rGan
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class MultiStepGan(AbstractInterface): + """Multi-Step GAN, which is really just an abstraction layer on top of one + or more Sup3rGan models that will perform their forward passes in + serial.""" + + def __init__(self, models): + """ + Parameters + ---------- + models : list | tuple + An ordered list/tuple of one or more trained Sup3rGan models + """ + self._models = tuple(models) + + def __len__(self): + """Get number of model steps""" + return len(self._models) + +
+[docs] + @classmethod + def load(cls, model_dirs, model_kwargs=None, verbose=True): + """Load the GANs with their sub-networks from previously saved-to + output directories. + + Parameters + ---------- + model_dirs : list | tuple + An ordered list/tuple of one or more directories containing trained + + saved Sup3rGan models created using the Sup3rGan.save() method. + model_kwargs : list | tuple + An ordered list/tuple of one or more dictionaries containing kwargs + for the corresponding model in model_dirs + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : MultiStepGan + Returns a pretrained gan model that was previously saved to + model_dirs + """ + + models = [] + if isinstance(model_dirs, str): + model_dirs = [model_dirs] + + model_kwargs = model_kwargs or [{}] * len(model_dirs) + if isinstance(model_kwargs, dict): + model_kwargs = [model_kwargs] + + for model_dir, kwargs in zip(model_dirs, model_kwargs): + fp_params = os.path.join(model_dir, 'model_params.json') + assert os.path.exists(fp_params), f'Could not find: {fp_params}' + with open(fp_params) as f: + params = json.load(f) + + meta = params.get('meta', {'class': 'Sup3rGan'}) + class_name = meta.get('class', 'Sup3rGan') + Sup3rClass = getattr(sup3r.models, class_name) + models.append( + Sup3rClass.load(model_dir, verbose=verbose, **kwargs) + ) + + return cls(models)
+ + + @property + def models(self): + """Get an ordered tuple of the Sup3rGan models that are part of this + MultiStepGan + """ + return self._models + + @property + def means(self): + """Get the data normalization mean values. This is a tuple of means + from all models. + + Returns + ------- + tuple | np.ndarray + """ + return tuple(model.means for model in self.models) + + @property + def stdevs(self): + """Get the data normalization standard deviation values. This is a + tuple of stdevs from all models. + + Returns + ------- + tuple | np.ndarray + """ + return tuple(model.stdevs for model in self.models) + +
+[docs] + @staticmethod + def seed(s=0): + """ + Set the random seed for reproducible results. + + Parameters + ---------- + s : int + Random seed + """ + Sup3rGan.seed(s=s)
+ + + def _transpose_model_input(self, model, hi_res): + """Transpose input data according to model input dimensions. + + Note + ---- + If len(hi_res.shape) == 4, it is assumed that the dimensions have the + ordering (n_obs, spatial_1, spatial_2, features) + + If len(hi_res.shape) == 5, it is assumed that the dimensions have the + ordering (1, spatial_1, spatial_2, temporal, features) + + Parameters + ---------- + model : Sup3rGan + A single step model with the attribute model.input_dims + hi_res : ndarray + Synthetically generated high-resolution data, usually a 4D or 5D + array with shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + + Returns + ------- + hi_res : ndarray + Synthetically generated high-resolution data transposed according + to the number of model input dimensions + """ + if model.is_5d and len(hi_res.shape) == 4: + hi_res = np.transpose(hi_res, axes=(1, 2, 0, 3))[np.newaxis] + elif model.is_4d and len(hi_res.shape) == 5: + msg = ( + 'Received 5D input data with shape ' + f'({hi_res.shape}) to a 4D model.' + ) + assert hi_res.shape[0] == 1, msg + hi_res = np.transpose(hi_res[0], axes=(2, 0, 1, 3)) + else: + msg = ( + 'Received input data with shape ' + f'{hi_res.shape} to a {model.input_dims}D model.' + ) + assert model.input_dims == len(hi_res.shape), msg + return hi_res +
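+# Standalone check of the 4D -> 5D transpose used above: an
+# (n_obs, spatial_1, spatial_2, features) batch becomes
+# (1, spatial_1, spatial_2, n_obs, features) for a 5D model.
+import numpy as np
+demo_x4 = np.zeros((8, 10, 10, 3))
+demo_x5 = np.transpose(demo_x4, axes=(1, 2, 0, 3))[np.newaxis]
+assert demo_x5.shape == (1, 10, 10, 8, 3)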
+[docs] + def generate( + self, low_res, norm_in=True, un_norm_out=True, exogenous_data=None + ): + """Use the generator model to generate high res data from low res + input. This is the public generate function. + + Parameters + ---------- + low_res : np.ndarray + Low-resolution input data, usually a 4D or 5D array of shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + norm_in : bool + Flag to normalize low_res input data if the self.means, + self.stdevs attributes are available. The generator should always + receive normalized data with mean=0 stdev=1. + un_norm_out : bool + Flag to un-normalize synthetically generated output data to physical + units + exogenous_data : ExoData + :class:`ExoData` object, which is a special dictionary containing + exogenous data for each model step and info about how to use the + data at each step. + + Returns + ------- + hi_res : ndarray + Synthetically generated high-resolution data, usually a 4D or 5D + array with shape: + (n_obs, spatial_1, spatial_2, n_features) + (n_obs, spatial_1, spatial_2, n_temporal, n_features) + """ + if isinstance(exogenous_data, dict) and not isinstance( + exogenous_data, ExoData + ): + exogenous_data = ExoData(exogenous_data) + + hi_res = low_res.copy() + for i, model in enumerate(self.models): + i_norm_in = not (i == 0 and not norm_in) + i_un_norm_out = not (i + 1 == len(self.models) and not un_norm_out) + + i_exo_data = ( + None + if exogenous_data is None + else exogenous_data.get_model_step_exo(i) + ) + + try: + hi_res = self._transpose_model_input(model, hi_res) + logger.debug( + 'Data input to model #{} of {} has shape {}'.format( + i + 1, len(self.models), hi_res.shape + ) + ) + hi_res = model.generate( + hi_res, + norm_in=i_norm_in, + un_norm_out=i_un_norm_out, + exogenous_data=i_exo_data, + ) + logger.debug( + 'Data output from model #{} of {} has shape {}'.format( + i + 1, len(self.models), hi_res.shape + ) + ) + except Exception as e: + msg = ( + 'Could not run model #{} of {} "{}" ' + 'on tensor of shape {}'.format( + i + 1, len(self.models), model, hi_res.shape + ) + ) + logger.exception(msg) + raise RuntimeError(msg) from e + + return hi_res
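+# Hedged usage sketch (not library source): chaining two saved single-step
+# models. The directories are placeholders; ``load`` and ``generate`` are
+# shown in the source above.
+#
+#     model = MultiStepGan.load(['./spatial_gan', './temporal_gan'])
+#     hi_res = model.generate(low_res)  # (n_obs, s1, s2, n_features) input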
+ + + @property + def version_record(self): + """Get a tuple of version records from all models + + Returns + ------- + tuple + """ + return tuple(model.version_record for model in self.models) + + @property + def meta(self): + """Get a tuple of meta data dictionaries for all models + + Returns + ------- + tuple + """ + return tuple(model.meta for model in self.models) + + @property + def lr_features(self): + """Get a list of low-resolution features input to the generative model. + This includes low-resolution features that might be supplied + exogenously at inference time but that were in the low-res batches + during training""" + return self.models[0].lr_features + + @property + def hr_out_features(self): + """Get the list of high-resolution output feature names that the + generative model outputs.""" + return self.models[-1].hr_out_features + + @property + def hr_exo_features(self): + """Get list of high-resolution exogenous feature names the model uses. + For the multi-step model, each entry in this list corresponds to one of + the single-step models and is itself a list of hr_exo_features for that + model. + """ + return [model.hr_exo_features for model in self.models] + + @property + def model_params(self): + """Get a tuple of model parameters for all models + + Returns + ------- + tuple + """ + return tuple(model.model_params for model in self.models)
+ + + +
+[docs] +class MultiStepSurfaceMetGan(MultiStepGan): + """A two-step GAN where the first step is a spatial-only enhancement on a + 4D tensor of near-surface temperature and relative humidity data, and the + second step is a (spatio)temporal enhancement on a 5D tensor. + + Note + ---- + No inputs are needed for the first spatial-only surface meteorology + model. The spatial enhancement is determined by the low and high res + topography inputs in the exogenous_data kwargs in the + MultiStepSurfaceMetGan.generate() method. + + The low res input to the spatial enhancement should be a 4D tensor of + the shape (temporal, spatial_1, spatial_2, features) where temporal + (usually the observation index) is a series of sequential timesteps that + will be transposed to a 5D tensor of shape + (1, spatial_1, spatial_2, temporal, features) and then fed to the + 2nd-step (spatio)temporal GAN. + """ +
+[docs] + def generate( + self, low_res, norm_in=True, un_norm_out=True, exogenous_data=None + ): + """Use the generator model to generate high res data from low res + input. This is the public generate function. + + Parameters + ---------- + low_res : np.ndarray + Low-resolution spatial input data, a 4D array of shape: + (n_obs, spatial_1, spatial_2, n_features), Where the feature + channel can include temperature_*m, relativehumidity_*m, and/or + pressure_*m + norm_in : bool + Flag to normalize low_res input data if the self.means, + self.stdevs attributes are available. The generator should always + receive normalized data with mean=0 stdev=1. + un_norm_out : bool + Flag to un-normalize synthetically generated output data to physical + units + exogenous_data : dict + For the MultiStepSurfaceMetGan, this must be a nested dictionary + with a main 'topography' key and two entries for + exogenous_data['topography']['steps']. The first entry includes a + 2D (lat, lon) array of low-resolution surface elevation data in + meters (must match spatial_1, spatial_2 from low_res), and the + second entry includes a 2D (lat, lon) array of high-resolution + surface elevation data in meters. e.g. + .. code-block:: python + {'topography': { + 'steps': [ + {'model': 0, + 'combine_type': 'input', + 'data': lr_topo}, + {'model': 0, + 'combine_type': 'output', + 'data': hr_topo} + ] + } + } + + Returns + ------- + hi_res : ndarray + Synthetically generated high-resolution data output from the 2nd + step (spatio)temporal GAN with a 5D array shape: + (1, spatial_1, spatial_2, n_temporal, n_features), Where the + feature channel can include temperature_*m, relativehumidity_*m, + and/or pressure_*m + """ + logger.debug( + 'Data input to the 1st step spatial-only ' + 'enhancement has shape {}'.format(low_res.shape) + ) + + msg = ( + 'MultiStepSurfaceMetGan needs exogenous_data with two ' + 'topography steps, for low and high res topography inputs.' + ) + exo_check = ( + exogenous_data is not None + and len(exogenous_data['topography']['steps']) == 2 + ) + assert exo_check, msg + + return super().generate(low_res, norm_in, un_norm_out, exogenous_data)
+ + +
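As a concrete illustration of the nested exogenous_data layout described in generate() above, here is a minimal usage sketch. The array shapes are hypothetical placeholders and the model-loading step is elided (see load() below):

    import numpy as np

    # hypothetical shapes: 10 daily obs on a 20x20 low-res grid with two
    # features (e.g. temperature_2m and relativehumidity_2m), 5x enhancement
    low_res = np.random.uniform(0, 1, (10, 20, 20, 2)).astype(np.float32)
    lr_topo = np.random.uniform(0, 500, (20, 20)).astype(np.float32)
    hr_topo = np.random.uniform(0, 500, (100, 100)).astype(np.float32)

    exogenous_data = {
        'topography': {
            'steps': [
                {'model': 0, 'combine_type': 'input', 'data': lr_topo},
                {'model': 0, 'combine_type': 'output', 'data': hr_topo},
            ]
        }
    }

    # model = MultiStepSurfaceMetGan.load(...)  # previously saved model dirs
    # hi_res = model.generate(low_res, exogenous_data=exogenous_data)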
+[docs]
+    @classmethod
+    def load(
+        cls,
+        surface_model_class='SurfaceSpatialMetModel',
+        temporal_model_class='MultiStepGan',
+        surface_model_kwargs=None,
+        temporal_model_kwargs=None,
+        verbose=True,
+    ):
+        """Load the GANs with their sub-networks from a previously saved-to
+        output directory.
+
+        Parameters
+        ----------
+        surface_model_class : str
+            Name of surface model class to be retrieved from sup3r.models,
+            this is typically "SurfaceSpatialMetModel"
+        temporal_model_class : str
+            Name of temporal model class to be retrieved from sup3r.models,
+            this is typically "Sup3rGan"
+        surface_model_kwargs : None | dict
+            Optional additional kwargs to surface_model_class.load()
+        temporal_model_kwargs : None | dict
+            Optional additional kwargs to temporal_model_class.load()
+        verbose : bool
+            Flag to log information about the loaded model.
+
+        Returns
+        -------
+        out : MultiStepGan
+            Returns a pretrained gan model that was previously saved to
+            model_dirs
+        """
+
+        if surface_model_kwargs is None:
+            surface_model_kwargs = {}
+
+        if temporal_model_kwargs is None:
+            temporal_model_kwargs = {}
+
+        SpatialModelClass = getattr(sup3r.models, surface_model_class)
+        s_models = SpatialModelClass.load(
+            verbose=verbose, **surface_model_kwargs
+        )
+
+        TemporalModelClass = getattr(sup3r.models, temporal_model_class)
+        t_models = TemporalModelClass.load(
+            verbose=verbose, **temporal_model_kwargs
+        )
+
+        s_models = getattr(s_models, 'models', [s_models])
+        t_models = getattr(t_models, 'models', [t_models])
+        return cls([*s_models, *t_models])
+
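A hedged loading sketch for the two-step model. The directory paths are placeholders, and the kwarg names inside the two kwargs dicts are assumptions based on the typical load() signatures of the underlying classes (model_dir for the surface model, model_dirs for the temporal MultiStepGan), not values from this page:

    from sup3r.models import MultiStepSurfaceMetGan

    model = MultiStepSurfaceMetGan.load(
        surface_model_kwargs={'model_dir': './surface_model'},     # hypothetical path
        temporal_model_kwargs={'model_dirs': ['./temporal_gan']},  # hypothetical path
    )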
+ + + +
+[docs]
+class SolarMultiStepGan(MultiStepGan):
+    """Special multi step model for solar clearsky ratio super resolution.
+
+    This model takes in two parallel models for wind-only and solar-only
+    spatial super resolutions, then combines them into a 3-channel
+    high-spatial-resolution input (clearsky_ratio, u_200m, v_200m) for a
+    solar temporal super resolution model.
+    """
+
+    def __init__(
+        self,
+        spatial_solar_models,
+        spatial_wind_models,
+        temporal_solar_models,
+        t_enhance=None,
+    ):
+        """
+        Parameters
+        ----------
+        spatial_solar_models : MultiStepGan
+            A loaded MultiStepGan object representing the one or more spatial
+            super resolution steps in this composite MultiStepGan
+            model that inputs and outputs clearsky_ratio
+        spatial_wind_models : MultiStepGan
+            A loaded MultiStepGan object representing the one or more spatial
+            super resolution steps in this composite MultiStepGan
+            model that inputs and outputs wind u/v features and must include
+            u_200m + v_200m as output features.
+        temporal_solar_models : MultiStepGan
+            A loaded MultiStepGan object representing the one or more
+            (spatio)temporal super resolution steps in this composite
+            SolarMultiStepGan model. This is the final step in the custom
+            solar downscaling methodology.
+        t_enhance : int | None
+            Optional argument to fix or update the temporal enhancement of
+            the model. This can be used to manipulate the output shape to
+            match whatever padded shape the sup3r forward pass module
+            expects. If this differs from the t_enhance value based on model
+            layers the output will be padded so that the output shape matches
+            low_res * t_enhance for the time dimension.
+        """
+
+        # Initializing parent without spatial solar models since this just
+        # defines self.models. self.models is used to determine the aggregate
+        # enhancement factors so including both spatial enhancement models
+        # would result in an incorrect calculation.
+        super().__init__(
+            models=[
+                *spatial_wind_models.models,
+                *temporal_solar_models.models,
+            ]
+        )
+        self._spatial_solar_models = spatial_solar_models
+        self._spatial_wind_models = spatial_wind_models
+        self._temporal_solar_models = temporal_solar_models
+        self._t_enhance = t_enhance
+
+        self.preflight()
+
+        if self._t_enhance is not None:
+            msg = (
+                'Can only update t_enhance for a '
+                'single temporal solar model.'
+            )
+            assert len(self.temporal_solar_models) == 1, msg
+            model = self.temporal_solar_models.models[0]
+            model.meta['t_enhance'] = self._t_enhance
+
+[docs] + def preflight(self): + """Run some preflight checks to make sure the loaded models can work + together.""" + + s_enh = self.spatial_solar_models.s_enhancements + w_enh = self.spatial_wind_models.s_enhancements + msg = ( + 'Solar and wind spatial enhancements must be equivalent but ' + 'received models that do spatial enhancements of ' + '{} (solar) and {} (wind)'.format(s_enh, w_enh) + ) + assert np.prod(s_enh) == np.prod(w_enh), msg + + s_t_feat = self.spatial_solar_models.lr_features + s_o_feat = self.spatial_solar_models.hr_out_features + msg = ( + 'Solar spatial enhancement models need to take ' + '"clearsky_ratio" as the only input and output feature but ' + 'received models that need {} and output {}'.format( + s_t_feat, s_o_feat + ) + ) + assert s_t_feat == ['clearsky_ratio'], msg + assert s_o_feat == ['clearsky_ratio'], msg + + temp_solar_feats = self.temporal_solar_models.lr_features + msg = ( + 'Input feature 0 for the temporal_solar_models should be ' + '"clearsky_ratio" but received: {}'.format(temp_solar_feats) + ) + assert temp_solar_feats[0] == 'clearsky_ratio', msg + + spatial_out_features = ( + self.spatial_wind_models.hr_out_features + + self.spatial_solar_models.hr_out_features + ) + missing = [ + fn for fn in temp_solar_feats if fn not in spatial_out_features + ] + msg = ( + 'Solar temporal model needs features {} that were not ' + 'found in the solar + wind model output feature list {}'.format( + missing, spatial_out_features + ) + ) + assert not any(missing), msg
+ + + @property + def spatial_solar_models(self): + """Get the MultiStepGan object for the spatial-only solar model(s) + + Returns + ------- + MultiStepGan + """ + return self._spatial_solar_models + + @property + def spatial_wind_models(self): + """Get the MultiStepGan object for the spatial-only wind model(s) + + Returns + ------- + MultiStepGan + """ + return self._spatial_wind_models + + @property + def temporal_solar_models(self): + """Get the MultiStepGan object for the (spatio)temporal model(s) + + Returns + ------- + MultiStepGan + """ + return self._temporal_solar_models + + @property + def meta(self): + """Get a tuple of meta data dictionaries for all models + + Returns + ------- + tuple + """ + return ( + self.spatial_solar_models.meta + + self.spatial_wind_models.meta + + self.temporal_solar_models.meta + ) + + @property + def lr_features(self): + """Get a list of low-resolution features input to the generative model. + This includes low-resolution features that might be supplied + exogenously at inference time but that were in the low-res batches + during training""" + return ( + self.spatial_solar_models.lr_features + + self.spatial_wind_models.lr_features + ) + + @property + def hr_out_features(self): + """Get the list of output feature names that the last solar + spatiotemporal generative model in this SolarMultiStepGan outputs.""" + return self.temporal_solar_models.hr_out_features + + @property + def idf_wind(self): + """Get an array of feature indices for the subset of features required + for the spatial_wind_models. This excludes topography which is assumed + to be provided as exogenous_data.""" + return np.array( + [ + self.lr_features.index(fn) + for fn in self.spatial_wind_models.lr_features + if fn != 'topography' + ] + ) + + @property + def idf_wind_out(self): + """Get an array of spatial_wind_models output feature indices that are + required for input to the temporal_solar_models. Typically this is the + indices of u_200m + v_200m from the output features of + spatial_wind_models""" + temporal_solar_features = self.temporal_solar_models.lr_features + return np.array( + [ + self.spatial_wind_models.hr_out_features.index(fn) + for fn in temporal_solar_features[1:] + ] + ) + + @property + def idf_solar(self): + """Get an array of feature indices for the subset of features required + for the spatial_solar_models. This excludes topography which is assumed + to be provided as exogenous_data.""" + return np.array( + [ + self.lr_features.index(fn) + for fn in self.spatial_solar_models.lr_features + if fn != 'topography' + ] + ) + +
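To make the index bookkeeping in idf_wind / idf_solar concrete, here is a small stand-alone illustration. The feature names are chosen to match the docstrings above, not read from a real model:

    lr_features = ['clearsky_ratio', 'u_200m', 'v_200m', 'topography']
    wind_lr_features = ['u_200m', 'v_200m', 'topography']

    idf_wind = [
        lr_features.index(fn) for fn in wind_lr_features if fn != 'topography'
    ]
    print(idf_wind)  # [1, 2] -> the u_200m and v_200m channels of low_res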
+[docs]
+    def generate(
+        self, low_res, norm_in=True, un_norm_out=True, exogenous_data=None
+    ):
+        """Use the generator model to generate high res data from low res
+        input. This is the public generate function.
+
+        Parameters
+        ----------
+        low_res : np.ndarray
+            Low-resolution input data to the 1st step spatial GAN, which is a
+            4D array of shape: (temporal, spatial_1, spatial_2, n_features).
+            This should include all of the ``self.lr_features`` which is a
+            concatenation of both the solar and wind spatial model features.
+            The topography feature might be removed from this input and
+            present in the exogenous_data input.
+        norm_in : bool
+            Flag to normalize low_res input data if the self.means,
+            self.stdevs attributes are available. The generator should always
+            receive normalized data with mean=0 stdev=1.
+        un_norm_out : bool
+            Flag to un-normalize synthetically generated output data to
+            physical units
+        exogenous_data : ExoData | dict
+            :class:`ExoData` object (or dict that will be cast to `ExoData`)
+            with data arrays for each exogenous data step. Each array has a
+            3D or 4D shape:
+            (spatial_1, spatial_2, n_features) or
+            (temporal, spatial_1, spatial_2, n_features)
+            It's assumed that the spatial_solar_models do not require
+            exogenous_data and only use clearsky_ratio.
+
+        Returns
+        -------
+        hi_res : ndarray
+            Synthetically generated high-resolution data output from the 2nd
+            step (spatio)temporal GAN with a 5D array shape:
+            (1, spatial_1, spatial_2, n_temporal, n_features)
+        """
+
+        logger.debug(
+            'Data input to the SolarMultiStepGan has shape {} which '
+            'will be split up for solar- and wind-only features.'.format(
+                low_res.shape
+            )
+        )
+        if isinstance(exogenous_data, dict) and not isinstance(
+            exogenous_data, ExoData
+        ):
+            exogenous_data = ExoData(exogenous_data)
+
+        if exogenous_data is not None:
+            s_exo, t_exo = exogenous_data.split(
+                split_steps=[len(self.spatial_wind_models)]
+            )
+        else:
+            s_exo = t_exo = None
+
+        try:
+            hi_res_wind = self.spatial_wind_models.generate(
+                low_res[..., self.idf_wind],
+                norm_in=norm_in,
+                un_norm_out=True,
+                exogenous_data=s_exo,
+            )
+        except Exception as e:
+            msg = (
+                'Could not run the 1st step spatial-wind-only GAN on '
+                'input shape {}'.format(low_res.shape)
+            )
+            logger.exception(msg)
+            raise RuntimeError(msg) from e
+
+        try:
+            hi_res_solar = self.spatial_solar_models.generate(
+                low_res[..., self.idf_solar], norm_in=norm_in, un_norm_out=True
+            )
+        except Exception as e:
+            msg = (
+                'Could not run the 1st step spatial-solar-only GAN on '
+                'input shape {}'.format(low_res.shape)
+            )
+            logger.exception(msg)
+            raise RuntimeError(msg) from e
+
+        logger.debug(
+            'Data output from the 1st step spatial enhancement has '
+            'shape {} (solar) and shape {} (wind)'.format(
+                hi_res_solar.shape, hi_res_wind.shape
+            )
+        )
+
+        hi_res = (hi_res_solar, hi_res_wind[..., self.idf_wind_out])
+        hi_res = np.concatenate(hi_res, axis=3)
+
+        logger.debug(
+            'Data output from the concatenated solar + wind 1st step '
+            'spatial-only enhancement has shape {}'.format(hi_res.shape)
+        )
+        hi_res = np.transpose(hi_res, axes=(1, 2, 0, 3))
+        hi_res = np.expand_dims(hi_res, axis=0)
+        logger.debug(
+            'Data from the concatenated solar + wind 1st step '
+            'spatial-only enhancement has been reshaped to {}'.format(
+                hi_res.shape
+            )
+        )
+
+        try:
+            hi_res = self.temporal_solar_models.generate(
+                hi_res,
+                norm_in=True,
+                un_norm_out=un_norm_out,
+                exogenous_data=t_exo,
+            )
+        except Exception as e:
+            msg = (
+                'Could not run the 2nd step (spatio)temporal solar GAN on '
+ 'input shape {}'.format(low_res.shape) + ) + logger.exception(msg) + raise RuntimeError(msg) from e + + hi_res = self.temporal_pad(low_res, hi_res) + + logger.debug( + 'Final SolarMultiStepGan output has shape: {}'.format(hi_res.shape) + ) + + return hi_res
+ + +
+[docs]
+    def temporal_pad(self, low_res, hi_res, mode='reflect'):
+        """Optionally add temporal padding to the 5D generated output array
+
+        Parameters
+        ----------
+        low_res : np.ndarray
+            Low-resolution input data to the 1st step spatial GAN, which is a
+            4D array of shape: (temporal, spatial_1, spatial_2, n_features).
+        hi_res : ndarray
+            Synthetically generated high-resolution data output from the 2nd
+            step (spatio)temporal GAN with a 5D array shape:
+            (1, spatial_1, spatial_2, n_temporal, n_features)
+        mode : str
+            Padding mode for np.pad()
+
+        Returns
+        -------
+        hi_res : ndarray
+            Synthetically generated high-resolution data output from the 2nd
+            step (spatio)temporal GAN with a 5D array shape:
+            (1, spatial_1, spatial_2, n_temporal, n_features)
+            with the temporal axis padded on either side so the temporal
+            length matches low_res.shape[0] * self.t_enhance.
+        """
+        t_shape = low_res.shape[0] * self.t_enhance
+        t_pad = int((t_shape - hi_res.shape[-2]) / 2)
+        pad_width = ((0, 0), (0, 0), (0, 0), (t_pad, t_pad), (0, 0))
+        hi_res = np.pad(hi_res, pad_width, mode=mode)
+        return hi_res
+ + +
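A worked numeric sketch of the padding arithmetic in temporal_pad, with hypothetical shapes (3 low-res timesteps, t_enhance of 24, and a generator output that came back 8 timesteps short):

    import numpy as np

    low_res = np.zeros((3, 10, 10, 4))     # (temporal, spatial_1, spatial_2, features)
    hi_res = np.zeros((1, 50, 50, 64, 1))  # generator output, 64 hourly steps
    t_enhance = 24

    t_shape = low_res.shape[0] * t_enhance         # 72 target timesteps
    t_pad = int((t_shape - hi_res.shape[-2]) / 2)  # (72 - 64) / 2 = 4
    pad_width = ((0, 0), (0, 0), (0, 0), (t_pad, t_pad), (0, 0))
    print(np.pad(hi_res, pad_width, mode='reflect').shape)  # (1, 50, 50, 72, 1)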
+[docs]
+    @classmethod
+    def load(
+        cls,
+        spatial_solar_model_dirs,
+        spatial_wind_model_dirs,
+        temporal_solar_model_dirs,
+        t_enhance=None,
+        verbose=True,
+    ):
+        """Load the GANs with their sub-networks from a previously saved-to
+        output directory.
+
+        Parameters
+        ----------
+        spatial_solar_model_dirs : str | list | tuple
+            An ordered list/tuple of one or more directories containing
+            trained + saved Sup3rGan models created using the Sup3rGan.save()
+            method. This must contain only spatial solar models that
+            input/output 4D tensors.
+        spatial_wind_model_dirs : str | list | tuple
+            An ordered list/tuple of one or more directories containing
+            trained + saved Sup3rGan models created using the Sup3rGan.save()
+            method. This must contain only spatial wind models that
+            input/output 4D tensors.
+        temporal_solar_model_dirs : str | list | tuple
+            An ordered list/tuple of one or more directories containing
+            trained + saved Sup3rGan models created using the Sup3rGan.save()
+            method. This must contain only (spatio)temporal solar models that
+            input/output 5D tensors that are the concatenated output of the
+            spatial_solar_models and the spatial_wind_models.
+        t_enhance : int | None
+            Optional argument to fix or update the temporal enhancement of
+            the model. This can be used to manipulate the output shape to
+            match whatever padded shape the sup3r forward pass module
+            expects. If this differs from the t_enhance value based on model
+            layers the output will be padded so that the output shape matches
+            low_res * t_enhance for the time dimension.
+        verbose : bool
+            Flag to log information about the loaded model.
+
+        Returns
+        -------
+        out : SolarMultiStepGan
+            Returns a pretrained gan model that was previously saved to
+            model_dirs
+        """
+        if isinstance(spatial_solar_model_dirs, str):
+            spatial_solar_model_dirs = [spatial_solar_model_dirs]
+        if isinstance(spatial_wind_model_dirs, str):
+            spatial_wind_model_dirs = [spatial_wind_model_dirs]
+        if isinstance(temporal_solar_model_dirs, str):
+            temporal_solar_model_dirs = [temporal_solar_model_dirs]
+
+        ssm = MultiStepGan.load(spatial_solar_model_dirs, verbose=verbose)
+        swm = MultiStepGan.load(spatial_wind_model_dirs, verbose=verbose)
+        tsm = MultiStepGan.load(temporal_solar_model_dirs, verbose=verbose)
+
+        return cls(ssm, swm, tsm, t_enhance=t_enhance)
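And a hedged loading sketch for the full solar pipeline; the three directories are placeholders for previously saved Sup3rGan model dirs, not real paths:

    from sup3r.models import SolarMultiStepGan

    model = SolarMultiStepGan.load(
        spatial_solar_model_dirs='./solar_spatial',    # hypothetical paths
        spatial_wind_model_dirs='./wind_spatial',
        temporal_solar_model_dirs='./solar_temporal',
        t_enhance=24,
    )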
\ No newline at end of file
diff --git a/_modules/sup3r/models/solar_cc.html b/_modules/sup3r/models/solar_cc.html
new file mode 100644
index 000000000..5caa4ace7
--- /dev/null
+++ b/_modules/sup3r/models/solar_cc.html
@@ -0,0 +1,948 @@
+sup3r.models.solar_cc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.models.solar_cc

+"""Sup3r model software"""
+
+import logging
+
+import numpy as np
+import tensorflow as tf
+
+from sup3r.models.base import Sup3rGan
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class SolarCC(Sup3rGan):
+    """Solar climate change model.
+
+    Note
+    ----
+    *Modifications to standard Sup3rGan*
+     - Content loss is only on the n_days of the center 8 daylight hours of
+       the daily true+synthetic high res samples
+     - Discriminator only sees n_days of the center 8 daylight hours of the
+       daily true high res sample.
+     - Discriminator sees random n_days of 8-hour samples of the daily
+       synthetic high res sample.
+     - Includes padding on high resolution output of :meth:`generate` so
+       that forward pass always outputs a multiple of 24 hours.
+    """
+
+    # starting hour is the hour that daylight starts at, daylight hours is
+    # the number of daylight hours to sample, so for example if 8 and 8, the
+    # daylight slice will be slice(8, 16). The stride length is the step size
+    # for sampling the temporal axis of the generated data to send to the
+    # discriminator for the adversarial loss component of the generator. For
+    # example, if the generator produces 24 timesteps and stride is 4 and the
+    # daylight hours is 8, slices of (0, 8), (4, 12), (8, 16), (12, 20), and
+    # (16, 24) will be sent to the disc.
+    STARTING_HOUR = 8
+    DAYLIGHT_HOURS = 8
+    STRIDE_LEN = 4
+
+    def __init__(self, *args, t_enhance=None, **kwargs):
+        """Add optional t_enhance adjustment.
+
+        Parameters
+        ----------
+        *args : list
+            List of arguments to parent class
+        t_enhance : int | None
+            Optional argument to fix or update the temporal enhancement of
+            the model. This can be used to manipulate the output shape to
+            match whatever padded shape the sup3r forward pass module
+            expects. If this differs from the t_enhance value based on model
+            layers the output will be padded so that the output shape matches
+            low_res * t_enhance for the time dimension.
+        **kwargs : Mappable
+            Keyword arguments for parent class
+        """
+        super().__init__(*args, **kwargs)
+        self._t_enhance = t_enhance or self.t_enhance
+        self.meta['t_enhance'] = self._t_enhance
+
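The daylight slicing described in the comment above can be sketched in isolation (n_days here is hypothetical):

    STARTING_HOUR = 8
    DAYLIGHT_HOURS = 8
    n_days = 3  # a 72-hour sample

    day_slices = [
        slice(STARTING_HOUR + x, STARTING_HOUR + x + DAYLIGHT_HOURS)
        for x in range(0, 24 * n_days, 24)
    ]
    print(day_slices)  # [slice(8, 16), slice(32, 40), slice(56, 64)]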
+[docs] + def init_weights(self, lr_shape, hr_shape, device=None): + """Initialize the generator and discriminator weights with device + placement. + + Parameters + ---------- + lr_shape : tuple + Shape of one batch of low res input data for sup3r resolution. Note + that the batch size (axis=0) must be included, but the actual batch + size doesn't really matter. + hr_shape : tuple + Shape of one batch of high res input data for sup3r resolution. + Note that the batch size (axis=0) must be included, but the actual + batch size doesn't really matter. + device : str | None + Option to place model weights on a device. If None, + self.default_device will be used. + """ + + # The high resolution data passed to the discriminator should only have + # daylight hours in the temporal axis=3 + if hr_shape[3] != self.DAYLIGHT_HOURS: + hr_shape = hr_shape[0:3] + (self.DAYLIGHT_HOURS,) + hr_shape[-1:] + + super().init_weights(lr_shape, hr_shape, device=device)
+ + +
+[docs]
+    @tf.function
+    def calc_loss(
+        self,
+        hi_res_true,
+        hi_res_gen,
+        weight_gen_advers=0.001,
+        train_gen=True,
+        train_disc=False,
+    ):
+        """Calculate the GAN loss function using generated and true high
+        resolution data.
+
+        Parameters
+        ----------
+        hi_res_true : tf.Tensor
+            Ground truth high resolution spatiotemporal data.
+        hi_res_gen : tf.Tensor
+            Super-resolved high resolution spatiotemporal data generated by
+            the generative model.
+        weight_gen_advers : float
+            Weight factor for the adversarial loss component of the generator
+            vs. the discriminator.
+        train_gen : bool
+            True if generator is being trained, then loss=loss_gen
+        train_disc : bool
+            True if disc is being trained, then loss=loss_disc
+
+        Returns
+        -------
+        loss : tf.Tensor
+            0D tensor representing the loss value for the network being
+            trained (either generator or one of the discriminators)
+        loss_details : dict
+            Namespace of the breakdown of loss components
+        """
+
+        if hi_res_gen.shape != hi_res_true.shape:
+            msg = (
+                'The tensor shapes of the synthetic output {} and '
+                'true high res {} did not have matching shape! '
+                'Check the spatiotemporal enhancement multipliers in '
+                'your model config and data handlers.'.format(
+                    hi_res_gen.shape, hi_res_true.shape
+                )
+            )
+            logger.error(msg)
+            raise RuntimeError(msg)
+
+        msg = (
+            'Special SolarCC model can only accept multi-day hourly '
+            '(multiple of 24) true / synthetic high res data in the axis=3 '
+            'position but received shape {}'.format(hi_res_true.shape)
+        )
+        assert hi_res_true.shape[3] % 24 == 0, msg
+
+        t_len = hi_res_true.shape[3]
+        n_days = int(t_len // 24)
+        day_slices = [
+            slice(
+                self.STARTING_HOUR + x,
+                self.STARTING_HOUR + x + self.DAYLIGHT_HOURS,
+            )
+            for x in range(0, 24 * n_days, 24)
+        ]
+
+        # sample only daylight hours for disc training and gen content loss
+        disc_out_true = []
+        disc_out_gen = []
+        loss_gen_content = 0.0
+        for tslice in day_slices:
+            disc_t = self._tf_discriminate(hi_res_true[:, :, :, tslice, :])
+            gen_c = self.calc_loss_gen_content(
+                hi_res_true[:, :, :, tslice, :],
+                hi_res_gen[:, :, :, tslice, :],
+            )
+            disc_out_true.append(disc_t)
+            loss_gen_content += gen_c
+
+        # Randomly sample daylight windows from generated data. Better than
+        # strided samples covering full day because the random samples will
+        # provide an evenly balanced training set for the disc
+        logits = [[1.0] * (t_len - self.DAYLIGHT_HOURS)]
+        time_samples = tf.random.categorical(logits, len(day_slices))
+        for i in range(len(day_slices)):
+            t0 = time_samples[0, i]
+            t1 = t0 + self.DAYLIGHT_HOURS
+            disc_g = self._tf_discriminate(hi_res_gen[:, :, :, t0:t1, :])
+            disc_out_gen.append(disc_g)
+
+        disc_out_true = tf.concat([disc_out_true], axis=0)
+        disc_out_gen = tf.concat([disc_out_gen], axis=0)
+        loss_disc = self.calc_loss_disc(disc_out_true, disc_out_gen)
+
+        loss_gen_content /= len(day_slices)
+        loss_gen_advers = self.calc_loss_gen_advers(disc_out_gen)
+        loss_gen = loss_gen_content + weight_gen_advers * loss_gen_advers
+
+        loss = None
+        if train_gen:
+            loss = loss_gen
+        elif train_disc:
+            loss = loss_disc
+
+        loss_details = {
+            'loss_gen': loss_gen,
+            'loss_gen_content': loss_gen_content,
+            'loss_gen_advers': loss_gen_advers,
+            'loss_disc': loss_disc,
+        }
+
+        return loss, loss_details
+ + +
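The random daylight-window sampling in calc_loss can also be exercised on its own. Note that tf.random.categorical treats the all-ones logits as equal unnormalized log-probabilities, i.e. a uniform draw over valid window start indices (shapes here are hypothetical):

    import tensorflow as tf

    t_len, daylight_hours, n_days = 72, 8, 3
    logits = [[1.0] * (t_len - daylight_hours)]  # uniform over 64 start hours
    time_samples = tf.random.categorical(logits, n_days)
    print(time_samples.shape)  # (1, 3) window start indices in [0, 64)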
+[docs]
+    def temporal_pad(self, low_res, hi_res, mode='reflect'):
+        """Optionally add temporal padding to the 5D generated output array
+
+        Parameters
+        ----------
+        low_res : np.ndarray
+            Low-resolution input data to the (spatio)temporal GAN, which is a
+            5D array of shape: (1, spatial_1, spatial_2, n_temporal,
+            n_features).
+        hi_res : ndarray
+            Synthetically generated high-resolution data output from the
+            (spatio)temporal GAN with a 5D array shape:
+            (1, spatial_1, spatial_2, n_temporal, n_features)
+        mode : str
+            Padding mode for np.pad()
+
+        Returns
+        -------
+        hi_res : ndarray
+            Synthetically generated high-resolution data output from the
+            (spatio)temporal GAN with a 5D array shape:
+            (1, spatial_1, spatial_2, n_temporal, n_features)
+            with the temporal axis padded on either side so the temporal
+            length matches low_res.shape[-2] * self._t_enhance.
+        """
+        t_shape = low_res.shape[-2] * self._t_enhance
+        t_pad = int((t_shape - hi_res.shape[-2]) / 2)
+        pad_width = ((0, 0), (0, 0), (0, 0), (t_pad, t_pad), (0, 0))
+        prepad_shape = hi_res.shape
+        hi_res = np.pad(hi_res, pad_width, mode=mode)
+        logger.debug(
+            'Padded hi_res output from %s to %s', prepad_shape, hi_res.shape
+        )
+        return hi_res
+ + +
+[docs] + def generate(self, low_res, **kwargs): + """Override parent method to apply padding on high res output.""" + + hi_res = self.temporal_pad( + low_res, super().generate(low_res=low_res, **kwargs) + ) + + logger.debug('Final SolarCC output has shape: {}'.format(hi_res.shape)) + + return hi_res
+ + +
+[docs] + @classmethod + def load(cls, model_dir, t_enhance=None, verbose=True): + """Load the GAN with its sub-networks from a previously saved-to output + directory. + + Parameters + ---------- + model_dir : str + Directory to load GAN model files from. + t_enhance : int | None + Optional argument to fix or update the temporal enhancement of the + model. This can be used to manipulate the output shape to match + whatever padded shape the sup3r forward pass module expects. If + this differs from the t_enhance value based on model layers the + output will be padded so that the output shape matches low_res * + t_enhance for the time dimension. + verbose : bool + Flag to log information about the loaded model. + + Returns + ------- + out : BaseModel + Returns a pretrained gan model that was previously saved to out_dir + """ + fp_gen, fp_disc, params = cls._load(model_dir, verbose=verbose) + return cls(fp_gen, fp_disc, t_enhance=t_enhance, **params)
\ No newline at end of file
diff --git a/_modules/sup3r/models/surface.html b/_modules/sup3r/models/surface.html
new file mode 100644
index 000000000..2a379fb3b
--- /dev/null
+++ b/_modules/sup3r/models/surface.html
@@ -0,0 +1,1500 @@
+sup3r.models.surface — sup3r 0.2.1.dev62 documentation
Source code for sup3r.models.surface

+"""Special models for surface meteorological data."""
+
+import logging
+from fnmatch import fnmatch
+from warnings import warn
+
+import numpy as np
+from PIL import Image
+from sklearn import linear_model
+
+from sup3r.utilities.utilities import RANDOM_GENERATOR, spatial_coarsening
+
+from .linear import LinearInterp
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class SurfaceSpatialMetModel(LinearInterp):
+    """Model to spatially downscale daily-average near-surface temperature,
+    relative humidity, and pressure
+
+    Note that this model can also operate on temperature_*m,
+    relativehumidity_*m, and pressure_*m datasets that are not
+    strictly at the surface but could be near hub height.
+    """
+
+    TEMP_LAPSE = 6.5 / 1000
+    """Temperature lapse rate: change in degrees C/K per meter"""
+
+    PRES_DIV = 44307.69231
+    """Air pressure scaling equation divisor variable:
+    101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)"""
+
+    PRES_EXP = 5.25328
+    """Air pressure scaling equation exponent variable:
+    101325 * (1 - (1 - topo / PRES_DIV)**PRES_EXP)"""
+
+    W_DELTA_TEMP = -3.99242830
+    """Weight for the delta-temperature feature for the relative humidity
+    linear regression model."""
+
+    W_DELTA_TOPO = -0.01736911
+    """Weight for the delta-topography feature for the relative humidity
+    linear regression model."""
+
+    def __init__(
+        self,
+        lr_features,
+        s_enhance,
+        noise_adders=None,
+        temp_lapse=None,
+        w_delta_temp=None,
+        w_delta_topo=None,
+        pres_div=None,
+        pres_exp=None,
+        interp_method='LANCZOS',
+        input_resolution=None,
+        fix_bias=True,
+    ):
+        """
+        Parameters
+        ----------
+        lr_features : list
+            List of feature names that this model will operate on for both
+            input and output. This must match the feature axis ordering in
+            the array input to generate(). Typically this is a list
+            containing: temperature_*m, relativehumidity_*m, and pressure_*m.
+            The list can contain multiple instances of each variable at
+            different heights. relativehumidity_*m entries must have
+            corresponding temperature_*m entries at the same hub height.
+        s_enhance : int
+            Integer factor by which the spatial axes are to be enhanced.
+        noise_adders : float | list | None
+            Option to add gaussian noise to spatial model output. Noise will
+            be normally distributed with mean of 0 and standard deviation =
+            noise_adders. noise_adders can be a single value or a list
+            corresponding to the lr_features list. None is no noise. The
+            addition of noise has been shown to help downstream temporal-only
+            models produce diurnal cycles in regions where there is minimal
+            change in topography. Noise adders around 0.07C (temperature) and
+            0.1% (relative humidity) have been shown to be effective. This is
+            unnecessary if daily min/max temperatures are provided as low res
+            training features.
+        temp_lapse : None | float
+            Temperature lapse rate: change in degrees C/K per meter. Defaults
+            to the cls.TEMP_LAPSE attribute.
+        w_delta_temp : None | float
+            Weight for the delta-temperature feature for the relative
+            humidity linear regression model. Defaults to the
+            cls.W_DELTA_TEMP attribute.
+        w_delta_topo : None | float
+            Weight for the delta-topography feature for the relative humidity
+            linear regression model. Defaults to the cls.W_DELTA_TOPO
+            attribute.
+        pres_div : None | float
+            Divisor factor in the pressure scale height equation. Defaults to
+            the cls.PRES_DIV attribute.
+        pres_exp : None | float
+            Exponential factor in the pressure scale height equation.
+            Defaults to the cls.PRES_EXP attribute.
+        interp_method : str
+            Name of the interpolation method to use from PIL.Image.Resampling
+            (NEAREST, BILINEAR, BICUBIC, LANCZOS)
+            LANCZOS is default and has been tested to work best for
+            SurfaceSpatialMetModel.
+ fix_bias : bool + Some local bias can be introduced by the bilinear interp + lapse + rate, this flag will attempt to correct that bias by using the + low-resolution deviation from the input data + input_resolution : dict | None + Resolution of the input data. e.g. {'spatial': '30km', 'temporal': + '60min'}. This is used to determine how to aggregate + high-resolution topography data. + """ + self._lr_features = lr_features + self._s_enhance = s_enhance + self._noise_adders = noise_adders + self._temp_lapse = temp_lapse or self.TEMP_LAPSE + self._w_delta_temp = w_delta_temp or self.W_DELTA_TEMP + self._w_delta_topo = w_delta_topo or self.W_DELTA_TOPO + self._pres_div = pres_div or self.PRES_DIV + self._pres_exp = pres_exp or self.PRES_EXP + self._fix_bias = fix_bias + self._input_resolution = input_resolution + self._interp_name = interp_method + self._interp_method = getattr(Image.Resampling, interp_method) + + if isinstance(self._noise_adders, (int, float)): + self._noise_adders = [self._noise_adders] * len(self._lr_features) + + def __len__(self): + """Get number of model steps (match interface of MultiStepGan)""" + return 1 + + @staticmethod + def _get_s_enhance(topo_lr, topo_hr): + """Get the spatial enhancement factor given low-res and high-res + spatial rasters. + + Parameters + ---------- + topo_lr : np.ndarray + low-resolution surface elevation data in meters with shape + (lat, lon) + topo_hr : np.ndarray + High-resolution surface elevation data in meters with shape + (lat, lon) + + Returns + ------- + s_enhance : int + Integer factor by which the spatial axes are to be enhanced. + """ + assert len(topo_lr.shape) == 2, 'topo_lr must be 2D' + assert len(topo_hr.shape) == 2, 'topo_hr must be 2D' + + se0 = topo_hr.shape[0] / topo_lr.shape[0] + se1 = topo_hr.shape[1] / topo_lr.shape[1] + + assert se0 % 1 == 0, f'Bad calculated s_enhance on axis 0: {se0}' + assert se1 % 1 == 0, f'Bad calculated s_enhance on axis 1: {se1}' + assert se0 == se1, 'Calculated s_enhance does not match along axis' + + return int(se0) + + @property + def feature_inds_temp(self): + """Get the feature index values for the temperature features.""" + inds = [ + i + for i, name in enumerate(self._lr_features) + if fnmatch(name, 'temperature_*') + ] + return inds + + @property + def feature_inds_pres(self): + """Get the feature index values for the pressure features.""" + inds = [ + i + for i, name in enumerate(self._lr_features) + if fnmatch(name, 'pressure_*') + ] + return inds + + @property + def feature_inds_rh(self): + """Get the feature index values for the relative humidity features.""" + inds = [ + i + for i, name in enumerate(self._lr_features) + if fnmatch(name, 'relativehumidity_*') + ] + return inds + + @property + def feature_inds_other(self): + """Get the feature index values for the features that are not + temperature, pressure, or relativehumidity.""" + finds_tprh = ( + self.feature_inds_temp + + self.feature_inds_pres + + self.feature_inds_rh + ) + inds = [ + i + for i, name in enumerate(self._lr_features) + if i not in finds_tprh + ] + return inds + + def _get_temp_rh_ind(self, idf_rh): + """Get the feature index value for the temperature feature + corresponding to a relative humidity feature at the same hub height. + + Parameters + ---------- + idf_rh : int + Index in the feature list for a relativehumidity_*m feature + + Returns + ------- + idf_temp : int + Index in the feature list for a temperature_*m feature with the + same hub height as the idf_rh input. 
+ """ + name_rh = self._lr_features[idf_rh] + hh_suffix = name_rh.split('_')[-1] + idf_temp = None + for i in self.feature_inds_temp: + same_hh = self._lr_features[i].endswith(hh_suffix) + not_minmax = not any(mm in name_rh for mm in ('_min_', '_max_')) + both_mins = '_min_' in name_rh and '_min_' in self._lr_features[i] + both_maxs = '_max_' in name_rh and '_max_' in self._lr_features[i] + + if same_hh and (not_minmax or both_mins or both_maxs): + idf_temp = i + break + + if idf_temp is None: + msg = ( + 'Could not find temperature feature corresponding to ' + '"{}" in feature list: {}'.format(name_rh, self._lr_features) + ) + logger.error(msg) + raise KeyError(msg) + + return idf_temp + +
+[docs] + @classmethod + def fix_downscaled_bias( + cls, single_lr, single_hr, method=Image.Resampling.LANCZOS + ): + """Fix any bias introduced by the spatial downscaling with lapse rate. + + Parameters + ---------- + single_lr : np.ndarray + Single timestep raster data with shape + (lat, lon) matching the low-resolution input data. + single_hr : np.ndarray + Single timestep downscaled raster data with shape + (lat, lon) matching the high-resolution input data. + method : Image.Resampling.LANCZOS + An Image.Resampling method (NEAREST, BILINEAR, BICUBIC, LANCZOS). + NEAREST enforces zero bias but makes slightly more spatial seams. + + Returns + ------- + single_hr : np.ndarray + Single timestep downscaled raster data with shape + (lat, lon) matching the high-resolution input data. + """ + + s_enhance = len(single_hr) // len(single_lr) + re_coarse = spatial_coarsening( + np.expand_dims(single_hr, axis=-1), + s_enhance=s_enhance, + obs_axis=False, + )[..., 0] + bias = re_coarse - single_lr + bc = cls.downscale_arr(bias, s_enhance=s_enhance, method=method) + single_hr -= bc + return single_hr
+ + +
+[docs]
+    @classmethod
+    def downscale_arr(
+        cls, arr, s_enhance, method=Image.Resampling.LANCZOS, fix_bias=False
+    ):
+        """Downscale a 2D array of data using the Image.resize() method
+
+        Parameters
+        ----------
+        arr : np.ndarray
+            2D raster data, typically spatial daily average data with shape
+            (lat, lon)
+        s_enhance : int
+            Integer factor by which the spatial axes are to be enhanced.
+        method : Image.Resampling.LANCZOS
+            An Image.Resampling method (NEAREST, BILINEAR, BICUBIC, LANCZOS).
+            LANCZOS is default and has been tested to work best for
+            SurfaceSpatialMetModel.
+        fix_bias : bool
+            Some local bias can be introduced by the bilinear interp + lapse
+            rate, this flag will attempt to correct that bias by using the
+            low-resolution deviation from the input data
+
+        Returns
+        -------
+        out : np.ndarray
+            2D raster data with shape (lat * s_enhance, lon * s_enhance)
+        """
+        im = Image.fromarray(arr)
+        im = im.resize(
+            (arr.shape[1] * s_enhance, arr.shape[0] * s_enhance),
+            resample=method,
+        )
+        out = np.array(im)
+
+        if fix_bias:
+            out = cls.fix_downscaled_bias(arr, out, method=method)
+
+        return out
+ + +
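A minimal sketch of downscale_arr on synthetic data; the import path follows the sup3r.models package layout used elsewhere on this page:

    import numpy as np
    from sup3r.models import SurfaceSpatialMetModel

    arr = np.random.uniform(0, 1, (10, 10)).astype(np.float32)
    out = SurfaceSpatialMetModel.downscale_arr(arr, s_enhance=4)
    print(out.shape)  # (40, 40), LANCZOS resampling by default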
+[docs] + def downscale_temp(self, single_lr_temp, topo_lr, topo_hr): + """Downscale temperature raster data at a single observation. + + This model uses a simple lapse rate that adjusts temperature as a + function of elevation. The process is as follows: + - add a scale factor to the low-res temperature field: + topo_lr * TEMP_LAPSE + - perform bilinear interpolation of the scaled temperature field. + - subtract the scale factor from the high-res temperature field: + topo_hr * TEMP_LAPSE + + Parameters + ---------- + single_lr_temp : np.ndarray + Single timestep temperature (deg C) raster data with shape + (lat, lon) matching the low-resolution input data. + topo_lr : np.ndarray + low-resolution surface elevation data in meters with shape + (lat, lon) + topo_hr : np.ndarray + High-resolution surface elevation data in meters with shape + (lat, lon) + + Returns + ------- + hi_res_temp : np.ndarray + Single timestep temperature (deg C) raster data with shape + (lat, lon) matching the high-resolution output data. + """ + + assert len(single_lr_temp.shape) == 2, 'Bad shape for single_lr_temp' + assert len(topo_lr.shape) == 2, 'Bad shape for topo_lr' + assert len(topo_hr.shape) == 2, 'Bad shape for topo_hr' + + lower_data = single_lr_temp.copy() + topo_lr * self._temp_lapse + hi_res_temp = self.downscale_arr( + lower_data, self._s_enhance, method=self._interp_method + ) + hi_res_temp -= topo_hr * self._temp_lapse + + if self._fix_bias: + hi_res_temp = self.fix_downscaled_bias( + single_lr_temp, hi_res_temp, method=self._interp_method + ) + + return hi_res_temp
+ + +
+[docs] + def downscale_rh( + self, single_lr_rh, single_lr_temp, single_hr_temp, topo_lr, topo_hr + ): + """Downscale relative humidity raster data at a single observation. + + Here's a description of the humidity scaling model: + - Take low-resolution and high-resolution daily average + temperature, relative humidity, and topography data. + - Downscale all low-resolution variables using a bilinear image + enhancement + - The target model output is the difference between the + interpolated low-res relative humidity and the true high-res + relative humidity. Calculate this difference as a linear function + of the same difference in the temperature and topography fields. + - Use this linear regression model to calculate high-res relative + humidity fields when provided low-res input of relative humidity + along with low-res/high-res pairs of temperature and topography. + + Parameters + ---------- + single_lr_rh : np.ndarray + Single timestep relative humidity (%) raster data with shape + (lat, lon) matching the low-resolution input data. + single_lr_temp : np.ndarray + Single timestep temperature (deg C) raster data with shape + (lat, lon) matching the low-resolution input data. + single_hr_temp : np.ndarray + Single timestep temperature (deg C) raster data with shape + (lat, lon) matching the high-resolution output data. + topo_lr : np.ndarray + low-resolution surface elevation data in meters with shape + (lat, lon) + topo_hr : np.ndarray + High-resolution surface elevation data in meters with shape + (lat, lon) + + Returns + ------- + hi_res_rh : np.ndarray + Single timestep relative humidity (%) raster data with shape + (lat, lon) matching the high-resolution output data. + """ + + assert len(single_lr_rh.shape) == 2, 'Bad shape for single_lr_rh' + assert len(single_hr_temp.shape) == 2, 'Bad shape for single_hr_temp' + assert len(topo_lr.shape) == 2, 'Bad shape for topo_lr' + assert len(topo_hr.shape) == 2, 'Bad shape for topo_hr' + + interp_rh = self.downscale_arr( + single_lr_rh, self._s_enhance, method=self._interp_method + ) + interp_temp = self.downscale_arr( + single_lr_temp, self._s_enhance, method=self._interp_method + ) + interp_topo = self.downscale_arr( + topo_lr, self._s_enhance, method=self._interp_method + ) + + delta_temp = single_hr_temp - interp_temp + delta_topo = topo_hr - interp_topo + + hi_res_rh = ( + interp_rh + + self._w_delta_temp * delta_temp + + self._w_delta_topo * delta_topo + ) + + if self._fix_bias: + hi_res_rh = self.fix_downscaled_bias( + single_lr_rh, hi_res_rh, method=self._interp_method + ) + + return hi_res_rh
+ + +
+[docs]
+    def downscale_pres(self, single_lr_pres, topo_lr, topo_hr):
+        """Downscale pressure raster data at a single observation.
+
+        This model uses a simple exponential scale height that adjusts
+        pressure as a function of elevation. The process is as follows:
+         - add a scale factor to the low-res pressure field:
+           101325 * (1 - (1 - topo_lr / PRES_DIV)**PRES_EXP)
+         - perform bilinear interpolation of the scaled pressure field.
+         - subtract the scale factor from the high-res pressure field:
+           101325 * (1 - (1 - topo_hr / PRES_DIV)**PRES_EXP)
+
+        Parameters
+        ----------
+        single_lr_pres : np.ndarray
+            Single timestep pressure (Pa) raster data with shape
+            (lat, lon) matching the low-resolution input data.
+        topo_lr : np.ndarray
+            low-resolution surface elevation data in meters with shape
+            (lat, lon)
+        topo_hr : np.ndarray
+            High-resolution surface elevation data in meters with shape
+            (lat, lon)
+
+        Returns
+        -------
+        hi_res_pres : np.ndarray
+            Single timestep pressure (Pa) raster data with shape
+            (lat, lon) matching the high-resolution output data.
+        """
+
+        if np.max(single_lr_pres) < 10000:
+            msg = (
+                'Pressure data appears to not be in Pa with min/mean/max: '
+                '{:.1f}/{:.1f}/{:.1f}'.format(
+                    single_lr_pres.min(),
+                    single_lr_pres.mean(),
+                    single_lr_pres.max(),
+                )
+            )
+            logger.warning(msg)
+            warn(msg)
+
+        const = 101325 * (1 - (1 - topo_lr / self._pres_div) ** self._pres_exp)
+        lr_pres_adj = single_lr_pres.copy() + const
+
+        if np.min(lr_pres_adj) < 0.0:
+            msg = (
+                'Spatial interpolation of surface pressure '
+                'resulted in negative values. Incorrectly '
+                'scaled/unscaled values or incorrect units are '
+                'the most likely causes. All pressure data should be '
+                'in Pascals.'
+            )
+            logger.error(msg)
+            raise ValueError(msg)
+
+        hi_res_pres = self.downscale_arr(
+            lr_pres_adj, self._s_enhance, method=self._interp_method
+        )
+
+        const = 101325 * (1 - (1 - topo_hr / self._pres_div) ** self._pres_exp)
+        hi_res_pres -= const
+
+        if self._fix_bias:
+            hi_res_pres = self.fix_downscaled_bias(
+                single_lr_pres, hi_res_pres, method=self._interp_method
+            )
+
+        if np.min(hi_res_pres) < 0.0:
+            msg = (
+                'Spatial interpolation of surface pressure '
+                'resulted in negative values. Incorrectly '
+                'scaled/unscaled values or incorrect units are '
+                'the most likely causes.'
+            )
+            logger.error(msg)
+            raise ValueError(msg)
+
+        return hi_res_pres
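A quick numeric check of the scale-height constant used above, at a hypothetical 1000 m elevation:

    topo = 1000.0  # meters, hypothetical elevation
    PRES_DIV = 44307.69231
    PRES_EXP = 5.25328

    const = 101325 * (1 - (1 - topo / PRES_DIV) ** PRES_EXP)
    print(f'{const:.0f}')  # ~11451 Pa, consistent with the ~11 kPa
                           # sea-level-to-1-km barometric pressure drop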
+
+
+    @property
+    def input_dims(self):
+        """Get dimension of model input. This is 4 for linear and surface
+        models (n_obs, spatial_1, spatial_2, temporal)
+
+        Returns
+        -------
+        int
+        """
+        return 4
+
+    def _get_topo_from_exo(self, exogenous_data):
+        """Get lr_topo and hr_topo from exo_data dictionary.
+
+        Parameters
+        ----------
+        exogenous_data : dict
+            For the SurfaceSpatialMetModel, this must be a nested dictionary
+            with a main 'topography' key and two entries for
+            exogenous_data['topography']['steps']. The first entry includes a
+            2D (lat, lon) or 4D (n_obs, lat, lon, temporal) array of
+            low-resolution surface elevation data in meters (lat, lon must
+            match spatial_1, spatial_2 from low_res), and the second entry
+            includes a 2D (lat, lon) or 4D (n_obs, lat, lon, temporal) array
+            of high-resolution surface elevation data in meters. e.g.
+            .. code-block:: JSON
+                {'topography': {
+                    'steps': [{'data': lr_topo}, {'data': hr_topo}]
+                }}
+
+        Returns
+        -------
+        lr_topo : ndarray
+            (lat, lon)
+        hr_topo : ndarray
+            (lat, lon)
+        """
+        exo_data = [
+            step['data'] for step in exogenous_data['topography']['steps']
+        ]
+        msg = 'exogenous_data is of a bad type {}!'.format(type(exo_data))
+        assert isinstance(exo_data, (list, tuple)), msg
+        msg = 'exogenous_data is of a bad length {}!'.format(len(exo_data))
+        assert len(exo_data) == 2, msg
+
+        lr_topo = exo_data[0]
+        hr_topo = exo_data[1]
+
+        if len(lr_topo.shape) == 4:
+            lr_topo = lr_topo[0, :, :, 0]
+        if len(hr_topo.shape) == 4:
+            hr_topo = hr_topo[0, :, :, 0]
+
+        return lr_topo, hr_topo
+
+    # pylint: disable=unused-argument
+[docs]
+    def generate(
+        self, low_res, norm_in=False, un_norm_out=False, exogenous_data=None
+    ):
+        """Use the generator model to generate high res data from low res
+        input. This is the public generate function.
+
+        Parameters
+        ----------
+        low_res : np.ndarray
+            Low-resolution spatial input data, a 4D array of shape:
+            (n_obs, spatial_1, spatial_2, n_features), where the feature
+            channel can include temperature_*m, relativehumidity_*m, and/or
+            pressure_*m
+        norm_in : bool
+            This doesn't do anything for this SurfaceSpatialMetModel, but is
+            kept to keep the same interface as Sup3rGan
+        un_norm_out : bool
+            This doesn't do anything for this SurfaceSpatialMetModel, but is
+            kept to keep the same interface as Sup3rGan
+        exogenous_data : dict
+            For the SurfaceSpatialMetModel, this must be a nested dictionary
+            with a main 'topography' key and two entries for
+            exogenous_data['topography']['steps']. The first entry includes a
+            2D (lat, lon) array of low-resolution surface elevation data in
+            meters (must match spatial_1, spatial_2 from low_res), and the
+            second entry includes a 2D (lat, lon) array of high-resolution
+            surface elevation data in meters. e.g.
+            .. code-block:: JSON
+                {'topography': {
+                    'steps': [{'data': lr_topo}, {'data': hr_topo}]
+                }}
+
+        Returns
+        -------
+        hi_res : ndarray
+            high-resolution spatial output data, a 4D array of shape:
+            (n_obs, spatial_1, spatial_2, n_features), where the feature
+            channel can include temperature_*m, relativehumidity_*m, and/or
+            pressure_*m
+        """
+        low_res = np.asarray(low_res)
+        lr_topo, hr_topo = self._get_topo_from_exo(exogenous_data)
+        lr_topo = np.asarray(lr_topo)
+        hr_topo = np.asarray(hr_topo)
+        logger.debug(
+            'SurfaceSpatialMetModel received low/high res topo '
+            'shapes of {} and {}'.format(lr_topo.shape, hr_topo.shape)
+        )
+
+        msg = f'topo_lr needs to be 2d but has shape {lr_topo.shape}'
+        assert len(lr_topo.shape) == 2, msg
+        msg = f'topo_hr needs to be 2d but has shape {hr_topo.shape}'
+        assert len(hr_topo.shape) == 2, msg
+        msg = (
+            'lr_topo.shape needs to match lr_res.shape[:2] but received '
+            f'{lr_topo.shape} and {low_res.shape}'
+        )
+        assert lr_topo.shape[0] == low_res.shape[1], msg
+        assert lr_topo.shape[1] == low_res.shape[2], msg
+        s_enhance = self._get_s_enhance(lr_topo, hr_topo)
+        msg = (
+            'Topo shapes of {} and {} did not match desired spatial '
+            'enhancement of {}'.format(
+                lr_topo.shape, hr_topo.shape, self._s_enhance
+            )
+        )
+        assert self._s_enhance == s_enhance, msg
+
+        hr_shape = (
+            len(low_res),
+            int(low_res.shape[1] * self._s_enhance),
+            int(low_res.shape[2] * self._s_enhance),
+            len(self.hr_out_features),
+        )
+        logger.debug(
+            'SurfaceSpatialMetModel with s_enhance of {} '
+            'downscaling low-res shape {} to high-res shape {}'.format(
+                self._s_enhance, low_res.shape, hr_shape
+            )
+        )
+
+        hi_res = np.zeros(hr_shape, dtype=np.float32)
+        for iobs in range(len(low_res)):
+            for idf_temp in self.feature_inds_temp:
+                _tmp = self.downscale_temp(
+                    low_res[iobs, :, :, idf_temp], lr_topo, hr_topo
+                )
+                hi_res[iobs, :, :, idf_temp] = _tmp
+
+            for idf_pres in self.feature_inds_pres:
+                _tmp = self.downscale_pres(
+                    low_res[iobs, :, :, idf_pres], lr_topo, hr_topo
+                )
+                hi_res[iobs, :, :, idf_pres] = _tmp
+
+            for idf_rh in self.feature_inds_rh:
+                idf_temp = self._get_temp_rh_ind(idf_rh)
+                _tmp = self.downscale_rh(
+                    low_res[iobs, :, :, idf_rh],
+                    low_res[iobs, :, :, idf_temp],
+                    hi_res[iobs, :, :, idf_temp],
+                    lr_topo,
+                    hr_topo,
+                )
+                hi_res[iobs, :, :, idf_rh] = _tmp
+
+            for idf_other in self.feature_inds_other:
+                _arr = self.downscale_arr(
+                    low_res[iobs, :, :, idf_other],
+                    self._s_enhance,
+                    method=self._interp_method,
+                    fix_bias=self._fix_bias,
+                )
+                hi_res[iobs, :, :, idf_other] = _arr
+
+        if self._noise_adders is not None:
+            for idf, stdev in enumerate(self._noise_adders):
+                if stdev is not None:
+                    noise = RANDOM_GENERATOR.uniform(
+                        0, stdev, hi_res.shape[:-1]
+                    )
+                    hi_res[..., idf] += noise
+
+        return hi_res
+ + + @property + def meta(self): + """Get meta data dictionary that defines the model params""" + return { + 'temp_lapse_rate': self._temp_lapse, + 's_enhance': self._s_enhance, + 't_enhance': 1, + 'noise_adders': self._noise_adders, + 'input_resolution': self._input_resolution, + 'weight_for_delta_temp': self._w_delta_temp, + 'weight_for_delta_topo': self._w_delta_topo, + 'pressure_divisor': self._pres_div, + 'pressure_exponent': self._pres_exp, + 'lr_features': self.lr_features, + 'hr_out_features': self.hr_out_features, + 'interp_method': self._interp_name, + 'fix_bias': self._fix_bias, + 'class': self.__class__.__name__, + } + +
+[docs]
+    def train(self, true_hr_temp, true_hr_rh, true_hr_topo, input_resolution):
+        """Trains the relative humidity linear model. The temperature and
+        surface lapse rate models are parameterizations taken from the NSRDB
+        and are not trained.
+
+        Parameters
+        ----------
+        true_hr_temp : np.ndarray
+            True high-resolution daily average temperature data in a 3D array
+            of shape (lat, lon, n_days)
+        true_hr_rh : np.ndarray
+            True high-resolution daily average relative humidity data in a 3D
+            array of shape (lat, lon, n_days)
+        true_hr_topo : np.ndarray
+            High-resolution surface elevation data in meters with shape
+            (lat, lon)
+        input_resolution : dict
+            Dictionary of spatial and temporal input resolution. e.g.
+            {'spatial': '20km', 'temporal': '60min'}
+
+        Returns
+        -------
+        w_delta_temp : float
+            Weight for the delta-temperature feature for the relative
+            humidity linear regression model.
+        w_delta_topo : float
+            Weight for the delta-topography feature for the relative humidity
+            linear regression model.
+        """
+        self._input_resolution = input_resolution
+        assert len(true_hr_temp.shape) == 3, 'Bad true_hr_temp shape'
+        assert len(true_hr_rh.shape) == 3, 'Bad true_hr_rh shape'
+        assert len(true_hr_topo.shape) == 2, 'Bad true_hr_topo shape'
+
+        true_hr_topo = np.expand_dims(true_hr_topo, axis=-1)
+        true_hr_topo = np.repeat(true_hr_topo, true_hr_temp.shape[-1], axis=-1)
+
+        true_lr_temp = spatial_coarsening(
+            true_hr_temp, s_enhance=self._s_enhance, obs_axis=False
+        )
+        true_lr_rh = spatial_coarsening(
+            true_hr_rh, s_enhance=self._s_enhance, obs_axis=False
+        )
+        true_lr_topo = spatial_coarsening(
+            true_hr_topo, s_enhance=self._s_enhance, obs_axis=False
+        )
+
+        interp_hr_temp = np.full(true_hr_temp.shape, np.nan, dtype=np.float32)
+        interp_hr_rh = np.full(true_hr_rh.shape, np.nan, dtype=np.float32)
+        interp_hr_topo = np.full(true_hr_topo.shape, np.nan, dtype=np.float32)
+
+        for i in range(interp_hr_temp.shape[-1]):
+            interp_hr_temp[..., i] = self.downscale_arr(
+                true_lr_temp[..., i], self._s_enhance
+            )
+            interp_hr_rh[..., i] = self.downscale_arr(
+                true_lr_rh[..., i], self._s_enhance
+            )
+            interp_hr_topo[..., i] = self.downscale_arr(
+                true_lr_topo[..., i], self._s_enhance
+            )
+
+        x1 = true_hr_temp - interp_hr_temp
+        x2 = true_hr_topo - interp_hr_topo
+        x = np.vstack((x1.flatten(), x2.flatten())).T
+        y = (true_hr_rh - interp_hr_rh).flatten()
+
+        regr = linear_model.LinearRegression()
+        regr.fit(x, y)
+        if np.abs(regr.intercept_) > 1e-6:
+            msg = (
+                'Relative humidity linear model should have an intercept '
+                'of zero but the model fit an intercept of {}'.format(
+                    regr.intercept_
+                )
+            )
+            logger.warning(msg)
+            warn(msg)
+
+        w_delta_temp, w_delta_topo = regr.coef_[0], regr.coef_[1]
+
+        return w_delta_temp, w_delta_topo
\ No newline at end of file
diff --git a/_modules/sup3r/models/utilities.html b/_modules/sup3r/models/utilities.html
new file mode 100644
index 000000000..27e1a8a58
--- /dev/null
+++ b/_modules/sup3r/models/utilities.html
@@ -0,0 +1,794 @@
+sup3r.models.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.models.utilities

+"""Utilities shared across the `sup3r.models` module"""
+
+import logging
+import sys
+import threading
+
+import numpy as np
+from scipy.interpolate import RegularGridInterpolator
+from tensorflow.keras import optimizers
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class TrainingSession: + """Wrapper to gracefully exit batch handler thread during training, upon a + keyboard interruption.""" + + def __init__(self, batch_handler, model, **kwargs): + """ + Parameters + ---------- + batch_handler: BatchHandler + Batch iterator + model: Sup3rGan + Gan model to run in new thread + **kwargs : dict + Model keyword args + """ + self.batch_handler = batch_handler + self.model = model + self.kwargs = kwargs + +
+[docs] + def run(self): + """Wrap model.train().""" + model_thread = threading.Thread( + target=self.model.train, + args=(self.batch_handler,), + kwargs=self.kwargs, + ) + try: + logger.info('Starting training session.') + self.batch_handler.start() + model_thread.start() + except KeyboardInterrupt: + logger.info('Ending training session.') + self.batch_handler.stop() + model_thread.join() + sys.exit() + except Exception as e: + logger.info('Ending training session. %s', e) + self.batch_handler.stop() + model_thread.join() + sys.exit() + + logger.info('Finished training') + self.batch_handler.stop() + model_thread.join()
+
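A hedged usage sketch for TrainingSession: the handler and model are assumed to be pre-built BatchHandler / Sup3rGan instances, and the kwarg below is an illustrative name passed straight through to model.train(), not confirmed from this page:

    from sup3r.models.utilities import TrainingSession

    def start_training(batch_handler, model):
        """Run training in a background thread with graceful Ctrl-C exit."""
        session = TrainingSession(
            batch_handler,
            model,
            n_epoch=100,  # assumed Sup3rGan.train() kwarg, for illustration
        )
        session.run()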
+ + + +
+[docs]
+def get_optimizer_class(conf):
+    """Get optimizer class from keras"""
+    if hasattr(optimizers, conf['name']):
+        optimizer_class = getattr(optimizers, conf['name'])
+    else:
+        msg = '{} not found in keras optimizers.'.format(conf['name'])
+        logger.error(msg)
+        raise ValueError(msg)
+    return optimizer_class
+ + + +
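For example, with a standard keras optimizer name:

    from sup3r.models.utilities import get_optimizer_class

    opt_class = get_optimizer_class({'name': 'Adam'})
    optimizer = opt_class(learning_rate=1e-4)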
+[docs] +def st_interp(low, s_enhance, t_enhance, t_centered=False): + """Spatiotemporal bilinear interpolation for low resolution field on a + regular grid. Used to provide baseline for comparison with gan output + + Parameters + ---------- + low : ndarray + Low resolution field to interpolate. + (spatial_1, spatial_2, temporal) + s_enhance : int + Factor by which to enhance the spatial domain + t_enhance : int + Factor by which to enhance the temporal domain + t_centered : bool + Flag to switch time axis from time-beginning (Default, e.g. + interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to + time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15) + + Returns + ------- + ndarray + Spatiotemporally interpolated low resolution output + """ + assert len(low.shape) == 3, 'Input to st_interp must be 3D array' + msg = 'Input to st_interp cannot include axes with length 1' + assert not any(s <= 1 for s in low.shape), msg + + lr_y, lr_x, lr_t = low.shape + hr_y, hr_x, hr_t = lr_y * s_enhance, lr_x * s_enhance, lr_t * t_enhance + + # assume outer bounds of mesh (0, 10) w/ points on inside of that range + y = np.arange(0, 10, 10 / lr_y) + 5 / lr_y + x = np.arange(0, 10, 10 / lr_x) + 5 / lr_x + + # remesh (0, 10) with high res spacing + new_y = np.arange(0, 10, 10 / hr_y) + 5 / hr_y + new_x = np.arange(0, 10, 10 / hr_x) + 5 / hr_x + + t = np.arange(0, 10, 10 / lr_t) + new_t = np.arange(0, 10, 10 / hr_t) + if t_centered: + t += 5 / lr_t + new_t += 5 / hr_t + + # set RegularGridInterpolator to do extrapolation + interp = RegularGridInterpolator( + (y, x, t), low, bounds_error=False, fill_value=None + ) + + # perform interp + X, Y, T = np.meshgrid(new_x, new_y, new_t) + return interp((Y, X, T))
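A minimal sketch exercising st_interp on random data:

    import numpy as np
    from sup3r.models.utilities import st_interp

    low = np.random.uniform(0, 1, (4, 5, 6))  # (spatial_1, spatial_2, temporal)
    out = st_interp(low, s_enhance=3, t_enhance=2)
    print(out.shape)  # (12, 15, 12)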
\ No newline at end of file
diff --git a/_modules/sup3r/pipeline/forward_pass.html b/_modules/sup3r/pipeline/forward_pass.html
new file mode 100644
index 000000000..cbd7c5814
--- /dev/null
+++ b/_modules/sup3r/pipeline/forward_pass.html
@@ -0,0 +1,1334 @@
+sup3r.pipeline.forward_pass — sup3r 0.2.1.dev62 documentation
Source code for sup3r.pipeline.forward_pass

+"""Sup3r forward pass handling module."""
+
+import logging
+import pprint
+from concurrent.futures import as_completed
+from datetime import datetime as dt
+from typing import ClassVar
+
+import numpy as np
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.fun_utils import get_fun_call_str
+
+from sup3r.pipeline.strategy import ForwardPassChunk, ForwardPassStrategy
+from sup3r.pipeline.utilities import get_model
+from sup3r.postprocessing import (
+    OutputHandlerH5,
+    OutputHandlerNC,
+)
+from sup3r.preprocessing.utilities import (
+    _mem_check,
+    get_source_type,
+    log_args,
+    lowered,
+)
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class ForwardPass: + """Class to run forward passes on all chunks provided by the given + ForwardPassStrategy. The chunks provided by the strategy are all passed + through the GAN generator to produce high resolution output. + """ + + OUTPUT_HANDLER_CLASS: ClassVar = { + 'nc': OutputHandlerNC, + 'h5': OutputHandlerH5, + } + + @log_args + def __init__(self, strategy, node_index=0): + """Initialize ForwardPass with ForwardPassStrategy. The strategy + provides the data chunks to run forward passes on + + Parameters + ---------- + strategy : ForwardPassStrategy + ForwardPassStrategy instance with information on data chunks to run + forward passes on. + node_index : int + Index of node used to run forward pass + """ + self.strategy = strategy + self.model = get_model(strategy.model_class, strategy.model_kwargs) + self.node_index = node_index + + output_type = get_source_type(strategy.out_pattern) + msg = f'Received bad output type {output_type}' + assert output_type is None or output_type in list( + self.OUTPUT_HANDLER_CLASS + ), msg + +
+[docs] + def get_input_chunk(self, chunk_index=0, mode='reflect'): + """Get :class:`ForwardPassChunk` instance for the given chunk index.""" + + chunk = self.strategy.init_chunk(chunk_index) + chunk.input_data, chunk.exo_data = self.pad_source_data( + chunk.input_data, chunk.pad_width, chunk.exo_data, mode=mode + ) + return chunk
+ + + @property + def meta(self): + """Meta data dictionary for the forward pass run (to write to output + files).""" + meta_data = { + 'node_index': self.node_index, + 'creation_date': dt.now().strftime('%d/%m/%Y %H:%M:%S'), + 'model_meta': self.model.meta, + 'gan_params': self.model.model_params, + 'strategy_meta': self.strategy.meta, + } + return meta_data + + def _get_step_enhance(self, step): + """Get enhancement factors for a given step and combine type. + + Parameters + ---------- + step : dict + Model step dictionary. e.g. + ``{'model': 0, 'combine_type': 'input'}`` + + Returns + ------- + s_enhance : int + Spatial enhancement factor for given step and combine type + t_enhance : int + Temporal enhancement factor for given step and combine type + """ + combine_type = step['combine_type'] + model_step = step['model'] + msg = f'Received weird combine_type {combine_type} for step: {step}' + assert combine_type in ('input', 'output', 'layer'), msg + if combine_type.lower() == 'input': + if model_step == 0: + s_enhance = 1 + t_enhance = 1 + else: + s_enhance = np.prod(self.model.s_enhancements[:model_step]) + t_enhance = np.prod(self.model.t_enhancements[:model_step]) + + else: + s_enhance = np.prod(self.model.s_enhancements[: model_step + 1]) + t_enhance = np.prod(self.model.t_enhancements[: model_step + 1]) + return s_enhance, t_enhance + +
+[docs] + def pad_source_data(self, input_data, pad_width, exo_data, mode='reflect'): + """Pad the edges of the source data from the data handler. + + Parameters + ---------- + input_data : Union[np.ndarray, da.core.Array] + Source input data from data handler class, shape is: + (spatial_1, spatial_2, temporal, features) + pad_width : tuple + Tuple of tuples with padding width for spatial and temporal + dimensions. Each tuple includes the start and end of padding for + that dimension. Ordering is spatial_1, spatial_2, temporal. + exo_data: dict + Full exo_handler_kwargs dictionary with all feature entries. See + :meth:`run_generator` for more information. + mode : str + Mode to use for padding. e.g. 'reflect'. + + Returns + ------- + out : Union[np.ndarray, da.core.Array] + Padded copy of source input data from data handler class, shape is: + (spatial_1, spatial_2, temporal, features) + exo_data : dict + Same as input dictionary with s_enhance, t_enhance added to each + step entry for all features + + """ + out = np.pad(input_data, (*pad_width, (0, 0)), mode=mode) + logger.info( + 'Padded input data shape from %s to %s using mode "%s" ' + 'with padding argument: %s', + input_data.shape, + out.shape, + mode, + pad_width, + ) + + if exo_data is not None: + for feature in exo_data: + for i, step in enumerate(exo_data[feature]['steps']): + s_enhance, t_enhance = self._get_step_enhance(step) + exo_pad_width = ( + *( + (en * pw[0], en * pw[1]) + for en, pw in zip( + [s_enhance, s_enhance, t_enhance], pad_width + ) + ), + (0, 0), + ) + new_exo = step['data'] + if len(new_exo.shape) == 3: + new_exo = np.expand_dims(new_exo, axis=2) + new_exo = np.repeat( + new_exo, + step['t_enhance'] * input_data.shape[2], + axis=2, + ) + new_exo = np.pad(new_exo, exo_pad_width, mode=mode) + exo_data[feature]['steps'][i]['data'] = new_exo + logger.info( + f'Got exo data for feature: {feature}, model step: {i}' + ) + return out, exo_data
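A sketch of the pad_width convention used above: one (before, after) tuple per dimension, ordered (spatial_1, spatial_2, temporal), with the feature axis never padded (shapes illustrative):

    import numpy as np

    input_data = np.zeros((10, 10, 24, 3))  # (s1, s2, time, features)
    pad_width = ((2, 2), (2, 2), (1, 1))
    out = np.pad(input_data, (*pad_width, (0, 0)), mode='reflect')
    assert out.shape == (14, 14, 26, 3)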
+ + +
+[docs] + @classmethod + def run_generator( + cls, + data_chunk, + hr_crop_slices, + model, + s_enhance=None, + t_enhance=None, + exo_data=None, + ): + """Run forward pass of the generator on smallest data chunk. Each chunk + has a maximum shape given by self.strategy.fwp_chunk_shape. + + Parameters + ---------- + data_chunk : ndarray + Low res data chunk to go through generator + hr_crop_slices : list + List of slices for extracting cropped region of interest from + output. Output can include an extra overlapping boundary to + reduce chunking error. The cropping cuts off this padded region + before stitching chunks. + model : Sup3rGan + A loaded Sup3rGan model (any model imported from sup3r.models). + t_enhance : int + Factor by which to enhance temporal resolution + s_enhance : int + Factor by which to enhance spatial resolution + exo_data : dict | None + Dictionary of exogenous feature data with entries describing + whether features should be combined at input, a mid network layer, + or with output. e.g. + .. code-block:: JSON + { + 'topography': {'steps': [ + {'combine_type': 'input', 'model': 0, 'data': ...}, + {'combine_type': 'layer', 'model': 0, 'data': ...}]} + } + + Returns + ------- + ndarray + High resolution data generated by GAN + """ + temp = cls._reshape_data_chunk(model, data_chunk, exo_data) + data_chunk, exo_data, i_lr_t, i_lr_s = temp + try: + hi_res = model.generate(data_chunk, exogenous_data=exo_data) + except Exception as e: + msg = 'Forward pass failed on chunk with shape {}.'.format( + data_chunk.shape + ) + logger.exception(msg) + raise RuntimeError(msg) from e + + if len(hi_res.shape) == 4: + hi_res = np.expand_dims(np.transpose(hi_res, (1, 2, 0, 3)), axis=0) + + if ( + s_enhance is not None + and hi_res.shape[1] != s_enhance * data_chunk.shape[i_lr_s] + ): + msg = ( + 'The stated spatial enhancement of {}x did not match ' + 'the low res / high res shapes of {} -> {}'.format( + s_enhance, data_chunk.shape, hi_res.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + if ( + t_enhance is not None + and hi_res.shape[3] != t_enhance * data_chunk.shape[i_lr_t] + ): + msg = ( + 'The stated temporal enhancement of {}x did not match ' + 'the low res / high res shapes of {} -> {}'.format( + t_enhance, data_chunk.shape, hi_res.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + return hi_res[0][hr_crop_slices]
+ + + @staticmethod + def _reshape_data_chunk(model, data_chunk, exo_data): + """Reshape and transpose data chunk and exogenous data before being + passed to the sup3r model. + + Note + ---- + Exo data needs to be different shapes for 5D (Spatiotemporal) / + 4D (Spatial / Surface) models, and different models use different + indices for spatial and temporal dimensions. These differences are + handled here. + + Parameters + ---------- + model : Sup3rGan + Sup3rGan or similar sup3r model + data_chunk : Union[np.ndarray, da.core.Array] + Low resolution data for a single spatiotemporal chunk that is going + to be passed to the model generate function. + exo_data : dict | None + Full exo_handler_kwargs dictionary with all feature entries. See + :meth:`ForwardPass.run_generator` for more information. + + Returns + ------- + data_chunk : Union[np.ndarray, da.core.Array] + Same as input but reshaped to (temporal, spatial_1, spatial_2, + features) if the model is a spatial-first model or + (n_obs, spatial_1, spatial_2, temporal, features) if the + model is spatiotemporal + exo_data : dict | None + Same reshaping procedure as for data_chunk applied to + exo_data[feature]['steps'][...]['data'] + i_lr_t : int + Axis index for the low-resolution temporal dimension + i_lr_s : int + Axis index for the low-resolution spatial_1 dimension + """ + if exo_data is not None: + for feature in exo_data: + for i, entry in enumerate(exo_data[feature]['steps']): + models = getattr(model, 'models', [model]) + msg = ( + f'model index ({entry["model"]}) for exo step {i} ' + 'exceeds the number of model steps' + ) + assert entry['model'] < len(models), msg + current_model = models[entry['model']] + if current_model.is_4d: + out = np.transpose(entry['data'], axes=(2, 0, 1, 3)) + else: + out = np.expand_dims(entry['data'], axis=0) + exo_data[feature]['steps'][i]['data'] = np.asarray(out) + + if model.is_4d: + i_lr_t = 0 + i_lr_s = 1 + data_chunk = np.transpose(data_chunk, axes=(2, 0, 1, 3)) + else: + i_lr_t = 3 + i_lr_s = 1 + data_chunk = np.expand_dims(data_chunk, axis=0) + + return np.asarray(data_chunk), exo_data, i_lr_t, i_lr_s + +
+[docs] + @classmethod + def get_node_cmd(cls, config): + """Get a CLI call to initialize ForwardPassStrategy and run ForwardPass + on a single node based on an input config. + + Parameters + ---------- + config : dict + sup3r forward pass config with all necessary args and kwargs to + initialize ForwardPassStrategy and run ForwardPass on a single + node. + """ + use_cpu = config.get('use_cpu', True) + import_str = '' + if use_cpu: + import_str += 'import os;\n' + import_str += 'os.environ["CUDA_VISIBLE_DEVICES"] = "-1";\n' + import_str += 'import time;\n' + import_str += 'from gaps import Status;\n' + import_str += 'from rex import init_logger;\n' + import_str += ( + 'from sup3r.pipeline.forward_pass ' + f'import ForwardPassStrategy, {cls.__name__}' + ) + + fwps_init_str = get_fun_call_str(ForwardPassStrategy, config) + + node_index = config['node_index'] + log_file = config.get('log_file', None) + log_level = config.get('log_level', 'INFO') + log_arg_str = f'"sup3r", log_level="{log_level}"' + if log_file is not None: + log_arg_str += f', log_file="{log_file}"' + + cmd = ( + f"python -c '{import_str};\n" + 't0 = time.time();\n' + f'logger = init_logger({log_arg_str});\n' + f'strategy = {fwps_init_str};\n' + f'{cls.__name__}.run(strategy, {node_index});\n' + 't_elap = time.time() - t0;\n' + ) + + pipeline_step = config.get('pipeline_step') or ModuleName.FORWARD_PASS + cmd = BaseCLI.add_status_cmd(config, pipeline_step, cmd) + cmd += ";'\n" + + return cmd.replace('\\', '/')
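A hypothetical node config for get_node_cmd(). The keys shown are the ones read above ('use_cpu', 'node_index', 'log_level') plus a few ForwardPassStrategy arguments; all paths are placeholders:

    config = {
        'file_paths': './source_files*.nc',
        'model_kwargs': './model_dir',
        'out_pattern': './output/sup3r_out_{file_id}.h5',
        'fwp_chunk_shape': [20, 20, 24],
        'use_cpu': True,
        'node_index': 0,
        'log_level': 'DEBUG',
    }
    cmd = ForwardPass.get_node_cmd(config)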
+ + + @classmethod + def _constant_output_check(cls, out_data, allowed_const): + """Check if forward pass output is constant. This can happen when the + chunk going through the forward pass is too big. + + Parameters + ---------- + out_data : ndarray + Forward pass output corresponding to the given chunk index + allowed_const : list | bool + If your model is allowed to output a constant output, set this to + True to allow any constant output or a list of allowed possible + constant outputs. See :class:`ForwardPassStrategy` for more + information on this argument. + """ + failed = False + if allowed_const is True: + return failed + if allowed_const is False: + allowed_const = [] + elif not isinstance(allowed_const, (list, tuple)): + allowed_const = [allowed_const] + + for i in range(out_data.shape[-1]): + msg = f'All values are the same for feature channel {i}!' + value0 = out_data[0, 0, 0, i] + all_same = (value0 == out_data[..., i]).all() + if all_same and value0 not in allowed_const: + failed = True + logger.error(msg) + break + return failed + +
+[docs] + @classmethod + def run(cls, strategy, node_index): + """Runs forward passes on all spatiotemporal chunks for the given node + index. + + Parameters + ---------- + strategy : ForwardPassStrategy + ForwardPassStrategy instance with information on data chunks to run + forward passes on. + node_index : int + Index of node on which the forward passes for spatiotemporal chunks + will be run. + """ + if not strategy.node_finished(node_index): + if strategy.pass_workers == 1: + cls._run_serial(strategy, node_index) + else: + cls._run_parallel(strategy, node_index) + logger.debug( + 'Timing report:\n%s', + pprint.pformat(strategy.timer.log, indent=2), + )
+ + + @classmethod + def _run_serial(cls, strategy, node_index): + """Runs forward passes, on all spatiotemporal chunks for the given node + index, in serial. + + Parameters + ---------- + strategy : ForwardPassStrategy + ForwardPassStrategy instance with information on data chunks to run + forward passes on. + node_index : int + Index of node on which the forward passes for spatiotemporal chunks + will be run. + """ + + start = dt.now() + logger.debug(f'Running forward passes on node {node_index} in serial.') + fwp = cls(strategy, node_index=node_index) + for i, chunk_index in enumerate(strategy.node_chunks[node_index]): + now = dt.now() + if not strategy.chunk_finished(chunk_index): + chunk = fwp.get_input_chunk(chunk_index=chunk_index) + failed, _ = cls.run_chunk( + chunk=chunk, + model_kwargs=strategy.model_kwargs, + model_class=strategy.model_class, + allowed_const=strategy.allowed_const, + output_workers=strategy.output_workers, + invert_uv=strategy.invert_uv, + meta=fwp.meta, + ) + logger.info( + 'Finished forward pass on chunk_index=' + f'{chunk_index} in {dt.now() - now}. {i + 1} of ' + f'{len(strategy.node_chunks[node_index])} ' + f'complete. {_mem_check()}.' + ) + if failed: + msg = ( + f'Forward pass for chunk_index {chunk_index} failed ' + 'with constant output.' + ) + raise MemoryError(msg) + + logger.info( + 'Finished forward passes on ' + f'{len(strategy.node_chunks[node_index])} chunks in ' + f'{dt.now() - start}' + ) + + @classmethod + def _run_parallel(cls, strategy, node_index): + """Runs forward passes, on all spatiotemporal chunks for the given node + index, with data extraction and forward pass routines in parallel. + + Parameters + ---------- + strategy : ForwardPassStrategy + ForwardPassStrategy instance with information on data chunks to run + forward passes on. + node_index : int + Index of node on which the forward passes for spatiotemporal chunks + will be run. + """ + + logger.info( + f'Running parallel forward passes on node {node_index}' + f' with pass_workers={strategy.pass_workers}.' + ) + + futures = {} + start = dt.now() + pool_kws = {'max_workers': strategy.pass_workers, 'loggers': ['sup3r']} + fwp = cls(strategy, node_index=node_index) + with SpawnProcessPool(**pool_kws) as exe: + now = dt.now() + for _, chunk_index in enumerate(strategy.node_chunks[node_index]): + if not strategy.chunk_finished(chunk_index): + chunk = fwp.get_input_chunk(chunk_index=chunk_index) + fut = exe.submit( + fwp.run_chunk, + chunk=chunk, + model_kwargs=strategy.model_kwargs, + model_class=strategy.model_class, + allowed_const=strategy.allowed_const, + output_workers=strategy.output_workers, + invert_uv=strategy.invert_uv, + meta=fwp.meta, + ) + futures[fut] = { + 'chunk_index': chunk_index, + 'start_time': dt.now(), + } + + logger.info( + f'Started {len(futures)} forward pass runs in ' + f'{dt.now() - now}.' + ) + + try: + for i, future in enumerate(as_completed(futures)): + failed, _ = future.result() + chunk_idx = futures[future]['chunk_index'] + start_time = futures[future]['start_time'] + if failed: + msg = ( + f'Forward pass for chunk_index {chunk_idx} failed ' + 'with constant output.' + ) + raise MemoryError(msg) + msg = ( + 'Finished forward pass on chunk_index=' + f'{chunk_idx} in {dt.now() - start_time}. ' + f'{i + 1} of {len(futures)} complete. {_mem_check()}' + ) + logger.info(msg) + except Exception as e: + msg = ( + 'Error running forward pass on chunk_index=' + f'{futures[future]["chunk_index"]}.'
+ ) + logger.exception(msg) + raise RuntimeError(msg) from e + + logger.info( + 'Finished asynchronous forward passes on ' + f'{len(strategy.node_chunks[node_index])} chunks in ' + f'{dt.now() - start}' + ) + +
+[docs] + @classmethod + def run_chunk( + cls, + chunk: ForwardPassChunk, + model_kwargs, + model_class, + allowed_const, + invert_uv=None, + meta=None, + output_workers=None, + ): + """Run a forward pass on single spatiotemporal chunk. + + Parameters + ---------- + chunk : :class:`ForwardPassChunk` + Struct with chunk data (including exo data if applicable) and + chunk attributes (e.g. chunk specific slices, times, lat/lon, etc) + model_kwargs : str | list + Keyword arguments to send to `model_class.load(**model_kwargs)` to + initialize the GAN. Typically this is just the string path to the + model directory, but can be multiple models or arguments for more + complex models. + model_class : str + Name of the sup3r model class for the GAN model to load. The + default is the basic spatial / spatiotemporal Sup3rGan model. This + will be loaded from sup3r.models + allowed_const : list | bool + If your model is allowed to output a constant output, set this to + True to allow any constant output or a list of allowed possible + constant outputs. See :class:`ForwardPassStrategy` for more + information on this argument. + invert_uv : bool | None + Whether to convert uv to windspeed and winddirection for writing + output. This defaults to True for H5 output and False for NETCDF + output. + meta : dict | None + Meta data to write to forward pass output file. + output_workers : int | None + Max number of workers to use for writing forward pass output. + + Returns + ------- + failed : bool + Whether the forward pass failed due to constant output. + output_data : ndarray + Array of high-resolution output from generator + """ + + msg = f'Running forward pass for chunk_index={chunk.index}.' + logger.info(msg) + + model = get_model(model_class, model_kwargs) + + output_data = cls.run_generator( + data_chunk=chunk.input_data, + hr_crop_slices=chunk.hr_crop_slice, + s_enhance=model.s_enhance, + t_enhance=model.t_enhance, + exo_data=chunk.exo_data, + model=model, + ) + + failed = cls._constant_output_check( + output_data, allowed_const=allowed_const + ) + + if chunk.out_file is not None and not failed: + logger.info(f'Saving forward pass output to {chunk.out_file}.') + output_type = get_source_type(chunk.out_file) + cls.OUTPUT_HANDLER_CLASS[output_type]._write_output( + data=output_data, + features=lowered(model.hr_out_features), + lat_lon=chunk.hr_lat_lon, + times=chunk.hr_times, + out_file=chunk.out_file, + meta_data=meta, + invert_uv=invert_uv, + max_workers=output_workers, + gids=chunk.gids, + ) + return failed, output_data
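An end-to-end sketch tying the pieces together: build a strategy and run all chunks assigned to one node. Paths and chunk shape are placeholders; model_kwargs uses the string path form described above:

    from sup3r.pipeline.forward_pass import ForwardPass
    from sup3r.pipeline.strategy import ForwardPassStrategy

    strategy = ForwardPassStrategy(
        file_paths='./source_files*.nc',
        model_kwargs='./model_dir',
        fwp_chunk_shape=(20, 20, 24),
        spatial_pad=2,
        temporal_pad=2,
        out_pattern='./output/sup3r_out_{file_id}.h5',
    )
    ForwardPass.run(strategy, node_index=0)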
\ No newline at end of file
diff --git a/_modules/sup3r/pipeline/forward_pass_cli.html b/_modules/sup3r/pipeline/forward_pass_cli.html
new file mode 100644
index 000000000..4a83133fa
--- /dev/null
+++ b/_modules/sup3r/pipeline/forward_pass_cli.html
@@ -0,0 +1,828 @@
Source code for sup3r.pipeline.forward_pass_cli

+"""sup3r forward pass CLI entry points."""
+
+import copy
+import logging
+import os
+from inspect import signature
+
+import click
+
+from sup3r import __version__
+from sup3r.pipeline.forward_pass import ForwardPass, ForwardPassStrategy
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import AVAILABLE_HARDWARE_OPTIONS, BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+@click.group()
+@click.version_option(version=__version__)
+@click.option(
+    '-v',
+    '--verbose',
+    is_flag=True,
+    help='Flag to turn on debug logging. Default is not verbose.',
+)
+@click.pass_context
+def main(ctx, verbose):
+    """Sup3r Forward Pass Command Line Interface"""
+    ctx.ensure_object(dict)
+    ctx.obj['VERBOSE'] = verbose
+
+
+@main.command()
+@click.option(
+    '--config_file',
+    '-c',
+    required=True,
+    type=click.Path(exists=True),
+    help='sup3r forward pass configuration .json file.',
+)
+@click.option(
+    '-v',
+    '--verbose',
+    is_flag=True,
+    help='Flag to turn on debug logging. Default is not verbose.',
+)
+@click.pass_context
+def from_config(ctx, config_file, verbose=False, pipeline_step=None):
+    """Run sup3r forward pass from a config file."""
+
+    config = BaseCLI.from_config_preflight(
+        ModuleName.FORWARD_PASS, ctx, config_file, verbose
+    )
+
+    exec_kwargs = config.get('execution_control', {})
+    hardware_option = exec_kwargs.pop('option', 'local')
+    node_index = config.get('node_index', None)
+    basename = config.get('job_name')
+    log_pattern = config.get('log_pattern', None)
+
+    sig = signature(ForwardPassStrategy)
+    strategy_kwargs = {k: v for k, v in config.items() if k in sig.parameters}
+    strategy = ForwardPassStrategy(**strategy_kwargs, head_node=True)
+
+    if node_index is not None:
+        nodes = (
+            [node_index] if not isinstance(node_index, list) else node_index
+        )
+    else:
+        nodes = range(len(strategy.node_chunks))
+    for i_node in nodes:
+        node_config = copy.deepcopy(config)
+        node_config['node_index'] = i_node
+        node_config['log_file'] = (
+            log_pattern
+            if log_pattern is None
+            else os.path.normpath(log_pattern.format(node_index=i_node))
+        )
+        name = '{}_{}'.format(basename, str(i_node).zfill(6))
+        ctx.obj['NAME'] = name
+        node_config['job_name'] = name
+        node_config['pipeline_step'] = pipeline_step
+        cmd = ForwardPass.get_node_cmd(node_config)
+
+        if hardware_option.lower() in AVAILABLE_HARDWARE_OPTIONS:
+            kickoff_slurm_job(ctx, cmd, pipeline_step, **exec_kwargs)
+        else:
+            kickoff_local_job(ctx, cmd, pipeline_step)
+
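A hypothetical config file for this entry point, written from Python for illustration. The keys mirror the ones read in from_config() above, and 'execution_control' selects local vs. SLURM execution:

    import json

    config = {
        'job_name': 'sup3r_fwp',
        'file_paths': './source_files*.nc',
        'model_kwargs': './model_dir',
        'out_pattern': './output/sup3r_out_{file_id}.h5',
        'log_pattern': './logs/fwp_node{node_index}.log',
        'execution_control': {'option': 'local'},
    }
    with open('config.json', 'w') as f:
        json.dump(config, f)

    # then: python -m sup3r.cli forward_pass -c config.json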
+
+
+[docs] +def kickoff_slurm_job( + ctx, + cmd, + pipeline_step=None, + alloc='sup3r', + memory=None, + walltime=4, + feature=None, + stdout_path='./stdout/', +): + """Run sup3r on HPC via SLURM job submission. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in SLURM shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + alloc : str + HPC project (allocation) handle. Example: 'sup3r'. + memory : int + Node memory request in GB. + walltime : float + Node walltime request in hours. + feature : str + Additional flags for SLURM job. Format is "--qos=high" + or "--depend=[state:job_id]". Default is None. + stdout_path : str + Path to print .stdout and .stderr files. + """ + BaseCLI.kickoff_slurm_job( + ModuleName.FORWARD_PASS, + ctx, + cmd, + alloc, + memory, + walltime, + feature, + stdout_path, + pipeline_step, + )
+ + + +
+[docs] +def kickoff_local_job(ctx, cmd, pipeline_step=None): + """Run sup3r forward pass locally. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + BaseCLI.kickoff_local_job(ModuleName.FORWARD_PASS, ctx, cmd, pipeline_step)
+ + + +if __name__ == '__main__': + try: + main(obj={}) + except Exception: + logger.exception('Error running sup3r forward pass CLI') + raise +
\ No newline at end of file
diff --git a/_modules/sup3r/pipeline/slicer.html b/_modules/sup3r/pipeline/slicer.html
new file mode 100644
index 000000000..ab146ca35
--- /dev/null
+++ b/_modules/sup3r/pipeline/slicer.html
@@ -0,0 +1,1212 @@
Source code for sup3r.pipeline.slicer

+"""Slicer class for chunking forward pass input"""
+
+import logging
+from dataclasses import dataclass
+from typing import Union
+
+import numpy as np
+
+from sup3r.pipeline.utilities import (
+    get_chunk_slices,
+)
+from sup3r.preprocessing.utilities import _parse_time_slice, log_args
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +@dataclass +class ForwardPassSlicer: + """Get slices for sending data chunks through generator. + + Parameters + ---------- + coarse_shape : tuple + Shape of full domain for low res data + time_steps : int + Number of time steps for full temporal domain of low res data. This + is used to construct a dummy_time_index from np.arange(time_steps) + time_slice : slice | list + Slice to use to extract range from time_index. Can be a ``slice(start, + stop, step)`` or list ``[start, stop, step]`` + chunk_shape : tuple + Max shape (spatial_1, spatial_2, temporal) of an unpadded coarse + chunk to use for a forward pass. The number of nodes that the + ForwardPassStrategy is set to distribute to is calculated by + dividing up the total time index from all file_paths by the + temporal part of this chunk shape. Each node will then be + parallelized across parallel processes by the spatial chunk shape. + If temporal_pad / spatial_pad are nonzero the chunk sent + to the generator can be bigger than this shape. If running in + serial set this equal to the shape of the full spatiotemporal data + volume for best performance. + s_enhance : int + Spatial enhancement factor + t_enhance : int + Temporal enhancement factor + spatial_pad : int + Size of spatial overlap between coarse chunks passed to forward + passes for subsequent spatial stitching. This overlap will pad both + sides of the fwp_chunk_shape. Note that the first and last chunks + in any of the spatial dimensions will not be padded. + temporal_pad : int + Size of temporal overlap between coarse chunks passed to forward + passes for subsequent temporal stitching. This overlap will pad + both sides of the fwp_chunk_shape. Note that the first and last + chunks in the temporal dimension will not be padded. + """ + + coarse_shape: Union[tuple, list] + time_steps: int + s_enhance: int + t_enhance: int + time_slice: slice + temporal_pad: int + spatial_pad: int + chunk_shape: Union[tuple, list] + + @log_args + def __post_init__(self): + self.dummy_time_index = np.arange(self.time_steps) + self.time_slice = _parse_time_slice(self.time_slice) + + self._chunk_lookup = None + self._s1_lr_slices = None + self._s2_lr_slices = None + self._s1_lr_pad_slices = None + self._s2_lr_pad_slices = None + self._s_lr_slices = None + self._s_lr_pad_slices = None + self._s_lr_crop_slices = None + self._t_lr_pad_slices = None + self._t_lr_crop_slices = None + self._s_hr_slices = None + self._s_hr_crop_slices = None + self._t_hr_crop_slices = None + self._hr_crop_slices = None
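A minimal sketch of the slicer on a small domain, assuming get_chunk_slices() splits each axis evenly (values illustrative):

    from sup3r.pipeline.slicer import ForwardPassSlicer

    slicer = ForwardPassSlicer(
        coarse_shape=(4, 4),
        time_steps=10,
        s_enhance=2,
        t_enhance=3,
        time_slice=slice(None),
        temporal_pad=1,
        spatial_pad=1,
        chunk_shape=(2, 2, 5),
    )
    # 2 x 2 spatial chunks and 2 time chunks -> 8 total chunks
    assert slicer.n_chunks == 8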
+[docs] + def get_spatial_slices(self): + """Get spatial slices for small data chunks that are passed through + generator + + Returns + ------- + s_lr_slices: list + List of slices for low res data chunks which have not been padded. + data_handler.data[s_lr_slice] corresponds to an unpadded low res + input to the model. + s_lr_pad_slices : list + List of slices which have been padded so that high res output + can be stitched together. data_handler.data[s_lr_pad_slice] + corresponds to a padded low res input to the model. + s_hr_slices : list + List of slices for high res data corresponding to the + lr_slices regions. output_array[s_hr_slice] corresponds to the + cropped generator output. + """ + return (self.s_lr_slices, self.s_lr_pad_slices, self.s_hr_slices)
+ + +
+[docs] + def get_time_slices(self): + """Calculate the number of time chunks across the full time index + + Returns + ------- + t_lr_slices : list + List of low-res non-padded time index slices. e.g. If + fwp_chunk_size[2] is 5 then the size of these slices will always + be 5. + t_lr_pad_slices : list + List of low-res padded time index slices. e.g. If fwp_chunk_size[2] + is 5 and temporal_pad is 5 then the size of these slices will be + 15, with exceptions at the start and end of the full time index. + """ + return self.t_lr_slices, self.t_lr_pad_slices
+ + + @property + def s_lr_slices(self): + """Get low res spatial slices for small data chunks that are passed + through generator + + Returns + ------- + _s_lr_slices : list + List of spatial slices corresponding to the unpadded spatial region + going through the generator + """ + if self._s_lr_slices is None: + self._s_lr_slices = [ + (s1, s2) + for s1 in self.s1_lr_slices + for s2 in self.s2_lr_slices + ] + return self._s_lr_slices + + @property + def s_lr_pad_slices(self): + """Get low res padded slices for small data chunks that are passed + through generator + + Returns + ------- + _s_lr_pad_slices : list + List of slices which have been padded so that high res output + can be stitched together. Each entry in this list has a slice for + each spatial dimension. data_handler.data[s_lr_pad_slice] gives the + padded data volume passed through the generator + """ + if self._s_lr_pad_slices is None: + self._s_lr_pad_slices = [ + (s1, s2) + for s1 in self.s1_lr_pad_slices + for s2 in self.s2_lr_pad_slices + ] + return self._s_lr_pad_slices + + @property + def t_lr_pad_slices(self): + """Get low res temporal padded slices for distributing time chunks + across nodes. These slices correspond to the time chunks sent to each + node and are padded according to temporal_pad. + + Returns + ------- + _t_lr_pad_slices : list + List of low res temporal slices which have been padded so that high + res output can be stitched together + """ + if self._t_lr_pad_slices is None: + self._t_lr_pad_slices = self.get_padded_slices( + self.t_lr_slices, + self.time_steps, + 1, + self.temporal_pad, + self.time_slice.step, + ) + return self._t_lr_pad_slices + + @property + def t_lr_crop_slices(self): + """Get low res temporal cropped slices for cropping time index of + padded input data. + + Returns + ------- + _t_lr_crop_slices : list + List of low res temporal slices for cropping padded input data + """ + if self._t_lr_crop_slices is None: + self._t_lr_crop_slices = self.get_cropped_slices( + self.t_lr_slices, self.t_lr_pad_slices, 1 + ) + + return self._t_lr_crop_slices + + @property + def t_hr_crop_slices(self): + """Get high res temporal cropped slices for cropping forward pass + output before stitching together + + Returns + ------- + _t_hr_crop_slices : list + List of high res temporal slices for cropping padded generator + output + """ + hr_crop_start = None + hr_crop_stop = None + if self.temporal_pad > 0: + hr_crop_start = self.t_enhance * self.temporal_pad + hr_crop_stop = -hr_crop_start + + if self._t_hr_crop_slices is None: + # don't use self.get_cropped_slices() here because temporal padding + # gets weird at beginning and end of timeseries and the temporal + # axis should always be evenly chunked. + self._t_hr_crop_slices = [ + slice(hr_crop_start, hr_crop_stop) + for _ in range(len(self.t_lr_slices)) + ] + + return self._t_hr_crop_slices + + @property + def s1_hr_slices(self): + """Get high res spatial slices for first spatial dimension""" + return self.get_hr_slices(self.s1_lr_slices, self.s_enhance) + + @property + def s2_hr_slices(self): + """Get high res spatial slices for second spatial dimension""" + return self.get_hr_slices(self.s2_lr_slices, self.s_enhance) + + @property + def s_hr_slices(self): + """Get high res slices for indexing full generator output array + + Returns + ------- + _s_hr_slices : list + List of high res slices. Each entry in this list has a slice for + each spatial dimension. 
output[hr_slice] gives the superresolved + domain corresponding to data_handler.data[lr_slice] + """ + if self._s_hr_slices is None: + self._s_hr_slices = [] + self._s_hr_slices = [ + (s1, s2) + for s1 in self.s1_hr_slices + for s2 in self.s2_hr_slices + ] + return self._s_hr_slices + + @property + def s_lr_crop_slices(self): + """Get low res cropped slices for cropping input chunk domain + + Returns + ------- + _s_lr_crop_slices : list + List of low res cropped slices. Each entry in this list has a + slice for each spatial dimension. + """ + if self._s_lr_crop_slices is None: + self._s_lr_crop_slices = [] + s1_crop_slices = self.get_cropped_slices( + self.s1_lr_slices, self.s1_lr_pad_slices, 1 + ) + s2_crop_slices = self.get_cropped_slices( + self.s2_lr_slices, self.s2_lr_pad_slices, 1 + ) + self._s_lr_crop_slices = [ + (s1, s2) for s1 in s1_crop_slices for s2 in s2_crop_slices + ] + return self._s_lr_crop_slices + + @property + def s_hr_crop_slices(self): + """Get high res cropped slices for cropping generator output + + Returns + ------- + _s_hr_crop_slices : list + List of high res cropped slices. Each entry in this list has a + slice for each spatial dimension. + """ + hr_crop_start = None + hr_crop_stop = None + if self.spatial_pad > 0: + hr_crop_start = self.s_enhance * self.spatial_pad + hr_crop_stop = -hr_crop_start + + if self._s_hr_crop_slices is None: + self._s_hr_crop_slices = [] + s1_hr_crop_slices = [ + slice(hr_crop_start, hr_crop_stop) + for _ in range(len(self.s1_lr_slices)) + ] + s2_hr_crop_slices = [ + slice(hr_crop_start, hr_crop_stop) + for _ in range(len(self.s2_lr_slices)) + ] + + self._s_hr_crop_slices = [ + (s1, s2) + for s1 in s1_hr_crop_slices + for s2 in s2_hr_crop_slices + ] + return self._s_hr_crop_slices + + @property + def hr_crop_slices(self): + """Get high res spatiotemporal cropped slices for cropping generator + output + + Returns + ------- + _hr_crop_slices : list + List of high res spatiotemporal cropped slices. Each entry in this + list has a crop slice for each spatial dimension and temporal + dimension and then slice(None) for the feature dimension. 
+ + model.generate()[hr_crop_slice] gives the cropped generator output + corresponding to output_array[hr_slice] + """ + if self._hr_crop_slices is None: + self._hr_crop_slices = [] + for t in self.t_hr_crop_slices: + node_slices = [ + (s[0], s[1], t, slice(None)) for s in self.s_hr_crop_slices + ] + self._hr_crop_slices.append(node_slices) + return self._hr_crop_slices + + @property + def s1_lr_pad_slices(self): + """List of low resolution spatial slices with padding for first + spatial dimension""" + if self._s1_lr_pad_slices is None: + self._s1_lr_pad_slices = self.get_padded_slices( + self.s1_lr_slices, + self.coarse_shape[0], + 1, + padding=self.spatial_pad, + ) + return self._s1_lr_pad_slices + + @property + def s2_lr_pad_slices(self): + """List of low resolution spatial slices with padding for second + spatial dimension""" + if self._s2_lr_pad_slices is None: + self._s2_lr_pad_slices = self.get_padded_slices( + self.s2_lr_slices, + self.coarse_shape[1], + 1, + padding=self.spatial_pad, + ) + return self._s2_lr_pad_slices + + @property + def s1_lr_slices(self): + """List of low resolution spatial slices for first spatial dimension + considering padding on all sides of the spatial raster.""" + ind = slice(0, self.coarse_shape[0]) + slices = get_chunk_slices( + self.coarse_shape[0], self.chunk_shape[0], index_slice=ind + ) + return slices + + @property + def s2_lr_slices(self): + """List of low resolution spatial slices for second spatial dimension + considering padding on all sides of the spatial raster.""" + ind = slice(0, self.coarse_shape[1]) + slices = get_chunk_slices( + self.coarse_shape[1], self.chunk_shape[1], index_slice=ind + ) + return slices + + @property + def t_lr_slices(self): + """Low resolution temporal slices""" + n_tsteps = len(self.dummy_time_index[self.time_slice]) + n_chunks = n_tsteps / self.chunk_shape[2] + n_chunks = int(np.ceil(n_chunks)) + ti_slices = self.dummy_time_index[self.time_slice] + ti_slices = np.array_split(ti_slices, n_chunks) + ti_slices = [ + slice(c[0], c[-1] + 1, self.time_slice.step) for c in ti_slices + ] + return ti_slices +
+[docs] + @staticmethod + def get_hr_slices(slices, enhancement, step=None): + """Get high resolution slices for temporal or spatial slices + + Parameters + ---------- + slices : list + Low resolution slices to be enhanced + enhancement : int + Enhancement factor + step : int | None + Step size for slices + + Returns + ------- + hr_slices : list + High resolution slices + """ + hr_slices = [] + if step is not None: + step *= enhancement + for sli in slices: + start = sli.start * enhancement + stop = sli.stop * enhancement + hr_slices.append(slice(start, stop, step)) + return hr_slices
+ + + @property + def chunk_lookup(self): + """Get a 3D array with shape + (n_spatial_1_chunks, n_spatial_2_chunks, n_time_chunks) + where each value is the chunk index.""" + if self._chunk_lookup is None: + n_s1 = len(self.s1_lr_slices) + n_s2 = len(self.s2_lr_slices) + n_t = self.n_time_chunks + lookup = np.arange(self.n_chunks).reshape((n_t, n_s1, n_s2)) + self._chunk_lookup = np.transpose(lookup, axes=(1, 2, 0)) + return self._chunk_lookup + + @property + def spatial_chunk_lookup(self): + """Get a 2D array with shape (n_spatial_1_chunks, n_spatial_2_chunks) + where each value is the spatial chunk index.""" + n_s1 = len(self.s1_lr_slices) + n_s2 = len(self.s2_lr_slices) + return np.arange(self.n_spatial_chunks).reshape((n_s1, n_s2)) + + @property + def n_spatial_chunks(self): + """Get the number of spatial chunks""" + return len(self.hr_crop_slices[0]) + + @property + def n_time_chunks(self): + """Get the number of temporal chunks""" + return len(self.t_hr_crop_slices) + + @property + def n_chunks(self): + """Get total number of spatiotemporal chunks""" + return self.n_spatial_chunks * self.n_time_chunks + +
+[docs] + @staticmethod + def get_padded_slices(slices, shape, enhancement, padding, step=None): + """Get padded slices with the specified padding size, max shape, + enhancement, and step size + + Parameters + ---------- + slices : list + List of low res unpadded slice + shape : int + max possible index of a padded slice. e.g. if the slices are + indexing a dimension with size 10 then a padded slice cannot have + an index greater than 10. + enhancement : int + Enhancement factor. e.g. If these slices are indexing a spatial + dimension which will be enhanced by 2x then enhancement=2. + padding : int + Padding factor. e.g. If these slices are indexing a spatial + dimension and the spatial_pad is 10 this is 10. It will be + multiplied by the enhancement factor if the slices are to be used + to index an enhanced dimension. + step : int | None + Step size for slices. e.g. If these slices are indexing a temporal + dimension and time_slice.step = 3 then step=3. + + Returns + ------- + list + Padded slices for temporal or spatial dimensions. + """ + step = step or 1 + pad = step * padding * enhancement + pad_slices = [] + for _, s in enumerate(slices): + start = np.max([0, s.start * enhancement - pad]) + end = np.min([enhancement * shape, s.stop * enhancement + pad]) + pad_slices.append(slice(start, end, step)) + return pad_slices
+ + +
+[docs] + @staticmethod + def get_cropped_slices(unpadded_slices, padded_slices, enhancement): + """Get cropped slices to cut off padded output + + Parameters + ---------- + unpadded_slices : list + List of unpadded slices + padded_slices : list + List of padded slices + enhancement : int + Enhancement factor for the data to be cropped. + + Returns + ------- + list + Cropped slices for temporal or spatial dimensions. + """ + cropped_slices = [] + for ps, us in zip(padded_slices, unpadded_slices): + start = us.start + stop = us.stop + step = us.step or 1 + if start is not None: + start = enhancement * (us.start - ps.start) // step + if stop is not None: + stop = enhancement * (us.stop - ps.stop) // step + if start is not None and start <= 0: + start = None + if stop is not None and stop >= 0: + stop = None + cropped_slices.append(slice(start, stop)) + return cropped_slices
+
\ No newline at end of file
diff --git a/_modules/sup3r/pipeline/strategy.html b/_modules/sup3r/pipeline/strategy.html
new file mode 100644
index 000000000..a60913160
--- /dev/null
+++ b/_modules/sup3r/pipeline/strategy.html
@@ -0,0 +1,1370 @@
Source code for sup3r.pipeline.strategy

+""":class:`ForwardPassStrategy` class. This sets up chunks and needed generator
+inputs to distribute forward passes."""
+
+import copy
+import logging
+import os
+import pathlib
+import pprint
+import warnings
+from dataclasses import dataclass
+from functools import cached_property
+from typing import Dict, Optional, Tuple, Union
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+
+from sup3r.bias.utilities import bias_correct_features
+from sup3r.pipeline.slicer import ForwardPassSlicer
+from sup3r.pipeline.utilities import get_model
+from sup3r.postprocessing import OutputHandler
+from sup3r.preprocessing import ExoData, ExoDataHandler, Rasterizer
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import (
+    _parse_time_slice,
+    expand_paths,
+    get_class_kwargs,
+    get_date_range_kwargs,
+    get_input_handler_class,
+    log_args,
+)
+from sup3r.utilities.utilities import Timer
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +@dataclass +class ForwardPassChunk: + """Structure storing chunk data and attributes for a specific chunk going + through the generator.""" + + input_data: Union[np.ndarray, da.core.Array] + exo_data: Dict + hr_crop_slice: slice + lr_pad_slice: slice + hr_lat_lon: Union[np.ndarray, da.core.Array] + hr_times: pd.DatetimeIndex + gids: Union[np.ndarray, da.core.Array] + out_file: str + pad_width: Tuple[tuple, tuple, tuple] + index: int + + def __post_init__(self): + self.shape = self.input_data.shape
+ + + +
+[docs] +@dataclass +class ForwardPassStrategy: + """Class to prepare data for forward passes through generator. + + A full file list of contiguous times is provided. The corresponding data is + split into spatiotemporal chunks which can overlap in time and space. These + chunks are distributed across nodes according to the max nodes input or + number of temporal chunks. This strategy stores information on these + chunks, how they overlap, how they are distributed to nodes, and how to + crop generator output to stitch the chunks back together. + + Use the following inputs to initialize data handlers on different nodes and + to define the size of the data chunks that will be passed through the + generator. + + Parameters + ---------- + file_paths : list | str + A list of low-resolution source files to extract raster data from. + Each file must have the same number of timesteps. Can also pass a + string with a unix-style file path which will be passed through + glob.glob. + + Note: These files can also include a 2D (lat, lon) "mask" variable + which is True for grid points which can be skipped in the forward pass + and False otherwise. This will be used to skip running the forward pass + for chunks which only include masked points. e.g. chunks covering only + ocean. Chunks with even a single unmasked point will still be sent + through the forward pass. + model_kwargs : str | list + Keyword arguments to send to ``model_class.load(**model_kwargs)`` to + initialize the GAN. Typically this is just the string path to the + model directory, but can be multiple models or arguments for more + complex models. + fwp_chunk_shape : tuple + Max shape (spatial_1, spatial_2, temporal) of an unpadded coarse chunk + to use for a forward pass. The number of nodes that the + :class:`.ForwardPassStrategy` is set to distribute to is calculated by + dividing up the total time index from all file_paths by the temporal + part of this chunk shape. Each node will then be parallelized across + parallel processes by the spatial chunk shape. If temporal_pad / + spatial_pad are nonzero the chunk sent to the generator can be bigger + than this shape. If running in serial set this equal to the shape of + the full spatiotemporal data volume for best performance. + spatial_pad : int + Size of spatial overlap between coarse chunks passed to forward passes + for subsequent spatial stitching. This overlap will pad both sides of + the fwp_chunk_shape. + temporal_pad : int + Size of temporal overlap between coarse chunks passed to forward passes + for subsequent temporal stitching. This overlap will pad both sides of + the fwp_chunk_shape. + model_class : str + Name of the sup3r model class for the GAN model to load. The default is + the basic spatial / spatiotemporal ``Sup3rGan`` model. This will be + loaded from ``sup3r.models`` + out_pattern : str + Output file pattern. Must include {file_id} format key. Each output + file will have a unique file_id filled in and the ext determines the + output type. If pattern is None then data will be returned in an array + and not saved. + input_handler_name : str | None + Class to use for input data. Provide a string name to match a + rasterizer or handler class in ``sup3r.preprocessing`` + input_handler_kwargs : dict | None + Any kwargs for initializing the ``input_handler_name`` class. + exo_handler_kwargs : dict | None + Dictionary of args to pass to + :class:`~sup3r.preprocessing.data_handlers.ExoDataHandler` for + extracting exogenous features for forward passes. 
This should be + a nested dictionary with keys for each exogenous feature. The + dictionaries corresponding to the feature names should include the path + to exogenous data source and the files used for input to the forward + passes, at minimum. Can also provide a dictionary of + ``input_handler_kwargs`` used for the handler which opens the + exogenous data. e.g.:: + {'topography': { + 'source_file': ..., + 'input_files': ..., + 'input_handler_kwargs': {'target': ..., 'shape': ...}}} + bias_correct_method : str | None + Optional bias correction function name that can be imported from the + :mod:`sup3r.bias.bias_transforms` module. This will transform the + source data according to some predefined bias correction transformation + along with the bias_correct_kwargs. As the first argument, this method + must receive a generic numpy array of data to be bias corrected + bias_correct_kwargs : dict | None + Optional namespace of kwargs to provide to bias_correct_method. If + this is provided, it must be a dictionary where each key is a feature + name and each value is a dictionary of kwargs to correct that feature. + You can bias correct only certain input features by only including + those feature names in this dict. + allowed_const : list | bool + Tensorflow has a tensor memory limit of 2GB (result of protobuf + limitation) and when exceeded can return a tensor with a constant + output. sup3r will raise a ``MemoryError`` in response. If your model + is allowed to output a constant output, set this to True to allow any + constant output or a list of allowed possible constant outputs. For + example, a precipitation model should be allowed to output all zeros so + set this to ``[0]``. For details on this limit: + https://github.com/tensorflow/tensorflow/issues/51870 + incremental : bool + Allow the forward pass iteration to skip spatiotemporal chunks that + already have an output file (default = True) or iterate through all + chunks and overwrite any pre-existing outputs (False). + output_workers : int | None + Max number of workers to use for writing forward pass output. + invert_uv : bool | None + Whether to convert u and v wind components to windspeed and direction + for writing to output. This defaults to True for H5 output and False + for NETCDF output. + pass_workers : int | None + Max number of workers to use for performing forward passes on a single + node. If 1 then all forward passes on chunks distributed to a single + node will be run serially. pass_workers=2 is the minimum number of + workers required to run the ForwardPass initialization and + :meth:`~.forward_pass.ForwardPass.run_chunk()` methods concurrently. + max_nodes : int | None + Maximum number of nodes to distribute spatiotemporal chunks across. If + None then a node will be used for each temporal chunk. + head_node : bool + Whether initialization is taking place on the head node of a multi node + job launch. When this is true :class:`.ForwardPassStrategy` is only + partially initialized to provide the head node enough information for + how to distribute jobs across nodes. Preflight tasks like bias + correction will be skipped because they will be performed on the nodes + jobs are distributed to by the head node. 
+ """ + + file_paths: Union[str, list, pathlib.Path] + model_kwargs: dict + fwp_chunk_shape: tuple = (None, None, None) + spatial_pad: int = 0 + temporal_pad: int = 0 + model_class: str = 'Sup3rGan' + out_pattern: Optional[str] = None + input_handler_name: Optional[str] = None + input_handler_kwargs: Optional[dict] = None + exo_handler_kwargs: Optional[dict] = None + bias_correct_method: Optional[str] = None + bias_correct_kwargs: Optional[dict] = None + allowed_const: Optional[Union[list, bool]] = None + incremental: bool = True + output_workers: int = 1 + invert_uv: Optional[bool] = None + pass_workers: int = 1 + max_nodes: int = 1 + head_node: bool = False + + @log_args + def __post_init__(self): + self.file_paths = expand_paths(self.file_paths) + self.bias_correct_kwargs = self.bias_correct_kwargs or {} + self.timer = Timer() + + model = get_model(self.model_class, self.model_kwargs) + self.s_enhancements = model.s_enhancements + self.t_enhancements = model.t_enhancements + self.s_enhance, self.t_enhance = model.s_enhance, model.t_enhance + self.input_features = model.lr_features + self.output_features = model.hr_out_features + self.features, self.exo_features = self._init_features(model) + self.input_handler = self.timer(self.init_input_handler, log=True)() + self.time_slice = _parse_time_slice( + self.input_handler_kwargs.get('time_slice', slice(None)) + ) + self.fwp_chunk_shape = self._get_fwp_chunk_shape() + + self.fwp_slicer = ForwardPassSlicer( + coarse_shape=self.input_handler.grid_shape, + time_steps=len(self.input_handler.time_index), + time_slice=self.time_slice, + chunk_shape=self.fwp_chunk_shape, + s_enhance=self.s_enhance, + t_enhance=self.t_enhance, + spatial_pad=self.spatial_pad, + temporal_pad=self.temporal_pad, + ) + self.n_chunks = self.fwp_slicer.n_chunks + + msg = ( + 'The same exogenous data is used by all nodes, so it will be ' + 'cached on the head_node. This can take a long time and might be ' + 'worth doing as an independent preprocessing step instead.' + ) + if self.head_node and not all( + os.path.exists(fp) for fp in self.get_exo_cache_files(model) + ): + logger.warning(msg) + warn(msg) + _ = self.timer(self.load_exo_data, log=True)(model) + + if not self.head_node: + hr_shape = self.hr_lat_lon.shape[:-1] + self.gids = np.arange(np.prod(hr_shape)).reshape(hr_shape) + self.exo_data = self.timer(self.load_exo_data, log=True)(model) + + self.preflight() + + @property + def meta(self): + """Meta data dictionary for the strategy. Used to add info to forward + pass output meta.""" + meta_data = { + 'fwp_chunk_shape': self.fwp_chunk_shape, + 'spatial_pad': self.spatial_pad, + 'temporal_pad': self.temporal_pad, + 'model_kwargs': self.model_kwargs, + 'model_class': self.model_class, + 'spatial_enhance': int(self.s_enhance), + 'temporal_enhance': int(self.t_enhance), + 'input_files': self.file_paths, + 'input_features': self.features, + 'output_features': self.output_features, + 'input_shape': self.input_handler.grid_shape, + 'input_time_range': get_date_range_kwargs( + self.input_handler.time_index[self.time_slice] + ), + } + return meta_data + +
+[docs] + def init_input_handler(self): + """Get input handler instance for given input kwargs. If self.head_node + is False we get all requested features. Otherwise this is part of + initialization on a head node and just used to get the shape of the + input domain, so we don't need to get any features yet.""" + self.input_handler_kwargs = self.input_handler_kwargs or {} + self.input_handler_kwargs['file_paths'] = self.file_paths + self.input_handler_kwargs['features'] = self.features + + InputHandler = get_input_handler_class(self.input_handler_name) + input_handler_kwargs = copy.deepcopy(self.input_handler_kwargs) + + input_handler_kwargs['features'] = self.features + if self.head_node: + input_handler_kwargs['features'] = [] + input_handler_kwargs['chunks'] = 'auto' + + input_handler_kwargs['time_slice'] = slice(None) + return InputHandler(**input_handler_kwargs)
+ + + def _init_features(self, model): + """Initialize feature attributes.""" + self.exo_handler_kwargs = self.exo_handler_kwargs or {} + exo_features = list(self.exo_handler_kwargs) + features = [f for f in model.lr_features if f not in exo_features] + return features, exo_features + + @cached_property + def node_chunks(self): + """Get array of lists such that node_chunks[i] is a list of + indices for the chunks that will be sent through the generator on the + ith node.""" + node_chunks = min(self.max_nodes or np.inf, len(self.unmasked_chunks)) + return np.array_split(self.unmasked_chunks, node_chunks) + + @property + def unmasked_chunks(self): + """List of chunk indices that are not masked from the input spatial + region. These chunks are those that will go through the forward pass. + Masked chunks will be skipped.""" + return [ + idx + for idx in np.arange(self.n_chunks) + if not self.chunk_masked(idx, log=False) + ] + + def _get_fwp_chunk_shape(self): + """Get fwp_chunk_shape with default shape equal to the input handler + shape""" + grid_shape = self.input_handler.grid_shape + tsteps = len(self.input_handler.time_index[self.time_slice]) + shape_iter = zip(self.fwp_chunk_shape, (*grid_shape, tsteps)) + return tuple(fs or ffs for fs, ffs in shape_iter) + +
+[docs] + def preflight(self): + """Preflight logging and sanity checks""" + + out = self.fwp_slicer.get_time_slices() + self.ti_slices, self.ti_pad_slices = out + + fwp_tsteps = self.fwp_chunk_shape[2] + 2 * self.temporal_pad + tsteps = len(self.input_handler.time_index[self.time_slice]) + msg = ( + f'Using a padded chunk size ({fwp_tsteps}) larger than the full ' + f'temporal domain ({tsteps}). Should just run without temporal ' + 'chunking. ' + ) + if fwp_tsteps > tsteps: + logger.warning(msg) + warnings.warn(msg) + out = self.fwp_slicer.get_spatial_slices() + self.lr_slices, self.lr_pad_slices, self.hr_slices = out + + non_masked = self.fwp_slicer.n_spatial_chunks - sum(self.fwp_mask) + non_masked *= int(self.fwp_slicer.n_time_chunks) + log_dict = { + 'n_nodes': len(self.node_chunks), + 'n_spatial_chunks': self.fwp_slicer.n_spatial_chunks, + 'n_time_chunks': self.fwp_slicer.n_time_chunks, + 'n_total_chunks': self.fwp_slicer.n_chunks, + 'non_masked_chunks': non_masked, + } + logger.info( + f'Chunk strategy description:\n' + f'{pprint.pformat(log_dict, indent=2)}' + )
+ + +
+[docs] + def get_chunk_indices(self, chunk_index): + """Get (spatial, temporal) indices for the given chunk index""" + return ( + chunk_index % self.fwp_slicer.n_spatial_chunks, + chunk_index // self.fwp_slicer.n_spatial_chunks, + )
+ + + @cached_property + def hr_lat_lon(self): + """Get high resolution lat lons""" + lr_lat_lon = self.input_handler.lat_lon + shape = tuple(d * self.s_enhance for d in lr_lat_lon.shape[:-1]) + logger.info( + f'Getting high-resolution grid for full output domain: {shape}' + ) + return OutputHandler.get_lat_lon(lr_lat_lon, shape) + + @cached_property + def out_files(self): + """Get list of output file names for each file chunk forward pass.""" + file_ids = [ + f'{str(i).zfill(6)}_{str(j).zfill(6)}' + for i in range(self.fwp_slicer.n_time_chunks) + for j in range(self.fwp_slicer.n_spatial_chunks) + ] + out_file_list = [None] * len(file_ids) + if self.out_pattern is not None: + msg = 'out_pattern must include a {file_id} format key' + assert '{file_id}' in self.out_pattern, msg + os.makedirs(os.path.dirname(self.out_pattern), exist_ok=True) + out_file_list = [ + self.out_pattern.format(file_id=file_id) + for file_id in file_ids + ] + return out_file_list + + @staticmethod + def _get_pad_width(window, max_steps, max_pad): + """ + Parameters + ---------- + window : slice + Slice with start and stop of window to pad. + max_steps : int + Maximum number of steps available. Padding cannot extend past this + max_pad : int + Maximum amount of padding to apply. + + Returns + ------- + tuple + Tuple of pad width for the given window. + """ + start = window.start or 0 + stop = window.stop or max_steps + start = int(np.maximum(0, (max_pad - start))) + stop = int(np.maximum(0, max_pad + stop - max_steps)) + return (start, stop) + +
+[docs] + def get_pad_width(self, chunk_index): + """Get padding for the current spatiotemporal chunk + + Returns + ------- + padding : tuple + Tuple of tuples with padding width for spatial and temporal + dimensions. Each tuple includes the start and end of padding for + that dimension. Ordering is spatial_1, spatial_2, temporal. + """ + s_chunk_idx, t_chunk_idx = self.get_chunk_indices(chunk_index) + ti_slice = self.ti_slices[t_chunk_idx] + lr_slice = self.lr_slices[s_chunk_idx] + + return ( + self._get_pad_width( + lr_slice[0], self.input_handler.grid_shape[0], self.spatial_pad + ), + self._get_pad_width( + lr_slice[1], self.input_handler.grid_shape[1], self.spatial_pad + ), + self._get_pad_width( + ti_slice, len(self.input_handler.time_index), self.temporal_pad + ), + )
+ + +
+[docs] + def prep_chunk_data(self, chunk_index=0): + """Get low res input data and exo data for given chunk index and bias + correct low res data if requested. + + Note + ---- + ``input_data.load()`` is called here to load chunk data into memory + """ + + s_chunk_idx, t_chunk_idx = self.get_chunk_indices(chunk_index) + lr_pad_slice = self.lr_pad_slices[s_chunk_idx] + ti_pad_slice = self.ti_pad_slices[t_chunk_idx] + exo_data = ( + self.timer(self.exo_data.get_chunk, log=True, call_id=chunk_index)( + [lr_pad_slice[0], lr_pad_slice[1], ti_pad_slice], + ) + if self.exo_data is not None + else None + ) + + kwargs = dict(zip(Dimension.dims_2d(), lr_pad_slice)) + kwargs[Dimension.TIME] = ti_pad_slice + input_data = self.input_handler.isel(**kwargs) + logger.info( + 'Loading data for chunk_index=%s into memory.', chunk_index + ) + input_data.load() + + if self.bias_correct_kwargs != {}: + logger.info( + f'Bias correcting data for chunk_index={chunk_index}, ' + f'with shape={input_data.shape}' + ) + input_data = self.timer( + bias_correct_features, log=True, call_id=chunk_index + )( + features=list(self.bias_correct_kwargs), + input_handler=input_data, + bc_method=self.bias_correct_method, + bc_kwargs=self.bias_correct_kwargs, + ) + return input_data, exo_data
+ + +
+[docs]
+    def init_chunk(self, chunk_index=0):
+        """Get :class:`ForwardPassChunk` instance for the given chunk index.
+
+        This selects the appropriate data from `self.input_handler` and
+        `self.exo_data` and returns a structure object (`ForwardPassChunk`)
+        with that data and other chunk specific attributes.
+        """
+
+        s_chunk_idx, t_chunk_idx = self.get_chunk_indices(chunk_index)
+
+        msg = (
+            f'Requested forward pass on chunk_index={chunk_index} >= '
+            f'n_chunks={self.fwp_slicer.n_chunks}'
+        )
+        assert chunk_index < self.fwp_slicer.n_chunks, msg
+
+        hr_slice = self.hr_slices[s_chunk_idx]
+        ti_slice = self.ti_slices[t_chunk_idx]
+        lr_times = self.input_handler.time_index[ti_slice]
+        lr_pad_slice = self.lr_pad_slices[s_chunk_idx]
+        ti_pad_slice = self.ti_pad_slices[t_chunk_idx]
+
+        args_dict = {
+            'chunk': chunk_index,
+            'temporal_chunk': t_chunk_idx,
+            'spatial_chunk': s_chunk_idx,
+            'n_node_chunks': self.fwp_slicer.n_chunks,
+            'fwp_chunk_shape': self.fwp_chunk_shape,
+            'temporal_pad': self.temporal_pad,
+            'spatial_pad': self.spatial_pad,
+            'lr_pad_slice': lr_pad_slice,
+            'ti_pad_slice': ti_pad_slice,
+        }
+        logger.info(
+            'Initializing ForwardPassChunk with: '
+            f'{pprint.pformat(args_dict, indent=2)}'
+        )
+
+        logger.info(f'Getting input data for chunk_index={chunk_index}.')
+
+        input_data, exo_data = self.timer(
+            self.prep_chunk_data, log=True, call_id=chunk_index
+        )(chunk_index=chunk_index)
+
+        return ForwardPassChunk(
+            input_data=input_data.as_array(),
+            exo_data=exo_data,
+            lr_pad_slice=lr_pad_slice,
+            hr_crop_slice=self.fwp_slicer.hr_crop_slices[t_chunk_idx][
+                s_chunk_idx
+            ],
+            hr_lat_lon=self.hr_lat_lon[hr_slice[:2]],
+            hr_times=OutputHandler.get_times(
+                lr_times, self.t_enhance * len(lr_times)
+            ),
+            gids=self.gids[hr_slice[:2]],
+            out_file=self.out_files[chunk_index],
+            pad_width=self.get_pad_width(chunk_index),
+            index=chunk_index,
+        )
+ + +
+[docs] + def get_exo_kwargs(self, model): + """Get list of exo kwargs for all exo features.""" + exo_kwargs_list = [] + if self.exo_handler_kwargs: + for feature in self.exo_features: + exo_kwargs = copy.deepcopy(self.exo_handler_kwargs[feature]) + exo_kwargs['feature'] = feature + exo_kwargs['model'] = model + input_handler_kwargs = exo_kwargs.get( + 'input_handler_kwargs', {} + ) + input_handler_kwargs['target'] = self.input_handler.target + input_handler_kwargs['shape'] = self.input_handler.grid_shape + _ = input_handler_kwargs.pop('time_slice', None) + exo_kwargs['input_handler_kwargs'] = input_handler_kwargs + exo_kwargs = get_class_kwargs(ExoDataHandler, exo_kwargs) + exo_kwargs_list.append(exo_kwargs) + return exo_kwargs_list
+ + +
+[docs] + def get_exo_cache_files(self, model): + """Get list of exo cache files so we can check if they exist or not.""" + cache_files = [] + for exo_kwargs in self.get_exo_kwargs(model): + cache_files.extend(ExoDataHandler(**exo_kwargs).cache_files) + return cache_files
+ + +
+[docs] + def load_exo_data(self, model): + """Extract exogenous data for each exo feature and store data in + dictionary with key for each exo feature + + Returns + ------- + exo_data : ExoData + :class:`ExoData` object composed of multiple + :class:`SingleExoDataStep` objects. This is the exo data for the + full spatiotemporal extent. + """ + data = {} + exo_data = None + for exo_kwargs in self.get_exo_kwargs(model): + data.update(ExoDataHandler(**exo_kwargs).data) + exo_data = ExoData(data) + return exo_data
+
+
+    @cached_property
+    def fwp_mask(self):
+        """Cached spatial mask which returns whether a given spatial chunk
+        should be skipped by the forward pass or not. This is used to skip
+        running the forward pass for areas with just ocean, for example.
+
+        Note: Entries are True for spatial chunks that can be skipped in the
+        forward pass and False otherwise.
+
+        See Also
+        --------
+        sup3r.pipeline.strategy.ForwardPassStrategy
+        """
+
+        mask = np.zeros(len(self.lr_pad_slices))
+        input_handler_kwargs = copy.deepcopy(self.input_handler_kwargs)
+        input_handler_kwargs['features'] = 'all'
+        handler = Rasterizer(
+            **get_class_kwargs(Rasterizer, input_handler_kwargs)
+        )
+        if 'mask' in handler.data:
+            logger.info(
+                'Found "mask" in DataHandler. Computing forward pass '
+                'chunk mask for %s chunks',
+                len(self.lr_pad_slices),
+            )
+            mask_vals = handler.data['mask'].values
+            for s_chunk_idx, lr_slices in enumerate(self.lr_pad_slices):
+                mask_check = mask_vals[lr_slices[0], lr_slices[1]]
+                mask[s_chunk_idx] = bool(np.prod(mask_check.flatten()))
+        return mask
+
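A standalone sketch of the masking rule in ``fwp_mask`` above, with a hypothetical 2x3 mask array: a spatial chunk is skipped only if every grid point in its padded low-res slice is masked, i.e. ``np.prod`` over the slice is nonzero.

import numpy as np

mask_vals = np.array([[1, 1, 0],
                      [1, 1, 0]])
lr_pad_slices = [(slice(0, 2), slice(0, 2)),   # all ones -> skipped
                 (slice(0, 2), slice(1, 3))]   # contains a zero -> kept

for s_idx, (rows, cols) in enumerate(lr_pad_slices):
    chunk_masked = bool(np.prod(mask_vals[rows, cols]))
    print(s_idx, chunk_masked)  # 0 True, then 1 False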
+[docs] + def node_finished(self, node_idx): + """Check if all out files for a given node have been saved""" + return all(self.chunk_finished(i) for i in self.node_chunks[node_idx])
+ + +
+[docs]
+    def chunk_finished(self, chunk_idx, log=True):
+        """Check if process for given chunk_index has already been run.
+        Considered finished if there is already an output file and
+        incremental is True."""
+
+        out_file = self.out_files[chunk_idx]
+        check = (
+            out_file is not None
+            and os.path.exists(out_file)
+            and self.incremental
+        )
+        if check and log:
+            logger.info(
+                '%s already exists and incremental = True. Skipping forward '
+                'pass for chunk index %s.',
+                out_file,
+                chunk_idx,
+            )
+        return check
+ + +
+[docs]
+    def chunk_masked(self, chunk_idx, log=True):
+        """Check if the region for this chunk is masked. This is used to skip
+        running the forward pass for regions with just ocean, for example."""
+
+        s_chunk_idx, _ = self.get_chunk_indices(chunk_idx)
+        mask_check = self.fwp_mask[s_chunk_idx]
+        if mask_check and log:
+            logger.info(
+                'Chunk %s has spatial chunk index %s, which corresponds to a '
+                'masked spatial region. Skipping forward pass for this chunk.',
+                chunk_idx,
+                s_chunk_idx,
+            )
+        return mask_check
diff --git a/_modules/sup3r/pipeline/utilities.html b/_modules/sup3r/pipeline/utilities.html
new file mode 100644
index 000000000..f10b44727
--- /dev/null
+++ b/_modules/sup3r/pipeline/utilities.html
Source code for sup3r.pipeline.utilities

+"""Methods used by :class:`ForwardPass` and :class:`ForwardPassStrategy`"""
+import logging
+
+import numpy as np
+
+import sup3r.models
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+def get_model(model_class, kwargs):
+    """Instantiate model after check on class name."""
+    class_name = model_class
+    model_class = getattr(sup3r.models, class_name, None)
+    if isinstance(kwargs, str):
+        kwargs = {'model_dir': kwargs}
+
+    if model_class is None:
+        msg = (
+            'Could not load requested model class "{}" from '
+            'sup3r.models. Make sure you typed in the model class '
+            'name correctly.'.format(class_name)
+        )
+        logger.error(msg)
+        raise KeyError(msg)
+    return model_class.load(**kwargs, verbose=True)
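A hypothetical usage sketch (the directory path is illustrative, not from the source): passing a string for ``kwargs`` is shorthand for a ``model_dir`` entry, so these two calls are equivalent.

# 'Sup3rGan' is a class in sup3r.models; the path is a placeholder
model = get_model('Sup3rGan', '/path/to/model_dir')
model = get_model('Sup3rGan', {'model_dir': '/path/to/model_dir'})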
+ + + +
+[docs] +def get_chunk_slices(arr_size, chunk_size, index_slice=slice(None)): + """Get array slices of corresponding chunk size + + Parameters + ---------- + arr_size : int + Length of array to slice + chunk_size : int + Size of slices to split array into + index_slice : slice + Slice specifying starting and ending index of slice list + + Returns + ------- + list + List of slices corresponding to chunks of array + """ + + indices = np.arange(0, arr_size) + indices = indices[slice(index_slice.start, index_slice.stop)] + step = 1 if index_slice.step is None else index_slice.step + slices = [] + start = indices[0] + stop = start + step * chunk_size + stop = np.min([stop, indices[-1] + 1]) + + while start < indices[-1] + 1: + slices.append(slice(start, stop, step)) + start = stop + stop += step * chunk_size + stop = np.min([stop, indices[-1] + 1]) + return slices
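For example, splitting a 10-element axis into chunks of 4 yields slices covering the full range, with a short final chunk:

slices = get_chunk_slices(10, 4)
# -> slices covering 0:4, 4:8, and 8:10, each with step 1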
diff --git a/_modules/sup3r/postprocessing/collectors/base.html b/_modules/sup3r/postprocessing/collectors/base.html
new file mode 100644
index 000000000..b407efa41
--- /dev/null
+++ b/_modules/sup3r/postprocessing/collectors/base.html
Source code for sup3r.postprocessing.collectors.base

+"""H5/NETCDF file collection."""
+
+import glob
+import logging
+import re
+from abc import ABC, abstractmethod
+
+from rex.utilities.fun_utils import get_fun_call_str
+
+from sup3r.postprocessing.writers.base import OutputMixin
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class BaseCollector(OutputMixin, ABC):
+    """Base collector class for H5/NETCDF collection"""
+
+    def __init__(self, file_paths):
+        """Parameters
+        ----------
+        file_paths : list | str
+            Explicit list of str file paths that will be sorted and collected
+            or a single string with unix-style /search/patt*ern.<ext>. Files
+            should have non-overlapping time_index and spatial domains.
+        """
+        if not isinstance(file_paths, list):
+            file_paths = glob.glob(file_paths)
+        self.file_paths = file_paths
+        self.flist = sorted(file_paths)
+        self.data = None
+        self.file_attrs = {}
+        msg = (
+            'File names must end with two zero padded integers, denoting '
+            'the temporal chunk index and the spatial chunk index '
+            'respectively. e.g. sup3r_chunk_000000_000000.h5'
+        )
+
+        assert all(self.get_chunk_indices(file) for file in self.flist), msg
+[docs] + @staticmethod + def get_chunk_indices(file): + """Get spatial and temporal chunk indices from the given file name. + + Returns + ------- + temporal_chunk_index : str + Zero padded integer for the temporal chunk index + spatial_chunk_index : str + Zero padded integer for the spatial chunk index + """ + return re.match(r'.*_([0-9]+)_([0-9]+).*\w+$', file).groups()
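As a standalone check of the pattern above (the file name is hypothetical), the two captured groups are the temporal and spatial chunk indices, in that order:

import re

m = re.match(r'.*_([0-9]+)_([0-9]+).*\w+$', 'sup3r_chunk_000001_000002.h5')
print(m.groups())  # ('000001', '000002') -> (temporal, spatial)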
+ + +
+[docs] + @classmethod + @abstractmethod + def collect(cls, *args, **kwargs): + """Collect data files from a dir to one output file."""
+ + +
+[docs] + @classmethod + def get_node_cmd(cls, config): + """Get a CLI call to collect data. + + Parameters + ---------- + config : dict + sup3r collection config with all necessary args and kwargs to + run data collection. + """ + import_str = ( + 'from sup3r.postprocessing.collectors ' + f'import {cls.__name__};\n' + 'from rex import init_logger;\n' + 'import time;\n' + 'from gaps import Status' + ) + + dc_fun_str = get_fun_call_str(cls.collect, config) + + log_file = config.get('log_file', None) + log_level = config.get('log_level', 'INFO') + log_arg_str = f'"sup3r", log_level="{log_level}"' + if log_file is not None: + log_arg_str += f', log_file="{log_file}"' + + cmd = ( + f"python -c '{import_str};\n" + 't0 = time.time();\n' + f'logger = init_logger({log_arg_str});\n' + f'{dc_fun_str};\n' + 't_elap = time.time() - t0;\n' + ) + + pipeline_step = config.get('pipeline_step') or ModuleName.DATA_COLLECT + cmd = BaseCLI.add_status_cmd(config, pipeline_step, cmd) + cmd += ";'\n" + + return cmd.replace('\\', '/')
+
diff --git a/_modules/sup3r/postprocessing/collectors/h5.html b/_modules/sup3r/postprocessing/collectors/h5.html
new file mode 100644
index 000000000..800bb77c1
--- /dev/null
+++ b/_modules/sup3r/postprocessing/collectors/h5.html
Source code for sup3r.postprocessing.collectors.h5

+"""H5 file collection."""
+
+import logging
+import os
+from warnings import warn
+
+import dask
+import numpy as np
+import pandas as pd
+from rex.utilities.loggers import init_logger
+from scipy.spatial import KDTree
+
+from sup3r.postprocessing.writers.base import RexOutputs
+from sup3r.preprocessing.utilities import _mem_check
+
+from .base import BaseCollector
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class CollectorH5(BaseCollector): + """Sup3r H5 file collection framework""" + +
+[docs]
+    @classmethod
+    def get_slices(
+        cls, final_time_index, final_meta, new_time_index, new_meta
+    ):
+        """Get index slices where the new ti/meta belong in the final ti/meta.
+
+        Parameters
+        ----------
+        final_time_index : pd.Datetimeindex
+            Time index of the final file that new_time_index is being written
+            to.
+        final_meta : pd.DataFrame
+            Meta data of the final file that new_meta is being written to.
+        new_time_index : pd.Datetimeindex
+            Chunk time index that is a subset of the final_time_index.
+        new_meta : pd.DataFrame
+            Chunk meta data that is a subset of the final_meta.
+
+        Returns
+        -------
+        row_slice : slice
+            final_time_index[row_slice] = new_time_index
+        col_loc : np.ndarray
+            Array of indices such that final_meta.iloc[col_loc] = new_meta
+        """
+        final_index = final_meta.index
+        new_index = new_meta.index
+        row_loc = np.where(final_time_index.isin(new_time_index))[0]
+        col_loc = np.where(final_meta['gid'].isin(new_meta['gid']))[0]
+
+        if not len(row_loc) > 0:
+            msg = (
+                'Could not find row locations in file collection. '
+                'New time index: {} final time index: {}'.format(
+                    new_time_index, final_time_index
+                )
+            )
+            logger.error(msg)
+            raise RuntimeError(msg)
+
+        if not len(col_loc) > 0:
+            msg = (
+                'Could not find col locations in file collection. '
+                'New index: {} final index: {}'.format(new_index, final_index)
+            )
+            logger.error(msg)
+            raise RuntimeError(msg)
+
+        row_slice = slice(np.min(row_loc), np.max(row_loc) + 1)
+
+        msg = (
+            f'row_slice={row_slice} conflict with row_indices={row_loc}. '
+            'Indices do not seem to be increasing and/or contiguous.'
+        )
+        assert (row_slice.stop - row_slice.start) == len(row_loc), msg
+
+        return row_slice, col_loc
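A minimal sketch of the row lookup above, using a hypothetical 6-step hourly index: the chunk's times are located in the final index and collapsed to a contiguous slice.

import numpy as np
import pandas as pd

final_ti = pd.date_range('2020-01-01', periods=6, freq='h')
new_ti = final_ti[2:5]
row_loc = np.where(final_ti.isin(new_ti))[0]
row_slice = slice(int(row_loc.min()), int(row_loc.max()) + 1)
print(row_slice)  # slice(2, 5)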
+ + +
+[docs]
+    def get_coordinate_indices(self, target_meta, full_meta, threshold=1e-4):
+        """Get coordinate indices in meta data for given targets
+
+        Parameters
+        ----------
+        target_meta : pd.DataFrame
+            Dataframe of coordinates to find within the full meta
+        full_meta : pd.DataFrame
+            Dataframe of full set of coordinates for unfiltered dataset
+        threshold : float
+            Threshold distance for finding target coordinates within full meta
+
+        Returns
+        -------
+        indices : np.ndarray
+            Indices of the target coordinates in the full meta
+        """
+        ll2 = np.vstack(
+            (full_meta.latitude.values, full_meta.longitude.values)
+        ).T
+        tree = KDTree(ll2)
+        targets = np.vstack(
+            (target_meta.latitude.values, target_meta.longitude.values)
+        ).T
+        _, indices = tree.query(targets, distance_upper_bound=threshold)
+        indices = indices[indices < len(full_meta)]
+        return indices
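A standalone sketch of the KDTree lookup above with hypothetical coordinates: targets without a neighbor inside the threshold get index ``len(full)`` back from ``KDTree.query`` and are filtered out.

import numpy as np
from scipy.spatial import KDTree

full = np.array([[40.0, -105.0], [40.0, -104.9], [40.1, -105.0]])
targets = np.array([[40.1, -105.0], [40.0, -104.9]])

tree = KDTree(full)
_, idx = tree.query(targets, distance_upper_bound=1e-4)
idx = idx[idx < len(full)]  # unmatched targets get index == len(full)
print(idx)  # [2 1]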
+ + +
+[docs]
+    def get_data(
+        self,
+        file_path,
+        feature,
+        time_index,
+        meta,
+        scale_factor,
+        dtype,
+        threshold=1e-4,
+    ):
+        """Retrieve a data array from a chunked file and write it in place to
+        ``self.data[row_slice, col_slice]``, where the slices locate the
+        chunk data within the final time index and meta.
+
+        Parameters
+        ----------
+        file_path : str
+            h5 file to get data from
+        feature : str
+            dataset to retrieve data from fpath.
+        time_index : pd.Datetimeindex
+            Time index of the final file.
+        meta : pd.DataFrame
+            Meta data of the final file.
+        scale_factor : int | float
+            Final destination scale factor after collection. If the data
+            retrieval from the files to be collected has a different scale
+            factor, the collected data will be rescaled and returned as
+            float32.
+        dtype : np.dtype
+            Final dtype to write data as
+        threshold : float
+            Threshold distance for finding target coordinates within full meta
+        """
+        with RexOutputs(file_path, unscale=False, mode='r') as f:
+            f_ti = f.time_index
+            f_meta = f.meta
+            source_scale_factor = f.attrs[feature].get('scale_factor', 1)
+
+            if feature not in f.dsets:
+                e = (
+                    'Trying to collect dataset "{}" but cannot find in '
+                    'available: {}'.format(feature, f.dsets)
+                )
+                logger.error(e)
+                raise KeyError(e)
+
+            mask = self.get_coordinate_indices(
+                meta, f_meta, threshold=threshold
+            )
+            f_meta = f_meta.iloc[mask]
+            f_data = f[feature][:, mask]
+
+        if len(mask) == 0:
+            msg = (
+                'No target coordinates found in masked meta. '
+                f'Skipping collection for {file_path}.'
+            )
+            logger.warning(msg)
+            warn(msg)
+
+        else:
+            row_slice, col_slice = self.get_slices(
+                time_index, meta, f_ti, f_meta
+            )
+
+            if scale_factor != source_scale_factor:
+                f_data = f_data.astype(np.float32)
+                f_data *= scale_factor / source_scale_factor
+
+            if np.issubdtype(dtype, np.integer):
+                f_data = np.round(f_data)
+
+            f_data = f_data.astype(dtype)
+
+            try:
+                self.data[row_slice, col_slice] = f_data
+            except Exception as e:
+                msg = (
+                    f'Failed to add data to self.data[{row_slice}, '
+                    f'{col_slice}] for feature={feature}, '
+                    f'file_path={file_path}, time_index={time_index}, '
+                    f'meta={meta}. {e}'
+                )
+                logger.error(msg)
+                raise OSError(msg) from e
+ + + def _get_file_time_index(self, file): + """Get time index for a single file. Simple method used in thread pool + for attribute collection.""" + with RexOutputs(file, mode='r') as f: + time_index = f.time_index + logger.debug( + 'Finished getting time index for file: %s. %s', + file, + _mem_check(), + ) + return time_index + + def _get_file_meta(self, file): + """Get meta for a single file. Simple method used in thread pool for + attribute collection.""" + with RexOutputs(file, mode='r') as f: + meta = f.meta + logger.debug( + 'Finished getting meta for file: %s. %s', file, _mem_check() + ) + return meta + +
+[docs]
+    def get_unique_chunk_files(self, file_paths):
+        """We get files for the unique spatial and temporal extents covered by
+        all collection files. Since the files have a suffix
+        ``_{temporal_chunk_index}_{spatial_chunk_index}.h5`` we just use all
+        files with a single ``spatial_chunk_index`` for the full time index and
+        all files with a single ``temporal_chunk_index`` for the full meta.
+
+        Parameters
+        ----------
+        file_paths : list
+            Explicit list of str file paths, each with a suffix
+            ``_{temporal_chunk_index}_{spatial_chunk_index}.h5``
+
+        Returns
+        -------
+        t_files : list
+            Explicit list of str file paths which, when combined, provide the
+            entire temporal extent.
+        s_files : list
+            Explicit list of str file paths which, when combined, provide the
+            entire spatial domain.
+        """
+        t_files = {}
+        s_files = {}
+        for f in file_paths:
+            t_chunk, s_chunk = self.get_chunk_indices(f)
+            if t_chunk not in t_files:
+                t_files[t_chunk] = f
+            if s_chunk not in s_files:
+                s_files[s_chunk] = f
+
+        t_files = list(t_files.values())
+        s_files = list(s_files.values())
+        logger.info('Found %s unique temporal chunks', len(t_files))
+        logger.info('Found %s unique spatial chunks', len(s_files))
+        return t_files, s_files
+ + + def _get_collection_attrs(self, file_paths, max_workers=None): + """Get important dataset attributes from a file list to be collected. + + Assumes the file list is chunked in time (row chunked). + + Parameters + ---------- + file_paths : list | str + Explicit list of str file paths that will be sorted and collected + or a single string with unix-style /search/patt*ern.h5. + max_workers : int | None + Number of workers to use in parallel. 1 runs serial, + None uses all available. + + Returns + ------- + time_index : pd.datetimeindex + Concatenated full size datetime index from the flist that is + being collected + meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected or provided target meta + """ + + t_files, s_files = self.get_unique_chunk_files(file_paths) + meta_tasks = [dask.delayed(self._get_file_meta)(fn) for fn in s_files] + ti_tasks = [ + dask.delayed(self._get_file_time_index)(fn) for fn in t_files + ] + + if max_workers == 1: + meta = dask.compute(*meta_tasks, scheduler='single-threaded') + time_index = dask.compute(*ti_tasks, scheduler='single-threaded') + else: + meta = dask.compute( + *meta_tasks, scheduler='threads', num_workers=max_workers + ) + time_index = dask.compute( + *ti_tasks, scheduler='threads', num_workers=max_workers + ) + logger.info( + 'Finished getting meta and time_index for all unique chunks.' + ) + time_index = pd.DatetimeIndex(np.concatenate(time_index)) + time_index = time_index.sort_values() + unique_ti = time_index.drop_duplicates() + msg = 'Found duplicate time steps from supposedly unique time periods.' + assert len(unique_ti) == len(time_index), msg + meta = pd.concat(meta) + + if 'latitude' in meta and 'longitude' in meta: + meta = meta.drop_duplicates(subset=['latitude', 'longitude']) + meta = meta.sort_values('gid') + + logger.info('Finished building full meta and time index.') + return time_index, meta + +
+[docs] + def get_target_and_masked_meta( + self, meta, target_meta_file=None, threshold=1e-4 + ): + """Use combined meta for all files and target_meta_file to get + mapping from the full meta to the target meta and the mapping from the + target meta to the full meta, both of which are masked to remove + coordinates not present in the target_meta. + + Parameters + ---------- + meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected or provided target meta + target_meta_file : str + Path to target final meta containing coordinates to keep from the + full list of coordinates present in the collected meta for the full + file list. + threshold : float + Threshold distance for finding target coordinates within full meta + + Returns + ------- + target_meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected or provided target meta + masked_meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected masked against target_meta + """ + if target_meta_file is not None and os.path.exists(target_meta_file): + target_meta = pd.read_csv(target_meta_file) + if 'gid' in target_meta.columns: + target_meta = target_meta.drop('gid', axis=1) + mask = self.get_coordinate_indices( + target_meta, meta, threshold=threshold + ) + masked_meta = meta.iloc[mask] + logger.info(f'Masked meta coordinates: {len(masked_meta)}') + mask = self.get_coordinate_indices( + masked_meta, target_meta, threshold=threshold + ) + target_meta = target_meta.iloc[mask] + logger.info(f'Target meta coordinates: {len(target_meta)}') + else: + target_meta = masked_meta = meta + + return target_meta, masked_meta
+ + +
+[docs] + def get_collection_attrs( + self, + file_paths, + max_workers=None, + target_meta_file=None, + threshold=1e-4, + ): + """Get important dataset attributes from a file list to be collected. + + Assumes the file list is chunked in time (row chunked). + + Parameters + ---------- + file_paths : list | str + Explicit list of str file paths that will be sorted and collected + or a single string with unix-style /search/patt*ern.h5. + max_workers : int | None + Number of workers to use in parallel. 1 runs serial, + None will use all available workers. + target_meta_file : str + Path to target final meta containing coordinates to keep from the + full list of coordinates present in the collected meta for the full + file list. + threshold : float + Threshold distance for finding target coordinates within full meta + + Returns + ------- + time_index : pd.datetimeindex + Concatenated full size datetime index from the flist that is + being collected + target_meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected or provided target meta + masked_meta : pd.DataFrame + Concatenated full size meta data from the flist that is being + collected masked against target_meta + shape : tuple + Output (collected) dataset shape + global_attrs : dict + Global attributes from the first file in file_paths (it's assumed + that all the files in file_paths have the same global file + attributes). + """ + logger.info(f'Using target_meta_file={target_meta_file}') + if isinstance(target_meta_file, str): + msg = f'Provided target meta ({target_meta_file}) does not exist.' + assert os.path.exists(target_meta_file), msg + + time_index, meta = self._get_collection_attrs( + file_paths, max_workers=max_workers + ) + logger.info('Getting target and masked meta.') + target_meta, masked_meta = self.get_target_and_masked_meta( + meta, target_meta_file, threshold=threshold + ) + + shape = (len(time_index), len(target_meta)) + + logger.info('Getting global attrs from %s', file_paths[0]) + with RexOutputs(file_paths[0], mode='r') as fin: + global_attrs = fin.global_attrs + + return time_index, target_meta, masked_meta, shape, global_attrs
+ + + def _write_flist_data( + self, + out_file, + feature, + time_index, + subset_masked_meta, + target_masked_meta, + ): + """Write spatiotemporal file list data to output file for given + feature + + Parameters + ---------- + out_file : str + Name of output file + feature : str + Name of feature for output chunk + time_index : pd.DateTimeIndex + Time index for corresponding file list data + subset_masked_meta : pd.DataFrame + Meta for corresponding file list data + target_masked_meta : pd.DataFrame + Meta for full output file + """ + with RexOutputs(out_file, mode='r') as f: + target_ti = f.time_index + y_write_slice, x_write_slice = self.get_slices( + target_ti, + target_masked_meta, + time_index, + subset_masked_meta, + ) + self._ensure_dset_in_output(out_file, feature) + + with RexOutputs(out_file, mode='a') as f: + try: + f[feature, y_write_slice, x_write_slice] = self.data + except Exception as e: + msg = ( + f'Problem with writing data to {out_file} with ' + f't_slice={y_write_slice}, ' + f's_slice={x_write_slice}. {e}' + ) + logger.error(msg) + raise OSError(msg) from e + + logger.debug( + 'Finished writing "{}" for row {} and col {} to: {}'.format( + feature, + y_write_slice, + x_write_slice, + os.path.basename(out_file), + ) + ) + + def _collect_flist( + self, + feature, + subset_masked_meta, + time_index, + shape, + file_paths, + out_file, + target_masked_meta, + max_workers=None, + ): + """Collect a dataset from a file list without getting attributes first. + This file list can be a subset of a full file list to be collected. + + Parameters + ---------- + feature : str + Dataset name to collect. + subset_masked_meta : pd.DataFrame + Meta data containing the list of coordinates present in both the + given file paths and the target_meta. This can be a subset of + the coordinates present in the full file list. The coordinates + contained in this dataframe have the same gids as those present in + the meta for the full file list. + time_index : pd.datetimeindex + Concatenated datetime index for the given file paths. + shape : tuple + Output (collected) dataset shape + file_paths : list | str + File list to be collected. This can be a subset of a full file list + to be collected. + out_file : str + File path of final output file. + target_masked_meta : pd.DataFrame + Same as subset_masked_meta but instead for the entire list of files + to be collected. + max_workers : int | None + Number of workers to use in parallel. 1 runs serial, + None uses all available. + """ + if len(subset_masked_meta) > 0: + attrs, final_dtype = self.get_dset_attrs(feature) + scale_factor = attrs.get('scale_factor', 1) + + logger.debug( + 'Collecting file list of shape %s: %s', shape, file_paths + ) + + self.data = np.zeros(shape, dtype=final_dtype) + logger.debug( + 'Initializing output dataset "%s" in-memory with ' + 'shape %s and dtype %s. 
%s', + feature, + shape, + final_dtype, + _mem_check(), + ) + tasks = [] + for fname in file_paths: + task = dask.delayed(self.get_data)( + fname, + feature, + time_index, + subset_masked_meta, + scale_factor, + final_dtype, + ) + tasks.append(task) + if max_workers == 1: + logger.info( + 'Running serial collection on %s files', len(file_paths) + ) + dask.compute(*tasks, scheduler='single-threaded') + else: + logger.info( + 'Running parallel collection on %s files with ' + 'max_workers=%s.', + len(file_paths), + max_workers, + ) + dask.compute( + *tasks, scheduler='threads', num_workers=max_workers + ) + logger.info('Finished collection of %s files.', len(file_paths)) + self._write_flist_data( + out_file, + feature, + time_index, + subset_masked_meta, + target_masked_meta, + ) + else: + msg = ( + 'No target coordinates found in masked meta. Skipping ' + f'collection for {file_paths}.' + ) + logger.warning(msg) + warn(msg) + +
+[docs] + def get_flist_chunks(self, file_paths, n_writes=None): + """Group files by temporal_chunk_index and then combines these groups + if ``n_writes`` is less than the number of time_chunks. Assumes + file_paths have a suffix format like + ``_{temporal_chunk_index}_{spatial_chunk_index}.h5`` + + Parameters + ---------- + file_paths : list + List of file paths each with a suffix + ``_{temporal_chunk_index}_{spatial_chunk_index}.h5`` + n_writes : int | None + Number of writes to use for collection + + Returns + ------- + flist_chunks : list + List of file list chunks. Used to split collection and writing into + multiple steps. + """ + file_chunks = {} + for file in file_paths: + t_chunk, _ = self.get_chunk_indices(file) + file_chunks[t_chunk] = [*file_chunks.get(t_chunk, []), file] + + if n_writes is not None and n_writes > len(file_chunks): + logger.info( + f'n_writes ({n_writes}) too big, setting to the number ' + f'of temporal chunks ({len(file_chunks)}).' + ) + n_writes = len(file_chunks) + + n_writes = n_writes or len(file_chunks) + tc_groups = np.array_split(list(file_chunks.keys()), n_writes) + fp_groups = [[file_chunks[tc] for tc in tcs] for tcs in tc_groups] + flist_chunks = [np.concatenate(group) for group in fp_groups] + logger.debug( + 'Split file list into %s chunks according to n_writes=%s', + len(flist_chunks), + n_writes, + ) + + logger.debug(f'Grouped file list into {len(file_chunks)} time chunks.') + + return flist_chunks
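A standalone sketch of the grouping above with hypothetical file names: three temporal groups are merged down to ``n_writes=2`` write steps.

import numpy as np

files = ['out_000000_000000.h5', 'out_000000_000001.h5',
         'out_000001_000000.h5', 'out_000001_000001.h5',
         'out_000002_000000.h5', 'out_000002_000001.h5']

file_chunks = {}
for f in files:
    t_chunk = f.split('_')[1]  # temporal index comes first in the suffix
    file_chunks[t_chunk] = [*file_chunks.get(t_chunk, []), f]

n_writes = 2  # merge three temporal groups into two write steps
tc_groups = np.array_split(list(file_chunks.keys()), n_writes)
flist_chunks = [np.concatenate([file_chunks[tc] for tc in tcs])
                for tcs in tc_groups]
print([len(flist) for flist in flist_chunks])  # [4, 2]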
+ + +
+[docs]
+    def collect_feature(
+        self,
+        dset,
+        target_masked_meta,
+        target_meta_file,
+        time_index,
+        shape,
+        flist_chunks,
+        out_file,
+        threshold=1e-4,
+        max_workers=None,
+    ):
+        """Collect chunks for a single feature
+
+        Parameters
+        ----------
+        dset : str
+            Dataset name to collect.
+        target_masked_meta : pd.DataFrame
+            Same as subset_masked_meta but instead for the entire list of files
+            to be collected.
+        target_meta_file : str
+            Path to target final meta containing coordinates to keep from the
+            full file list collected meta. This can be but is not necessarily a
+            subset of the full list of coordinates for all files in the file
+            list. This is used to remove coordinates from the full file list
+            which are not present in the target_meta. Either this full
+            meta or a subset, depending on which coordinates are present in
+            the data to be collected, will be the final meta for the collected
+            output files.
+        time_index : pd.datetimeindex
+            Concatenated datetime index for the given file paths.
+        shape : tuple
+            Output (collected) dataset shape
+        flist_chunks : list
+            List of file list chunks. Used to split collection and writing into
+            multiple steps.
+        out_file : str
+            File path of final output file.
+        threshold : float
+            Threshold distance for finding target coordinates within full meta
+        max_workers : int | None
+            Number of workers to use in parallel. 1 runs serial,
+            None will use all available workers.
+        """
+        logger.debug('Collecting dataset "%s".', dset)
+
+        if len(flist_chunks) == 1:
+            self._collect_flist(
+                dset,
+                target_masked_meta,
+                time_index,
+                shape,
+                flist_chunks[0],
+                out_file,
+                target_masked_meta,
+                max_workers=max_workers,
+            )
+
+        else:
+            for i, flist in enumerate(flist_chunks):
+                logger.info(
+                    'Collecting file list chunk %s out of %s',
+                    i + 1,
+                    len(flist_chunks),
+                )
+                out = self.get_collection_attrs(
+                    flist,
+                    max_workers=max_workers,
+                    target_meta_file=target_meta_file,
+                    threshold=threshold,
+                )
+                time_index, _, masked_meta, shape, _ = out
+                self._collect_flist(
+                    dset,
+                    masked_meta,
+                    time_index,
+                    shape,
+                    flist,
+                    out_file,
+                    target_masked_meta,
+                    max_workers=max_workers,
+                )
+ + +
+[docs]
+    @classmethod
+    def collect(
+        cls,
+        file_paths,
+        out_file,
+        features,
+        max_workers=None,
+        log_level=None,
+        log_file=None,
+        target_meta_file=None,
+        n_writes=None,
+        overwrite=True,
+        threshold=1e-4,
+    ):
+        """Collect data files from a dir to one output file.
+
+        Filename requirements:
+         - Should end with ".h5"
+
+        Parameters
+        ----------
+        file_paths : list | str
+            Explicit list of str file paths that will be sorted and collected
+            or a single string with unix-style /search/patt*ern.h5. Files
+            resolved by this argument must be of the form
+            ``*_{temporal_chunk_index}_{spatial_chunk_index}.h5``.
+        out_file : str
+            File path of final output file.
+        features : list
+            List of dsets to collect
+        max_workers : int | None
+            Number of workers to use in parallel. 1 runs serial,
+            None will use all available workers.
+        log_level : str | None
+            Desired log level, None will not initialize logging.
+        log_file : str | None
+            Target log file. None logs to stdout.
+        target_meta_file : str
+            Path to target final meta containing coordinates to keep from the
+            full file list collected meta. This can be but is not necessarily a
+            subset of the full list of coordinates for all files in the file
+            list. This is used to remove coordinates from the full file list
+            which are not present in the target_meta. Either this full
+            meta or a subset, depending on which coordinates are present in
+            the data to be collected, will be the final meta for the collected
+            output files.
+        n_writes : int | None
+            Number of writes to split full file list into. Must be less than
+            or equal to the number of temporal chunks if chunks have different
+            time indices.
+ overwrite : bool + Whether to overwrite existing output file + threshold : float + Threshold distance for finding target coordinates within full meta + """ + logger.info( + 'Initializing collection for file_paths=%s with max_workers=%s', + file_paths, + max_workers, + ) + + if log_level is not None: + init_logger( + 'sup3r.preprocessing', log_file=log_file, log_level=log_level + ) + + if not os.path.exists(os.path.dirname(out_file)): + os.makedirs(os.path.dirname(out_file), exist_ok=True) + + collector = cls(file_paths) + logger.info( + 'Collecting %s files to %s', len(collector.flist), out_file + ) + if overwrite and os.path.exists(out_file): + logger.info('overwrite=True, removing %s', out_file) + os.remove(out_file) + + out = collector.get_collection_attrs( + collector.flist, + max_workers=max_workers, + target_meta_file=target_meta_file, + threshold=threshold, + ) + logger.info('Finished building full spatiotemporal collection extent.') + time_index, target_meta, target_masked_meta = out[:3] + shape, global_attrs = out[3:] + + flist_chunks = collector.get_flist_chunks( + collector.flist, n_writes=n_writes + ) + if not os.path.exists(out_file): + collector._init_h5(out_file, time_index, target_meta, global_attrs) + for dset in features: + logger.debug('Collecting dataset "%s".', dset) + collector.collect_feature( + dset=dset, + target_masked_meta=target_masked_meta, + target_meta_file=target_meta_file, + time_index=time_index, + shape=shape, + flist_chunks=flist_chunks, + out_file=out_file, + threshold=threshold, + max_workers=max_workers, + ) + logger.info('Finished file collection.')
+
diff --git a/_modules/sup3r/postprocessing/collectors/nc.html b/_modules/sup3r/postprocessing/collectors/nc.html
new file mode 100644
index 000000000..e7a485066
--- /dev/null
+++ b/_modules/sup3r/postprocessing/collectors/nc.html
Source code for sup3r.postprocessing.collectors.nc

+"""NETCDF file collection.
+
+TODO: Integrate this with Cacher class
+"""
+
+import logging
+import os
+
+from rex.utilities.loggers import init_logger
+
+from sup3r.preprocessing.cachers import Cacher
+from sup3r.utilities.utilities import xr_open_mfdataset
+
+from .base import BaseCollector
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class CollectorNC(BaseCollector): + """Sup3r NETCDF file collection framework""" + +
+[docs]
+    @classmethod
+    def collect(
+        cls,
+        file_paths,
+        out_file,
+        features='all',
+        log_level=None,
+        log_file=None,
+        overwrite=True,
+        res_kwargs=None,
+    ):
+        """Collect data files from a dir to one output file.
+
+        Filename requirements:
+         - Should end with ".nc"
+
+        Parameters
+        ----------
+        file_paths : list | str
+            Explicit list of str file paths that will be sorted and collected
+            or a single string with unix-style /search/patt*ern.nc.
+        out_file : str
+            File path of final output file.
+        features : list | str
+            List of dsets to collect. If 'all' then all ``data_vars`` will be
+            collected.
+        log_level : str | None
+            Desired log level, None will not initialize logging.
+        log_file : str | None
+            Target log file. None logs to stdout.
+        overwrite : bool
+            Whether to overwrite existing output file
+        res_kwargs : dict | None
+            Dictionary of kwargs to pass to xarray.open_mfdataset.
+        """
+        logger.info(f'Initializing collection for file_paths={file_paths}')
+
+        if log_level is not None:
+            init_logger(
+                'sup3r.preprocessing', log_file=log_file, log_level=log_level
+            )
+
+        if not os.path.exists(os.path.dirname(out_file)):
+            os.makedirs(os.path.dirname(out_file), exist_ok=True)
+
+        collector = cls(file_paths)
+        logger.info(
+            'Collecting {} files to {}'.format(len(collector.flist), out_file)
+        )
+        if overwrite and os.path.exists(out_file):
+            logger.info(f'overwrite=True, removing {out_file}.')
+            os.remove(out_file)
+
+        tmp_file = out_file + '.tmp'
+        if not os.path.exists(tmp_file):
+            res_kwargs = res_kwargs or {}
+            out = xr_open_mfdataset(collector.flist, **res_kwargs)
+            Cacher.write_netcdf(tmp_file, data=out, features=features)
+
+        os.replace(tmp_file, out_file)
+        logger.info('Moved %s to %s.', tmp_file, out_file)
+
+        logger.info('Finished file collection.')
+ + +
+[docs]
+    def group_spatial_chunks(self):
+        """Group same spatial chunks together so each chunk has same spatial
+        footprint but different times"""
+        chunks = {}
+        for file in self.flist:
+            _, s_chunk = self.get_chunk_indices(file)
+            dirname = os.path.dirname(file)
+            s_file = os.path.join(dirname, f's_{s_chunk}.nc')
+            chunks[s_file] = [*chunks.get(s_file, []), file]
+        return chunks
+
diff --git a/_modules/sup3r/postprocessing/data_collect_cli.html b/_modules/sup3r/postprocessing/data_collect_cli.html
new file mode 100644
index 000000000..a9b36411e
--- /dev/null
+++ b/_modules/sup3r/postprocessing/data_collect_cli.html
Source code for sup3r.postprocessing.data_collect_cli

+"""sup3r data collection CLI entry points."""
+import copy
+import logging
+
+import click
+
+from sup3r import __version__
+from sup3r.postprocessing.collectors import CollectorH5, CollectorNC
+from sup3r.preprocessing.utilities import get_source_type
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import AVAILABLE_HARDWARE_OPTIONS, BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+@click.group()
+@click.version_option(version=__version__)
+@click.option('-v', '--verbose', is_flag=True,
+              help='Flag to turn on debug logging. Default is not verbose.')
+@click.pass_context
+def main(ctx, verbose):
+    """Sup3r Data Collection Command Line Interface"""
+    ctx.ensure_object(dict)
+    ctx.obj['VERBOSE'] = verbose
+
+
+@main.command()
+@click.option('--config_file', '-c', required=True,
+              type=click.Path(exists=True),
+              help='sup3r data collection configuration json file.')
+@click.option('-v', '--verbose', is_flag=True,
+              help='Flag to turn on debug logging. Default is not verbose.')
+@click.pass_context
+def from_config(ctx, config_file, verbose=False, pipeline_step=None):
+    """Run sup3r data collection from a config file. If dset_split is True,
+    each feature will be collected into a separate file."""
+    config = BaseCLI.from_config_preflight(ModuleName.DATA_COLLECT, ctx,
+                                           config_file, verbose)
+
+    dset_split = config.get('dset_split', False)
+    exec_kwargs = config.get('execution_control', {})
+    hardware_option = exec_kwargs.pop('option', 'local')
+    source_type = get_source_type(config['file_paths'])
+    collector_types = {'h5': CollectorH5, 'nc': CollectorNC}
+    Collector = collector_types[source_type]
+
+    configs = [config]
+    if dset_split:
+        configs = []
+        for feature in config['features']:
+            f_config = copy.deepcopy(config)
+            f_out_file = config['out_file'].replace(
+                f'.{source_type}', f'_{feature}.{source_type}')
+            f_job_name = config['job_name'] + f'_{feature}'
+            f_log_file = config.get('log_file', None)
+            if f_log_file is not None:
+                f_log_file = f_log_file.replace('.log', f'_{feature}.log')
+            f_config.update({'features': [feature],
+                             'out_file': f_out_file,
+                             'job_name': f_job_name,
+                             'log_file': f_log_file})
+
+            configs.append(f_config)
+
+    for config in configs:
+        ctx.obj['NAME'] = config['job_name']
+        config['pipeline_step'] = pipeline_step
+        cmd = Collector.get_node_cmd(config)
+
+        if hardware_option.lower() in AVAILABLE_HARDWARE_OPTIONS:
+            kickoff_slurm_job(ctx, cmd, pipeline_step, **exec_kwargs)
+        else:
+            kickoff_local_job(ctx, cmd, pipeline_step)
+
+
+
+[docs] +def kickoff_local_job(ctx, cmd, pipeline_step=None): + """Run sup3r data collection locally. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in shell script. Example: + 'python -m sup3r.cli data_collect -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + BaseCLI.kickoff_local_job(ModuleName.DATA_COLLECT, ctx, cmd, pipeline_step)
+ + + +
+[docs] +def kickoff_slurm_job(ctx, cmd, pipeline_step=None, alloc='sup3r', + memory=None, walltime=4, feature=None, + stdout_path='./stdout/'): + """Run sup3r on HPC via SLURM job submission. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in SLURM shell script. Example: + 'python -m sup3r.cli data-collect -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + alloc : str + HPC project (allocation) handle. Example: 'sup3r'. + memory : int + Node memory request in GB. + walltime : float + Node walltime request in hours. + feature : str + Additional flags for SLURM job. Format is "--qos=high" + or "--depend=[state:job_id]". Default is None. + stdout_path : str + Path to print .stdout and .stderr files. + """ + BaseCLI.kickoff_slurm_job(ModuleName.DATA_COLLECT, ctx, cmd, alloc, memory, + walltime, feature, stdout_path, pipeline_step)
+ + + +if __name__ == '__main__': + try: + main(obj={}) + except Exception: + logger.exception('Error running sup3r data collection CLI') + raise +
diff --git a/_modules/sup3r/postprocessing/writers/base.html b/_modules/sup3r/postprocessing/writers/base.html
new file mode 100644
index 000000000..28dd796e1
--- /dev/null
+++ b/_modules/sup3r/postprocessing/writers/base.html
Source code for sup3r.postprocessing.writers.base

+"""Output handling
+
+TODO: OutputHandlers should be combined with Cacher objects.
+"""
+
+import json
+import logging
+import os
+import re
+from abc import abstractmethod
+from warnings import warn
+
+import dask
+import numpy as np
+import pandas as pd
+from rex.outputs import Outputs as BaseRexOutputs
+from scipy.interpolate import griddata
+
+from sup3r.preprocessing.derivers.utilities import (
+    invert_uv,
+    parse_feature,
+)
+from sup3r.utilities import VERSION_RECORD
+from sup3r.utilities.utilities import (
+    pd_date_range,
+    safe_serialize,
+    xr_open_mfdataset,
+)
+
+logger = logging.getLogger(__name__)
+
+ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
+ATTR_FP = os.path.join(ATTR_DIR, 'output_attrs.json')
+with open(ATTR_FP) as f:
+    OUTPUT_ATTRS = json.load(f)
+
+
+
+[docs] +class OutputMixin: + """Methods used by various Output and Collection classes""" + +
+[docs] + @staticmethod + def get_time_dim_name(filepath): + """Get the name of the time dimension in the given file + + Parameters + ---------- + filepath : str + Path to the file + + Returns + ------- + time_key : str + Name of the time dimension in the given file + """ + + handle = xr_open_mfdataset(filepath) + valid_vars = set(handle.dims) + time_key = list({'time', 'Time'}.intersection(valid_vars)) + if len(time_key) > 0: + return time_key[0] + return 'time'
+ + +
+[docs]
+    @staticmethod
+    def get_dset_attrs(feature):
+        """Get attributes for output feature
+
+        Parameters
+        ----------
+        feature : str
+            Name of feature to write
+
+        Returns
+        -------
+        attrs : dict
+            Dictionary of attributes for requested dset
+        dtype : str
+            Data type for requested dset. Defaults to float32
+        """
+        feat_base_name = parse_feature(feature).basename
+        if feat_base_name in OUTPUT_ATTRS:
+            attrs = OUTPUT_ATTRS[feat_base_name]
+            dtype = attrs.get('dtype', 'float32')
+        else:
+            attrs = {}
+            dtype = 'float32'
+            msg = (
+                'Could not find feature "{}" with base name "{}" in '
+                'OUTPUT_ATTRS global variable. Writing with float32 and no '
+                'chunking.'.format(feature, feat_base_name)
+            )
+            logger.warning(msg)
+            warn(msg)
+
+        return attrs, dtype
+ + + @staticmethod + def _init_h5(out_file, time_index, meta, global_attrs): + """Initialize the output h5 file to save data to. + + Parameters + ---------- + out_file : str + Output file path - must not yet exist. + time_index : pd.datetimeindex + Full datetime index of final output data. + meta : pd.DataFrame + Full meta dataframe for the final output data. + global_attrs : dict + Namespace of file-global attributes for the final output data. + """ + + with RexOutputs(out_file, mode='w-') as f: + logger.info('Initializing output file: {}'.format(out_file)) + logger.info( + 'Initializing output file with shape {} ' + 'and meta data:\n{}'.format((len(time_index), len(meta)), meta) + ) + f.time_index = time_index + f.meta = meta + f.run_attrs = global_attrs + + @classmethod + def _ensure_dset_in_output(cls, out_file, dset, data=None): + """Ensure that dset is initialized in out_file and initialize if not. + + Parameters + ---------- + out_file : str + Pre-existing H5 file output path + dset : str + Dataset name + data : Union[np.ndarray, da.core.Array] | None + Optional data to write to dataset if initializing. + """ + + with RexOutputs(out_file, mode='a') as f: + if dset not in f.dsets: + attrs, dtype = cls.get_dset_attrs(dset) + logger.info( + 'Initializing dataset "{}" with shape {} and ' + 'dtype {}'.format(dset, f.shape, dtype) + ) + f._create_dset( + dset, + f.shape, + dtype, + attrs=attrs, + data=data, + chunks=attrs.get('chunks', None), + ) + +
+[docs]
+    @classmethod
+    def write_data(
+        cls, out_file, dsets, time_index, data_list, meta, global_attrs=None
+    ):
+        """Write list of datasets to out_file.
+
+        Parameters
+        ----------
+        out_file : str
+            Pre-existing H5 file output path
+        dsets : list
+            list of datasets to write to out_file
+        time_index : pd.DatetimeIndex()
+            Pandas datetime index to use for file time_index.
+        data_list : list
+            List of np.ndarray objects to write to out_file
+        meta : pd.DataFrame
+            Full meta dataframe for the final output data.
+        global_attrs : dict
+            Namespace of file-global attributes for the final output data.
+        """
+        tmp_file = out_file.replace('.h5', '.h5.tmp')
+        with RexOutputs(tmp_file, 'w') as fh:
+            fh.meta = meta
+            fh.time_index = time_index
+
+            for dset, data in zip(dsets, data_list):
+                attrs, dtype = cls.get_dset_attrs(dset)
+                fh.add_dataset(
+                    tmp_file,
+                    dset,
+                    data,
+                    dtype=dtype,
+                    attrs=attrs,
+                    chunks=attrs.get('chunks', None),
+                )
+                logger.info(f'Added {dset} to output file {out_file}.')
+
+            if global_attrs is not None:
+                attrs = {
+                    k: v if isinstance(v, str) else safe_serialize(v)
+                    for k, v in global_attrs.items()
+                }
+                fh.run_attrs = attrs
+
+        os.replace(tmp_file, out_file)
+        msg = (
+            'Saved output of size '
+            f'{(len(data_list), *data_list[0].shape)} to: {out_file}'
+        )
+        logger.info(msg)
+
+ + + +
+[docs] +class RexOutputs(BaseRexOutputs): + """Base class to handle NREL h5 formatted output data""" + + @property + def full_version_record(self): + """Get record of versions for dependencies + + Returns + ------- + dict + Dictionary of package versions for dependencies + """ + versions = super().full_version_record + versions.update(VERSION_RECORD) + return versions + +
+[docs] + def set_version_attr(self): + """Set the version attribute to the h5 file.""" + self.h5.attrs['version'] = VERSION_RECORD['sup3r'] + self.h5.attrs['full_version_record'] = json.dumps( + self.full_version_record + ) + self.h5.attrs['package'] = 'sup3r'
+
+ + + +
+[docs] +class OutputHandler(OutputMixin): + """Class to handle forward pass output. This includes transforming features + back to their original form and outputting to the correct file format. + """ + +
+[docs] + @classmethod + def get_renamed_features(cls, features): + """Rename features based on transformation from u/v to + windspeed/winddirection + + Parameters + ---------- + features : list + List of output features + + Returns + ------- + list + List of renamed features u/v -> windspeed/winddirection for each + height + """ + heights = [ + parse_feature(f).height + for f in features + if re.match('u_(.*?)m'.lower(), f.lower()) + ] + renamed_features = features.copy() + + for height in heights: + u_idx = features.index(f'u_{height}m') + v_idx = features.index(f'v_{height}m') + + renamed_features[u_idx] = f'windspeed_{height}m' + renamed_features[v_idx] = f'winddirection_{height}m' + + return renamed_features
+ + +
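A self-contained sketch of the renaming rule (hypothetical feature list): each u/v pair at a given height maps, in place, to windspeed/winddirection at that height.

import re

features = ['u_10m', 'v_10m', 'temperature_2m']
renamed = list(features)
for f in features:
    m = re.match(r'u_(\d+)m$', f)
    if m:
        h = m.group(1)
        renamed[features.index(f'u_{h}m')] = f'windspeed_{h}m'
        renamed[features.index(f'v_{h}m')] = f'winddirection_{h}m'
print(renamed)  # ['windspeed_10m', 'winddirection_10m', 'temperature_2m']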
+[docs] + @classmethod + def invert_uv_features(cls, data, features, lat_lon, max_workers=None): + """Invert U/V to windspeed and winddirection. Performed in place. + + Parameters + ---------- + data : ndarray + High res data from forward pass + (spatial_1, spatial_2, temporal, features) + features : list + List of output features. If this doesn't contain any names matching + u_*m, this method will do nothing. + lat_lon : ndarray + High res lat/lon array + (spatial_1, spatial_2, 2) + max_workers : int | None + Max workers to use for inverse transform. If None the maximum + possible will be used + """ + + heights = [ + parse_feature(f).height + for f in features + if re.match('u_(.*?)m'.lower(), f.lower()) + ] + + if heights: + logger.info( + 'Converting u/v to ws/wd for H5 output with max_workers=%s', + max_workers, + ) + logger.debug( + 'Found heights %s for output features %s', heights, features + ) + + tasks = [] + for height in heights: + u_idx = features.index(f'u_{height}m') + v_idx = features.index(f'v_{height}m') + task = dask.delayed(cls.invert_uv_single_pair)( + data, lat_lon, u_idx, v_idx + ) + tasks.append(task) + logger.info('Added %s futures to convert u/v to ws/wd', len(tasks)) + if max_workers == 1: + dask.compute(*tasks, scheduler='single-threaded') + else: + dask.compute(*tasks, scheduler='threads', num_workers=max_workers) + logger.info('Finished converting u/v to ws/wd')
+ + +
+[docs] + @staticmethod + def invert_uv_single_pair(data, lat_lon, u_idx, v_idx): + """Perform inverse transform in place on a single u/v pair. + + Parameters + ---------- + data : ndarray + High res data from forward pass + (spatial_1, spatial_2, temporal, features) + lat_lon : ndarray + High res lat/lon array + (spatial_1, spatial_2, 2) + u_idx : int + Index in data for U component to transform + v_idx : int + Index in data for V component to transform + """ + ws, wd = invert_uv(data[..., u_idx], data[..., v_idx], lat_lon) + data[..., u_idx] = ws + data[..., v_idx] = wd
+ + + @classmethod + def _transform_output( + cls, data, features, lat_lon, invert_uv=None, max_workers=None + ): + """Transform output data before writing to H5 file + + Parameters + ---------- + data : ndarray + (spatial_1, spatial_2, temporal, features) + High resolution forward pass output + features : list + List of feature names corresponding to the last dimension of data + lat_lon : ndarray + Array of high res lat/lon for output data. + (spatial_1, spatial_2, 2) + Last dimension has ordering (lat, lon) + invert_uv : bool | None + Whether to convert u and v wind components to windspeed and + direction + max_workers : int | None + Max workers to use for inverse transform. If None the max_workers + will be estimated based on memory limits. + """ + if invert_uv and any( + re.match('u_(.*?)m'.lower(), f.lower()) + or re.match('v_(.*?)m'.lower(), f.lower()) + for f in features + ): + cls.invert_uv_features( + data, features, lat_lon, max_workers=max_workers + ) + features = cls.get_renamed_features(features) + data = cls.enforce_limits(features=features, data=data) + return data, features + +
+[docs]
+    @staticmethod
+    def enforce_limits(features, data):
+        """Enforce physical limits for feature data
+
+        Parameters
+        ----------
+        features : list
+            List of features with ordering corresponding to last channel of
+            data array.
+        data : ndarray
+            Array of feature data
+
+        Returns
+        -------
+        data : ndarray
+            Array of feature data with physical limits enforced
+        """
+        maxes = []
+        mins = []
+        for fidx, fn in enumerate(features):
+            dset_name = parse_feature(fn).basename
+            if dset_name not in OUTPUT_ATTRS:
+                msg = f'Could not find "{dset_name}" in OUTPUT_ATTRS dict!'
+                logger.error(msg)
+                raise KeyError(msg)
+
+            max_val = OUTPUT_ATTRS[dset_name].get('max', np.inf)
+            min_val = OUTPUT_ATTRS[dset_name].get('min', -np.inf)
+            enforcing_msg = (
+                f'Enforcing range of ({min_val}, {max_val}) for "{fn}"'
+            )
+
+            f_max = data[..., fidx].max()
+            f_min = data[..., fidx].min()
+            msg = f'{fn} has a max of {f_max} > {max_val}. {enforcing_msg}'
+            if f_max > max_val:
+                logger.warning(msg)
+                warn(msg)
+            msg = f'{fn} has a min of {f_min} < {min_val}. {enforcing_msg}'
+            if f_min < min_val:
+                logger.warning(msg)
+                warn(msg)
+            maxes.append(max_val)
+            mins.append(min_val)
+
+        data = np.maximum(data, mins)
+        return np.minimum(data, maxes).astype(np.float32)
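For example, assuming OUTPUT_ATTRS held an illustrative entry like {'windspeed': {'min': 0, 'max': 120}}, out-of-range values would trigger a warning and be clipped:

data = np.array([[[[-5.0]], [[150.0]]]])  # (1, 2, 1, 1) grid, one feature
OutputHandler.enforce_limits(['windspeed_10m'], data).ravel()
# -> array([  0., 120.], dtype=float32), with warnings logged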
+ + +
+[docs]
+    @staticmethod
+    def pad_lat_lon(lat_lon):
+        """Pad lat lon grid with additional rows and columns to use for
+        interpolation routine
+
+        Parameters
+        ----------
+        lat_lon : ndarray
+            Array of lat/lon for input data.
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+
+        Returns
+        -------
+        lat_lon : ndarray
+            Array of padded lat lons
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        """
+
+        # add row and column to boundaries
+        padded_grid = np.zeros((2 + lat_lon.shape[0], 2 + lat_lon.shape[1], 2))
+
+        # fill in interior values
+        padded_grid[1:-1, 1:-1, :] = lat_lon
+
+        # define edge spacing
+        left_diffs = padded_grid[:, 2, 1] - padded_grid[:, 1, 1]
+        right_diffs = padded_grid[:, -2, 1] - padded_grid[:, -3, 1]
+        top_diffs = padded_grid[1, :, 0] - padded_grid[2, :, 0]
+        bottom_diffs = padded_grid[-3, :, 0] - padded_grid[-2, :, 0]
+
+        # use edge spacing to define new boundary values
+        padded_grid[:, 0, 1] = padded_grid[:, 1, 1] - left_diffs
+        padded_grid[:, 0, 0] = padded_grid[:, 1, 0]
+        padded_grid[:, -1, 1] = padded_grid[:, -2, 1] + right_diffs
+        padded_grid[:, -1, 0] = padded_grid[:, -2, 0]
+        padded_grid[0, :, 0] = padded_grid[1, :, 0] + top_diffs
+        padded_grid[0, :, 1] = padded_grid[1, :, 1]
+        padded_grid[-1, :, 0] = padded_grid[-2, :, 0] - bottom_diffs
+        padded_grid[-1, :, 1] = padded_grid[-2, :, 1]
+
+        # use surrounding cells to define corner values
+        # top left
+        padded_grid[0, 0, 0] = padded_grid[0, 1, 0]
+        padded_grid[0, 0, 1] = padded_grid[1, 0, 1]
+        # top right
+        padded_grid[0, -1, 0] = padded_grid[0, -2, 0]
+        padded_grid[0, -1, 1] = padded_grid[1, -1, 1]
+        # bottom left
+        padded_grid[-1, 0, 0] = padded_grid[-1, 1, 0]
+        padded_grid[-1, 0, 1] = padded_grid[-2, 0, 1]
+        # bottom right
+        padded_grid[-1, -1, 0] = padded_grid[-1, -2, 0]
+        padded_grid[-1, -1, 1] = padded_grid[-2, -1, 1]
+
+        return padded_grid
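A tiny worked example on a regular 2x2 grid: one extrapolated row / column is added on each edge, so lats [1, 0] become [2, 1, 0, -1] down the rows and lons [0, 1] become [-1, 0, 1, 2] across the columns:

lat_lon = np.dstack((
    [[1, 1], [0, 0]],  # lats decrease down the rows
    [[0, 1], [0, 1]],  # lons increase across the columns
))
OutputHandler.pad_lat_lon(lat_lon).shape
# -> (4, 4, 2)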
+ + +
+[docs]
+    @staticmethod
+    def is_increasing_lons(lat_lon):
+        """Check if longitudes are in increasing order. Need to check this
+        for interpolation routine. This is primarily to identify whether the
+        lons go through the 180 -> -180 boundary, which creates a
+        discontinuity. For example, [130, 180, -130, -80]. If any lons go
+        from positive to negative the lons need to be shifted to the range
+        0-360.
+
+        Parameters
+        ----------
+        lat_lon : ndarray
+            Array of lat/lon for input data.
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+
+        Returns
+        -------
+        bool
+            Whether all lons are in increasing order or not
+        """
+        # comparing the endpoints of each row is sufficient for grids that
+        # are monotonic except across the 180 -> -180 boundary
+        for i in range(lat_lon.shape[0]):
+            if lat_lon[i, :, 1][-1] < lat_lon[i, :, 1][0]:
+                return False
+        return True
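A quick sketch of the discontinuity check using the example from the docstring:

lons = np.array([[130, 180, -130, -80]])
lat_lon = np.dstack((np.zeros_like(lons), lons))
OutputHandler.is_increasing_lons(lat_lon)
# -> False (the row wraps through the 180 -> -180 boundary)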
+ + +
+[docs]
+    @classmethod
+    def get_lat_lon(cls, low_res_lat_lon, shape):
+        """Get lat lon arrays for high res output file
+
+        Parameters
+        ----------
+        low_res_lat_lon : ndarray
+            Array of lat/lon for input data. Longitudes must be arranged in
+            a counter-clockwise direction (when looking down from above the
+            north pole). e.g. [-50, -25, 25, 50] or [130, 180, -130, -80]. The
+            latter passes through the 180 -> -180 boundary and will be
+            temporarily shifted to the 0-360 range before interpolation.
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        shape : tuple
+            (lats, lons) Shape of high res grid
+
+        Returns
+        -------
+        lat_lon : ndarray
+            Array of lat lons for high res output file
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        """
+        logger.debug('Getting high resolution lat / lon grid')
+
+        # ensure lons are between -180 and 180. note that this writes into
+        # low_res_lat_lon in place
+        logger.debug('Ensuring correct longitude range.')
+        low_res_lat_lon[..., 1] = (low_res_lat_lon[..., 1] + 180) % 360 - 180
+
+        # check if lons go through the 180 -> -180 boundary.
+        if not cls.is_increasing_lons(low_res_lat_lon):
+            logger.debug('Ensuring increasing longitudes.')
+            low_res_lat_lon[..., 1] = (low_res_lat_lon[..., 1] + 360) % 360
+
+        # pad lat lon grid
+        logger.debug('Padding low-res lat / lon grid.')
+        padded_grid = cls.pad_lat_lon(low_res_lat_lon)
+        lats = padded_grid[..., 0].flatten()
+        lons = padded_grid[..., 1].flatten()
+
+        lr_y, lr_x = low_res_lat_lon.shape[:-1]
+        hr_y, hr_x = shape
+
+        # assume outer bounds of mesh (0, 10) w/ points on inside of that
+        # range
+        y = np.arange(0, 10, 10 / lr_y) + 5 / lr_y
+        x = np.arange(0, 10, 10 / lr_x) + 5 / lr_x
+
+        # add values due to padding
+        y = np.concatenate([[y[0] - 10 / lr_y], y, [y[-1] + 10 / lr_y]])
+        x = np.concatenate([[x[0] - 10 / lr_x], x, [x[-1] + 10 / lr_x]])
+
+        # remesh (0, 10) with high res spacing
+        new_y = np.arange(0, 10, 10 / hr_y) + 5 / hr_y
+        new_x = np.arange(0, 10, 10 / hr_x) + 5 / hr_x
+
+        logger.debug('Running meshgrid.')
+        X, Y = np.meshgrid(x, y)
+        old = np.array([Y.flatten(), X.flatten()]).T
+        X, Y = np.meshgrid(new_x, new_y)
+        new = np.array([Y.flatten(), X.flatten()]).T
+
+        logger.debug('Running griddata.')
+        lons = griddata(old, lons, new)
+        lats = griddata(old, lats, new)
+
+        lons = (lons + 180) % 360 - 180
+        lat_lon = np.dstack((lats.reshape(shape), lons.reshape(shape)))
+        logger.debug('Finished getting high resolution lat / lon grid')
+
+        return lat_lon
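A hypothetical call doubling a 2x2 grid in each spatial dimension. Since the longitude normalization above writes into low_res_lat_lon, pass a copy if the input array must be preserved:

hr_lat_lon = OutputHandler.get_lat_lon(low_res_lat_lon.copy(), shape=(4, 4))
# hr_lat_lon.shape -> (4, 4, 2)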
+ + +
+[docs]
+    @staticmethod
+    def get_times(low_res_times, shape):
+        """Get array of times for high res output file
+
+        Parameters
+        ----------
+        low_res_times : pd.DatetimeIndex
+            List of times for low res input data. If there is only a single
+            low res timestep, it is assumed the data is daily.
+        shape : int
+            Number of time steps for high res time array
+
+        Returns
+        -------
+        ndarray
+            Array of times for high res output file.
+        """
+        logger.debug('Getting high resolution time indices')
+        logger.debug(
+            f'Low res times: {low_res_times[0]} to {low_res_times[-1]}'
+        )
+        t_enhance = int(shape / len(low_res_times))
+        if len(low_res_times) > 1:
+            offset = low_res_times[1] - low_res_times[0]
+        else:
+            offset = np.timedelta64(24, 'h')
+
+        freq = offset / np.timedelta64(1, 's')
+        freq = int(60 * np.round(freq / 60) / t_enhance)
+        times = [
+            low_res_times[0] + i * np.timedelta64(freq, 's')
+            for i in range(shape)
+        ]
+        freq = pd.tseries.offsets.DateOffset(seconds=freq)
+        times = pd_date_range(times[0], times[-1], freq=freq)
+        logger.debug(f'High res times: {times[0]} to {times[-1]}')
+        return times
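A worked example of the frequency arithmetic: two hourly low-res steps and shape=4 give t_enhance = 2, so the 3600 s step is halved to a 1800 s (half-hourly) high-res index:

lr_times = pd.DatetimeIndex(['2020-01-01 00:00', '2020-01-01 01:00'])
OutputHandler.get_times(lr_times, shape=4)
# -> 00:00, 00:30, 01:00, 01:30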
+ + + @classmethod + @abstractmethod + def _write_output( + cls, + data, + features, + lat_lon, + times, + out_file, + meta_data, + invert_uv=True, + max_workers=None, + gids=None, + ): + """Write output to file with specified times and lats/lons""" + +
+[docs]
+    @classmethod
+    def write_output(
+        cls,
+        data,
+        features,
+        low_res_lat_lon,
+        low_res_times,
+        out_file,
+        meta_data=None,
+        invert_uv=None,
+        max_workers=None,
+        gids=None,
+    ):
+        """Write forward pass output to file
+
+        Parameters
+        ----------
+        data : ndarray
+            (spatial_1, spatial_2, temporal, features)
+            High resolution forward pass output
+        features : list
+            List of feature names corresponding to the last dimension of data
+        low_res_lat_lon : ndarray
+            Array of lat/lon for input data. (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        low_res_times : pd.DatetimeIndex
+            List of times for low res source data
+        out_file : string
+            Output file path
+        meta_data : dict | None
+            Dictionary of meta data from model
+        invert_uv : bool | None
+            Whether to convert u and v wind components to windspeed and
+            direction
+        max_workers : int | None
+            Max workers to use for inverse uv transform. If None the
+            max_workers will be estimated based on memory limits.
+        gids : list
+            List of coordinate indices used to label each lat lon pair and to
+            help with spatial chunk data collection
+        """
+        lat_lon = cls.get_lat_lon(low_res_lat_lon, data.shape[:2])
+        times = cls.get_times(low_res_times, data.shape[-2])
+        cls._write_output(
+            data,
+            features,
+            lat_lon,
+            times,
+            out_file,
+            meta_data=meta_data,
+            invert_uv=invert_uv,
+            max_workers=max_workers,
+            gids=gids,
+        )
+
\ No newline at end of file
diff --git a/_modules/sup3r/postprocessing/writers/h5.html b/_modules/sup3r/postprocessing/writers/h5.html
new file mode 100644
index 000000000..55473d2a7
--- /dev/null
+++ b/_modules/sup3r/postprocessing/writers/h5.html
@@ -0,0 +1,758 @@
+sup3r.postprocessing.writers.h5 — sup3r 0.2.1.dev62 documentation
Source code for sup3r.postprocessing.writers.h5

+"""Output handling
+
+TODO: Remove redundant code re. Cachers
+"""
+
+import logging
+
+import numpy as np
+import pandas as pd
+
+from .base import OutputHandler
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class OutputHandlerH5(OutputHandler):
+    """Class to handle writing output to H5 file"""
+
+    @classmethod
+    def _write_output(
+        cls,
+        data,
+        features,
+        lat_lon,
+        times,
+        out_file,
+        meta_data=None,
+        invert_uv=None,
+        max_workers=None,
+        gids=None,
+    ):
+        """Write forward pass output to H5 file
+
+        Parameters
+        ----------
+        data : ndarray
+            (spatial_1, spatial_2, temporal, features)
+            High resolution forward pass output
+        features : list
+            List of feature names corresponding to the last dimension of data
+        lat_lon : ndarray
+            Array of high res lat/lon for output data.
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        times : pd.DatetimeIndex
+            List of times for high res output data
+        out_file : string
+            Output file path
+        meta_data : dict | None
+            Dictionary of meta data from model
+        invert_uv : bool | None
+            Whether to convert u and v wind components to windspeed and
+            direction
+        max_workers : int | None
+            Max workers to use for inverse transform.
+        gids : list
+            List of coordinate indices used to label each lat lon pair and to
+            help with spatial chunk data collection
+        """
+        msg = (
+            f'Output data shape ({data.shape}) and lat_lon shape '
+            f'({lat_lon.shape}) conflict.'
+        )
+        assert data.shape[:2] == lat_lon.shape[:-1], msg
+        msg = (
+            f'Output data shape ({data.shape}) and times shape '
+            f'({len(times)}) conflict.'
+        )
+        assert data.shape[-2] == len(times), msg
+        # h5 output defaults to inverting u/v to windspeed/winddirection
+        invert_uv = True if invert_uv is None else invert_uv
+        data, features = cls._transform_output(
+            data.copy(),
+            features,
+            lat_lon,
+            max_workers=max_workers,
+            invert_uv=invert_uv,
+        )
+        gids = (
+            gids
+            if gids is not None
+            else np.arange(np.prod(lat_lon.shape[:-1]))
+        )
+        meta = pd.DataFrame(
+            {
+                'gid': gids.flatten(),
+                'latitude': lat_lon[..., 0].flatten(),
+                'longitude': lat_lon[..., 1].flatten(),
+            }
+        )
+        data_list = []
+        for i, _ in enumerate(features):
+            flat_data = data[..., i].reshape((-1, len(times)))
+            flat_data = np.transpose(flat_data, (1, 0))
+            data_list.append(flat_data)
+        cls.write_data(out_file, features, times, data_list, meta, meta_data)
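The per-feature reshape above flattens each (lats, lons, time) slab into the (time, sites) layout expected for H5 outputs; a minimal equivalent for a single feature (variable names are illustrative):

arr = data[..., 0]                      # (lats, lons, time)
flat = arr.reshape((-1, len(times))).T  # (time, lats * lons) == (time, sites)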
+
\ No newline at end of file
diff --git a/_modules/sup3r/postprocessing/writers/nc.html b/_modules/sup3r/postprocessing/writers/nc.html
new file mode 100644
index 000000000..7c8115ff9
--- /dev/null
+++ b/_modules/sup3r/postprocessing/writers/nc.html
@@ -0,0 +1,759 @@
+sup3r.postprocessing.writers.nc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.postprocessing.writers.nc

+"""Output handling"""
+
+import logging
+from datetime import datetime as dt
+
+import numpy as np
+import xarray as xr
+
+from sup3r.preprocessing.cachers import Cacher
+from sup3r.preprocessing.names import Dimension
+
+from .base import OutputHandler
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class OutputHandlerNC(OutputHandler):
+    """Forward pass OutputHandler for NETCDF files"""
+
+    @classmethod
+    def _write_output(
+        cls,
+        data,
+        features,
+        lat_lon,
+        times,
+        out_file,
+        meta_data=None,
+        max_workers=None,
+        invert_uv=None,
+        gids=None,
+    ):
+        """Write forward pass output to NETCDF file
+
+        Parameters
+        ----------
+        data : ndarray
+            (spatial_1, spatial_2, temporal, features)
+            High resolution forward pass output
+        features : list
+            List of feature names corresponding to the last dimension of data
+        lat_lon : ndarray
+            Array of high res lat/lon for output data.
+            (spatial_1, spatial_2, 2)
+            Last dimension has ordering (lat, lon)
+        times : pd.DatetimeIndex
+            List of times for high res output data
+        out_file : string
+            Output file path
+        meta_data : dict | None
+            Dictionary of meta data from model
+        max_workers : int | None
+            Max workers to use for inverse transform.
+        invert_uv : bool | None
+            Whether to convert u and v wind components to windspeed and
+            direction
+        gids : list
+            List of coordinate indices used to label each lat lon pair and to
+            help with spatial chunk data collection
+        """
+
+        # netcdf output defaults to keeping u/v components as-is
+        invert_uv = False if invert_uv is None else invert_uv
+        data, features = cls._transform_output(
+            data=data,
+            features=features,
+            lat_lon=lat_lon,
+            invert_uv=invert_uv,
+            max_workers=max_workers,
+        )
+
+        coords = {
+            Dimension.TIME: times,
+            Dimension.LATITUDE: (Dimension.dims_2d(), lat_lon[:, :, 0]),
+            Dimension.LONGITUDE: (Dimension.dims_2d(), lat_lon[:, :, 1]),
+        }
+        data_vars = {}
+        if gids is not None:
+            data_vars = {'gids': (Dimension.dims_2d(), gids)}
+        for i, f in enumerate(features):
+            data_vars[f] = (
+                (Dimension.TIME, *Dimension.dims_2d()),
+                np.transpose(data[..., i], axes=(2, 0, 1)),
+            )
+
+        attrs = meta_data or {}
+        now = dt.utcnow().isoformat()
+        attrs['date_modified'] = now
+        attrs['date_created'] = attrs.get('date_created', now)
+
+        ds = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs)
+        Cacher.write_netcdf(
+            out_file=out_file,
+            data=ds,
+            features=features,
+            max_workers=max_workers,
+        )
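A hypothetical end-to-end call for a netcdf chunk, mirroring the default above of keeping u/v components (all argument values are illustrative):

OutputHandlerNC.write_output(
    data=hr_data,                  # (lats, lons, time, features)
    features=['u_10m', 'v_10m'],
    low_res_lat_lon=lr_lat_lon,
    low_res_times=lr_times,
    out_file='./chunk_000000.nc',
    invert_uv=False,               # netcdf writes keep u/v by default
)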
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/accessor.html b/_modules/sup3r/preprocessing/accessor.html
new file mode 100644
index 000000000..280f6c9da
--- /dev/null
+++ b/_modules/sup3r/preprocessing/accessor.html
@@ -0,0 +1,1338 @@
+sup3r.preprocessing.accessor — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.accessor

+"""Accessor for xarray. This defines the basic data object contained by all
+``Container`` objects."""
+
+import logging
+from typing import Dict, Union
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+import xarray as xr
+from scipy.stats import mode
+from typing_extensions import Self
+
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import (
+    _lowered,
+    _mem_check,
+    compute_if_dask,
+    dims_array_tuple,
+    is_type_of,
+    ordered_array,
+    ordered_dims,
+    parse_keys,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+@xr.register_dataset_accessor('sx')
+class Sup3rX:
+    """Accessor for xarray - the suggested way to extend xarray functionality.
+
+    References
+    ----------
+    https://docs.xarray.dev/en/latest/internals/extending-xarray.html
+
+    Note
+    ----
+    (1) This is an ``xr.Dataset`` style object with all ``xr.Dataset``
+    methods, plus more. The way to access these methods is either through
+    appending ``.sx.<method>`` on an ``xr.Dataset`` or by wrapping an
+    ``xr.Dataset`` with ``Sup3rX``, e.g. ``Sup3rX(xr.Dataset(...)).<method>``.
+    Throughout the ``sup3r`` codebase we prefer to use the latter. The
+    most important part of this interface is parsing ``__getitem__`` calls of
+    the form ``ds.sx[keys]``.
+
+        (i) ``keys`` can be a single feature name, list of features, or numpy
+        style indexing for dimensions. e.g. ``ds.sx['u'][slice(0, 10),
+        ...]`` or ``ds.sx[['u', 'v']][..., slice(0, 10)]``.
+
+        (ii) If ``ds[keys]`` returns an ``xr.Dataset`` object then
+        ``ds.sx[keys]`` will return a ``Sup3rX`` object. e.g.
+        ``ds.sx[['u','v']]`` will return a :class:`Sup3rX` instance but
+        ``ds.sx['u']`` will return an ``xr.DataArray``
+
+        (iii) Providing only numpy style indexing without features will
+        return an array with all contained features in the last dimension
+        with a spatiotemporal shape corresponding to the indices. e.g.
+        ``ds[slice(0, 10), 0, 1]`` will return an array of shape ``(10, 1,
+        1, n_features)``. This array will be a dask.array or numpy.array,
+        depending on whether data is still on disk or loaded into memory.
+
+    (2) The ``__getitem__`` and ``__getattr__`` methods will cast back to
+    ``type(self)`` if ``self._ds.__getitem__`` or ``self._ds.__getattr__``
+    returns an instance of ``type(self._ds)`` (e.g. an ``xr.Dataset``). This
+    means we do not have to constantly append ``.sx`` for successive calls to
+    accessor methods.
+
+    Examples
+    --------
+    >>> # To use as an accessor:
+    >>> ds = xr.Dataset(...)
+    >>> feature_data = ds.sx[features]
+    >>> ti = ds.sx.time_index
+    >>> lat_lon_array = ds.sx.lat_lon
+
+    >>> # Use as wrapper:
+    >>> ds = Sup3rX(xr.Dataset(data_vars={'windspeed': ...}, ...))
+    >>> np_array = ds['windspeed'].values
+    >>> dask_array = ds['windspeed'][...] == ds['windspeed'].as_array()
+    """
+
+    def __init__(self, ds: Union[xr.Dataset, Self]):
+        """Initialize accessor.
+
+        Parameters
+        ----------
+        ds : xr.Dataset | Sup3rX
+            xarray Dataset instance to access with the following methods
+        """
+        self._ds = ds
+        self._features = None
+        self._meta = None
+        self.time_slice = None
+
+    def __getitem__(
+        self, keys
+    ) -> Union[Union[np.ndarray, da.core.Array], Self]:
+        """Method for accessing variables. keys can optionally include a
+        feature name or list of feature names as the first entry of a keys
+        tuple.
+
+        Notes
+        -----
+        This returns a ``Sup3rX`` object when keys is only an iterable of
+        features. e.g. an instance of ``(set, list, tuple)``. When keys is
+        only a single feature (a string) this returns an ``xr.DataArray``.
+        Otherwise keys must be dimension indices or slices and this will
+        return a np.ndarray or dask.array, depending on whether data is
+        loaded into memory or not. This array will have features stacked
+        over the last dimension.
+ """ + + features, slices = parse_keys( + keys, + default_coords=self.coords, + default_dims=self._ds.dims, + default_features=self.features, + ) + single_feat = isinstance(features, str) + + out = self._ds[features] if len(features) > 0 else self._ds + out = self.ordered(out) if single_feat else type(self)(out) + + if len(features) > 0: + return out + + slices = {k: v for k, v in slices.items() if k in out.dims} + is_fancy = self._needs_fancy_indexing(slices.values()) + + if not is_fancy: + out = out.isel(**slices) + + out = out.as_array() + + if is_fancy and self.loaded: + # DataArray coord or Numpy indexing + return out[tuple(slices.values())] + + if is_fancy: + # DataArray + Dask indexing + return out.vindex[tuple(slices.values())] + + return out + + def __getattr__(self, attr): + """Get attribute and cast to ``type(self)`` if an ``xr.Dataset`` is + returned first.""" + out = getattr(self._ds, attr) + return type(self)(out) if isinstance(out, xr.Dataset) else out + + def __setitem__(self, keys, data): + """ + Parameters + ---------- + keys : str | list | tuple + keys to set. This can be a string like 'temperature' or a list + like ``['u', 'v']``. ``data`` will be iterated over in the latter + case. + data : Union[np.ndarray, da.core.Array] | xr.DataArray + array object used to set variable data. If ``variable`` is a list + then this is expected to have a trailing dimension with length + equal to the length of the list. + """ + if is_type_of(keys, str): + if isinstance(keys, (list, tuple)) and hasattr(data, 'data_vars'): + data_dict = {v: data[v] for v in keys} + elif isinstance(keys, (list, tuple)): + data_dict = {v: data[..., i] for i, v in enumerate(keys)} + else: + data_dict = {keys.lower(): data} + _ = self.assign(data_dict) + else: + msg = f'Cannot set values for keys {keys}' + logger.error(msg) + raise KeyError(msg) + + def __contains__(self, vals): + """Check if ``self._ds`` contains ``vals``. + + Parameters + ---------- + vals : str | list + Values to check. Can be a list of strings or a single string. + + Examples + -------- + >>> bool(['u', 'v'] in self) + >>> bool('u' in self) + """ + feature_check = isinstance(vals, (list, tuple)) and all( + isinstance(s, str) for s in vals + ) + if feature_check: + return all(s.lower() in self._ds for s in vals) + return self._ds.__contains__(vals) + + @property + def values(self): + """Return numpy values in standard dimension order ``(lats, lons, time, + ..., features)``""" + out = self.as_array() + if not self.loaded: + return np.asarray(out) + return out + +
+[docs] + def to_dataarray(self) -> Union[np.ndarray, da.core.Array]: + """Return xr.DataArray for the contained xr.Dataset.""" + if not self.features: + coords = [self._ds[f] for f in Dimension.coords_2d()] + return da.stack(coords, axis=-1) + return self.ordered(self._ds.to_array())
+ + +
+[docs] + def as_array(self): + """Return ``.data`` attribute of an xarray.DataArray with our standard + dimension order ``(lats, lons, time, ..., features)``""" + out = self.to_dataarray() + return getattr(out, 'data', out)
+ + + def _stack_features(self, arrs): + if self.loaded: + return np.stack(arrs, axis=-1) + return da.stack(arrs, axis=-1) + +
+[docs] + def compute(self, **kwargs): + """Load `._ds` into memory. This updates the internal `xr.Dataset` if + it has not been loaded already.""" + if not self.loaded: + logger.debug(f'Loading dataset into memory: {self._ds}') + logger.debug(f'Pre-loading: {_mem_check()}') + + for f in self._ds.data_vars: + self._ds[f] = self._ds[f].compute(**kwargs) + logger.debug( + f'Loaded {f} into memory with shape ' + f'{self._ds[f].shape}. {_mem_check()}' + ) + logger.debug(f'Loaded dataset into memory: {self._ds}') + logger.debug(f'Post-loading: {_mem_check()}') + return self
+ + + @property + def loaded(self): + """Check if data has been loaded as numpy arrays.""" + return all( + isinstance(self._ds[f].data, np.ndarray) + for f in list(self._ds.data_vars) + ) + + @property + def flattened(self): + """Check if the contained data is flattened 2D data or 3D rasterized + data.""" + return Dimension.FLATTENED_SPATIAL in self.dims + + @property + def time_independent(self): + """Check if the contained data is time independent.""" + return Dimension.TIME not in self.dims + +
+[docs]
+    def update_ds(self, new_dset, attrs=None):
+        """Update `self._ds` with coords and data_vars replaced with those
+        provided. These are both provided as dictionaries {name: dask.array}.
+
+        Parameters
+        ----------
+        new_dset : Dict[str, dask.array]
+            Can contain any existing or new variable / coordinate as long as
+            they all have a consistent shape.
+        attrs : dict | None
+            Optional attributes to attach to the updated dataset.
+
+        Returns
+        -------
+        self : Sup3rX
+            Accessor with updated dataset, with provided coordinates and
+            data_vars in our standard dimension order.
+        """
+        coords = dict(self._ds.coords)
+        data_vars = dict(self._ds.data_vars)
+        new_coords = {
+            k: dims_array_tuple(v) for k, v in new_dset.items() if k in coords
+        }
+        coords.update(new_coords)
+        new_data = {
+            k: dims_array_tuple(v)
+            for k, v in new_dset.items()
+            if k not in coords
+        }
+        data_vars.update(new_data)
+
+        self._ds = xr.Dataset(coords=coords, data_vars=data_vars, attrs=attrs)
+        return self
+ + + @property + def name(self): + """Name of dataset. Used to label datasets when grouped in + :class:`Data` objects. e.g. for low / high res pairs or daily / hourly + data.""" + return self._ds.attrs.get('name', None) + +
+[docs] + def ordered(self, data): + """Return data with dimensions in standard order ``(lats, lons, time, + ..., features)``""" + if data.dims != ordered_dims(data.dims): + return data.transpose(*ordered_dims(data.dims), ...) + return data
+ + +
+[docs] + def sample(self, idx): + """Get sample from ``self._ds``. The idx should be a tuple of slices + for the dimensions ``(south_north, west_east, time)`` and a list of + feature names. e.g. + ``(slice(0, 3), slice(1, 10), slice(None), ['u_10m', 'v_10m'])``""" + isel_kwargs = dict(zip(Dimension.dims_3d(), idx[:-1])) + features = ( + _lowered(idx[-1]) if is_type_of(idx[-1], str) else self.features + ) + + out = self._ds[features].isel(**isel_kwargs) + return self.ordered(out.to_array()).data
+ + + @name.setter + def name(self, value): + """Set name of dataset.""" + self._ds.attrs['name'] = value + +
+[docs] + def isel(self, *args, **kwargs): + """Override xr.Dataset.isel to cast back to Sup3rX object.""" + return type(self)(self._ds.isel(*args, **kwargs))
+ + +
+[docs] + def coarsen(self, *args, **kwargs): + """Override xr.Dataset.coarsen to cast back to Sup3rX object.""" + return type(self)(self._ds.coarsen(*args, **kwargs))
+ + +
+[docs] + def mean(self, **kwargs): + """Get mean directly from dataset object.""" + features = kwargs.pop('features', None) + out = ( + self._ds[features].mean(**kwargs) + if features is not None + else self._ds.mean(**kwargs) + ) + return type(self)(out) if isinstance(out, xr.Dataset) else out
+ + +
+[docs] + def std(self, **kwargs): + """Get std directly from dataset object.""" + features = kwargs.pop('features', None) + out = ( + self._ds[features].std(**kwargs) + if features is not None + else self._ds.std(**kwargs) + ) + return type(self)(out) if isinstance(out, xr.Dataset) else out
+ + +
+[docs] + def normalize(self, means, stds): + """Normalize dataset using given means and stds. These are provided as + dictionaries.""" + feats = set(self._ds.data_vars).intersection(means).intersection(stds) + for f in feats: + self._ds[f] = (self._ds[f] - means[f]) / stds[f]
+ + +
+[docs] + def interpolate_na(self, **kwargs): + """Use `xr.DataArray.interpolate_na` to fill NaN values with a dask + compatible method.""" + features = kwargs.pop('features', list(self.data_vars)) + kwargs['fill_value'] = kwargs.get('fill_value', 'extrapolate') + for feat in features: + if 'dim' in kwargs: + if kwargs['dim'] == Dimension.TIME: + kwargs['use_coordinate'] = kwargs.get( + 'use_coordinate', False + ) + self._ds[feat] = self._ds[feat].interpolate_na(**kwargs) + else: + horiz = ( + self._ds[feat] + .chunk({Dimension.WEST_EAST: -1}) + .interpolate_na(dim=Dimension.WEST_EAST, **kwargs) + ) + vert = ( + self._ds[feat] + .chunk({Dimension.SOUTH_NORTH: -1}) + .interpolate_na(dim=Dimension.SOUTH_NORTH, **kwargs) + ) + new_var = (self._ds[feat].dims, (horiz.data + vert.data) / 2) + self._ds[feat] = new_var + return self
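A hypothetical usage sketch; extra kwargs pass through to xarray's interpolate_na, and the feature name here is illustrative:

ds = Sup3rX(xds)  # xds: an xr.Dataset with NaN gaps
ds = ds.interpolate_na(features=['u_10m'], dim='time', method='linear')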
+ + + @staticmethod + def _needs_fancy_indexing(keys) -> Union[np.ndarray, da.core.Array]: + """We use `.vindex` if keys require fancy indexing.""" + where_list = [ + ind for ind in keys if isinstance(ind, np.ndarray) and ind.ndim > 0 + ] + return len(where_list) > 1 + +
+[docs]
+    def add_dims_to_data_vars(self, vals):
+        """Add dimensions to vals entries if needed. This is used to set
+        values of `self._ds` which can require dimensions to be explicitly
+        specified for the data being set. e.g. self._ds['u_100m'] =
+        (('south_north', 'west_east', 'time'), data). We make guesses on the
+        correct dims if they are missing and give a warning. We also add
+        attributes if they are available in vals.
+
+        Parameters
+        ----------
+        vals : Dict[str, Union[np.ndarray, da.core.Array, tuple, xr.DataArray]]
+            Dictionary of feature names and arrays to use for setting feature
+            data. When arrays are >2 dimensions xarray needs explicit
+            dimension info, so we need to add these if not provided.
+        """
+        new_vals = {}
+        for k, v in vals.items():
+            if isinstance(v, tuple):
+                new_vals[k] = v
+            elif isinstance(v, (xr.DataArray, xr.Dataset)):
+                dat = v if isinstance(v, xr.DataArray) else v[k]
+                data = (
+                    ordered_array(dat).squeeze(dim='variable').data
+                    if 'variable' in dat.dims
+                    else ordered_array(dat).data
+                )
+                new_vals[k] = (
+                    ordered_dims(dat.dims),
+                    data,
+                    getattr(dat, 'attrs', {}),
+                )
+            elif k in self._ds.data_vars or len(v.shape) > 1:
+                if k in self._ds.data_vars:
+                    val = (ordered_dims(self._ds[k].dims), v)
+                else:
+                    val = dims_array_tuple(v)
+                msg = (
+                    f'Setting data for variable "{k}" without explicitly '
+                    f'providing dimensions. Using dims = {tuple(val[0])}.'
+                )
+                logger.warning(msg)
+                warn(msg)
+                new_vals[k] = val
+            else:
+                new_vals[k] = v
+        return new_vals
+ + +
+[docs] + def assign( + self, vals: Dict[str, Union[Union[np.ndarray, da.core.Array], tuple]] + ): + """Override xarray assign and assign_coords methods to enable update + without explicitly providing dimensions if variable already exists. + + Parameters + ---------- + vals : dict + Dictionary of variable names and either arrays or tuples of (dims, + array). If dims are not provided this will try to use stored dims + of the variable, if it exists already. + """ + data_vars = self.add_dims_to_data_vars(vals) + if all(f in self.coords for f in vals): + self._ds = self._ds.assign_coords(data_vars) + else: + self._ds = self._ds.assign(data_vars) + return self
+ + + @property + def features(self): + """Features in this container.""" + return list(self._ds.data_vars) + + @property + def dtype(self): + """Get dtype of underlying array.""" + return self.to_array().dtype + + @property + def shape(self): + """Get shape of underlying xr.DataArray, using our standard dimension + order.""" + dim_dict = dict(self._ds.sizes) + dim_vals = [dim_dict[k] for k in Dimension.order() if k in dim_dict] + return (*dim_vals, len(self._ds.data_vars)) + + @property + def size(self): + """Get size of data contained to use in weight calculations.""" + return np.prod(self.shape) + + @property + def time_index(self): + """Base time index for contained data.""" + return ( + pd.to_datetime(self._ds.indexes['time']) + if 'time' in self._ds.indexes + else None + ) + + @time_index.setter + def time_index(self, value): + """Update the time_index attribute with given index.""" + self._ds.indexes['time'] = value + + @property + def time_step(self): + """Get time step in seconds.""" + sec_diff = (self.time_index[1:] - self.time_index[:-1]).total_seconds() + return float(mode(sec_diff, keepdims=False).mode) + + @property + def lat_lon(self) -> Union[np.ndarray, da.core.Array]: + """Base lat lon for contained data.""" + coords = [self._ds[d] for d in Dimension.coords_2d()] + return self._stack_features(coords) + + @lat_lon.setter + def lat_lon(self, lat_lon): + """Update the lat_lon attribute with array values.""" + self[Dimension.coords_2d()] = lat_lon + + @property + def target(self): + """Return the value of the lower left hand coordinate.""" + return np.asarray(self.lat_lon[-1, 0]) + + @property + def grid_shape(self): + """Return the shape of the spatial dimensions.""" + return self.lat_lon.shape[:-1] + + @property + def meta(self): + """Return dataframe of flattened lat / lon values. Can also be set to + include additional data like elevation, country, state, etc""" + if self._meta is None: + self._meta = pd.DataFrame( + columns=Dimension.coords_2d(), + data=self.lat_lon.reshape((-1, 2)), + ) + return self._meta + + @meta.setter + def meta(self, meta): + """Set meta data. Used to update meta with additional info from + datasets like WTK and NSRDB.""" + self._meta = meta + +
+[docs] + def unflatten(self, grid_shape): + """Convert flattened dataset into rasterized dataset with the given + grid shape.""" + if self.flattened: + ind = pd.MultiIndex.from_product( + (np.arange(grid_shape[0]), np.arange(grid_shape[1])), + names=Dimension.dims_2d(), + ) + self._ds = self._ds.assign({Dimension.FLATTENED_SPATIAL: ind}) + self._ds = self._ds.unstack(Dimension.FLATTENED_SPATIAL) + else: + msg = 'Dataset is already unflattened' + logger.warning(msg) + warn(msg) + return self
+ + +
+[docs] + def flatten(self): + """Flatten rasterized dataset so that there is only a single spatial + dimension.""" + if not self.flattened: + self._ds = self._ds.stack( + {Dimension.FLATTENED_SPATIAL: Dimension.dims_2d()} + ) + self._ds = self._ds.assign( + { + Dimension.FLATTENED_SPATIAL: np.arange( + len(self._ds[Dimension.FLATTENED_SPATIAL]) + ) + } + ) + else: + msg = 'Dataset is already flattened' + logger.warning(msg) + warn(msg) + return self
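A hypothetical round trip between the two layouts (grid shape is illustrative):

ds = ds.flatten()            # stack (south_north, west_east) into one dim
ds = ds.unflatten((10, 20))  # restore the 2D grid from the stacked index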
+
+
+    def _qa(self, feature):
+        """Get qa info for given feature."""
+        info = {}
+        logger.info('Running qa on feature: %s', feature)
+        nan_perc = (
+            100 * np.isnan(self[feature].data).sum() / self[feature].size
+        )
+        info['nan_perc'] = compute_if_dask(nan_perc)
+        info['std'] = compute_if_dask(self[feature].std().data)
+        info['mean'] = compute_if_dask(self[feature].mean().data)
+        info['min'] = compute_if_dask(self[feature].min().data)
+        info['max'] = compute_if_dask(self[feature].max().data)
+        return info
+
+[docs] + def qa(self): + """Check NaNs and stats for all features.""" + qa_info = {} + for f in self.features: + qa_info[f] = self._qa(f) + return qa_info
+ + +
+[docs] + def __mul__(self, other): + """Multiply ``Sup3rX`` object by other. Used to compute weighted means + and stdevs.""" + try: + return type(self)(other * self._ds) + except Exception as e: + raise NotImplementedError( + f'Multiplication not supported for type {type(other)}.' + ) from e
+ + + def __rmul__(self, other): + return self.__mul__(other) + + def __pow__(self, other): + """Raise ``Sup3rX`` object to an integer power. Used to compute + weighted standard deviations.""" + try: + return type(self)(self._ds**other) + except Exception as e: + raise NotImplementedError( + f'Exponentiation not supported for type {type(other)}.' + ) from e
+ +
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/base.html b/_modules/sup3r/preprocessing/base.html
new file mode 100644
index 000000000..639c886dc
--- /dev/null
+++ b/_modules/sup3r/preprocessing/base.html
@@ -0,0 +1,1109 @@
+sup3r.preprocessing.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.base

+"""Base classes - fundamental dataset objects and the base :class:`Container`
+object, which just contains dataset objects. All objects that interact with
+data are containers. e.g. loaders, rasterizers, data handlers, samplers, batch
+queues, batch handlers.
+
+TODO: https://github.com/xarray-contrib/datatree might be a better approach
+for Sup3rDataset concept. Consider migrating once datatree has been fully
+integrated into xarray (in progress as of 8/8/2024)
+"""
+
+import logging
+import pprint
+from abc import ABCMeta
+from collections import namedtuple
+from typing import Mapping, Tuple, Union
+from warnings import warn
+
+import numpy as np
+import xarray as xr
+
+import sup3r.preprocessing.accessor  # noqa: F401 # pylint: disable=W0611
+from sup3r.preprocessing.accessor import Sup3rX
+from sup3r.preprocessing.utilities import (
+    composite_info,
+    is_type_of,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _get_class_info(namespace):
+    sig_objs = namespace.get('_signature_objs', None)
+    skips = namespace.get('_skip_params', None)
+    _sig = _doc = None
+    if sig_objs:
+        _sig, _doc = composite_info(sig_objs, skip_params=skips)
+    return _sig, _doc
+
+
+
+[docs] +class Sup3rMeta(ABCMeta, type): + """Meta class to define ``__name__``, ``__signature__``, and + ``__subclasscheck__`` of composite and derived classes. This allows us to + still resolve a signature for classes which pass through parent args / + kwargs as ``*args`` / ``**kwargs`` or those built through factory + composition, for example.""" + + def __new__(mcs, name, bases, namespace, **kwargs): # noqa: N804 + """Define __name__ and __signature__""" + _sig, _doc = _get_class_info(namespace) + name = namespace.get('__name__', name) + if _sig: + namespace['__signature__'] = _sig + if '__init__' in namespace and _sig: + namespace['__init__'].__signature__ = _sig + if '__init__' in namespace and _doc: + namespace['__init__'].__doc__ = _doc + return super().__new__(mcs, name, bases, namespace, **kwargs) + + def __subclasscheck__(cls, subclass): + """Check if factory built class shares base classes.""" + if super().__subclasscheck__(subclass): + return True + if hasattr(subclass, '_signature_objs'): + return {obj.__name__ for obj in cls._signature_objs} == { + obj.__name__ for obj in subclass._signature_objs + } + return False + + def __repr__(cls): + return f"<class '{cls.__module__}.{cls.__name__}'>"
+ + + +
+[docs]
+class Sup3rDataset:
+    """Interface for interacting with one or two ``xr.Dataset`` instances.
+    This is a wrapper around one or two ``Sup3rX`` objects so they work well
+    with Dual objects like ``DualSampler``, ``DualRasterizer``,
+    ``DualBatchHandler``, etc.
+
+    Examples
+    --------
+    >>> # access high_res or low_res:
+    >>> hr = xr.Dataset(...)
+    >>> lr = xr.Dataset(...)
+    >>> ds = Sup3rDataset(low_res=lr, high_res=hr)
+    >>> ds.high_res; ds.low_res  # returns Sup3rX objects
+    >>> ds[feature]  # returns a tuple of dataarray (low_res, high_res)
+
+    >>> # access hourly or daily:
+    >>> daily = xr.Dataset(...)
+    >>> hourly = xr.Dataset(...)
+    >>> ds = Sup3rDataset(daily=daily, hourly=hourly)
+    >>> ds.hourly; ds.daily  # returns Sup3rX objects
+    >>> ds[feature]  # returns a tuple of dataarray (daily, hourly)
+
+    >>> # single resolution data access:
+    >>> xds = xr.Dataset(...)
+    >>> ds = Sup3rDataset(hourly=xds)
+    >>> ds.hourly  # returns Sup3rX object
+    >>> ds[feature]  # returns a single dataarray
+
+    Note
+    ----
+    (1) This may seem similar to
+    :class:`~sup3r.preprocessing.collections.base.Collection`, which also can
+    contain multiple data members, but members of
+    :class:`~sup3r.preprocessing.collections.base.Collection` objects are
+    completely independent while here there are at most two members which are
+    related as low / high res versions of the same underlying data.
+
+    (2) Here we make an important choice to use high_res members to compute
+    means / stds. It would be reasonable to instead use the average of
+    high_res and low_res means / stds for aggregate stats but we want to
+    preserve the relationship between coarsened variables after
+    normalization (e.g. temperature_2m, temperature_max_2m,
+    temperature_min_2m). This means all these variables should have the same
+    means and stds, which ultimately come from the high_res non coarsened
+    variable.
+    """
+
+    def __init__(
+        self,
+        **dsets: Mapping[str, Union[xr.Dataset, Sup3rX]],
+    ):
+        """
+        Parameters
+        ----------
+        dsets : Mapping[str, xr.Dataset | Sup3rX | Sup3rDataset]
+            ``Sup3rDataset`` is initialized from a flexible kwargs input. The
+            keys will be used as names in a named tuple and the values will
+            be the dataset members. These names will also be used to define
+            attributes which point to these dataset members. You can provide
+            ``name=data`` or ``name1=data1, name2=data2`` and then access
+            these datasets as ``.name1`` or ``.name2``. If dsets values are
+            xr.Dataset objects these will be cast to ``Sup3rX`` objects
+            first. We also check if dsets values are ``Sup3rDataset`` objects
+            and if they only include one data member we use those to
+            reinitialize a ``Sup3rDataset``
+        """
+
+        for name, dset in dsets.items():
+            if isinstance(dset, xr.Dataset):
+                dsets[name] = Sup3rX(dset)
+            elif isinstance(dset, type(self)):
+                msg = (
+                    'Initializing Sup3rDataset with Sup3rDataset objects '
+                    'which contain more than one member is not allowed.'
+                )
+                assert len(dset) == 1, msg
+                dsets[name] = dset._ds[0]
+
+        self._ds = namedtuple('Dataset', list(dsets))(**dsets)
+
+    def __iter__(self):
+        yield from self._ds
+
+    @property
+    def dtype(self):
+        """Get datatype of first member. Assumed to be constant for all
+        members."""
+        return self._ds[0].dtype
+
+    def __len__(self):
+        return len(self._ds)
+
+    def __getattr__(self, attr):
+        """Get attribute through accessor if available.
Otherwise use standard + xarray interface.""" + if hasattr(self._ds, attr): + return getattr(self._ds, attr) + out = [self._getattr(ds, attr) for ds in self._ds] + if len(self._ds) == 1: + out = out[0] + return out + + def _getattr(self, dset, attr): + """Get attribute from single data member.""" + return getattr(dset.sx, attr, getattr(dset, attr)) + + def _getitem(self, dset, item): + """Get item from single data member.""" + return dset.sx[item] if hasattr(dset, 'sx') else dset[item] + +
+[docs] + def rewrap(self, data): + """Rewrap data as ``Sup3rDataset`` after calling parent method.""" + if isinstance(data, type(self)): + return data + return ( + type(self)(low_res=data[0], high_res=data[1]) + if len(data) > 1 + else type(self)(high_res=data[0]) + )
+ + +
+[docs] + def sample(self, idx): + """Get samples from ``self._ds`` members. idx should be either a tuple + of slices for the dimensions (south_north, west_east, time) and a list + of feature names or a 2-tuple of the same, for dual datasets.""" + if len(self._ds) == 2: + return tuple(d.sample(idx[i]) for i, d in enumerate(self)) + return self._ds[-1].sample(idx)
+ + +
+[docs] + def isel(self, *args, **kwargs): + """Return new Sup3rDataset with isel applied to each member.""" + return self.rewrap(tuple(d.isel(*args, **kwargs) for d in self))
+ + + def __getitem__(self, keys): + """If keys is an int this is interpreted as a request for that member + of ``self._ds``. Otherwise, if there's only a single member of + ``self._ds`` we get self._ds[-1][keys]. If there's two members we get + ``(self._ds[0][keys], self._ds[1][keys])`` and cast this back to a + ``Sup3rDataset`` if each of ``self._ds[i][keys]`` is a ``Sup3rX`` + object""" + if isinstance(keys, int): + return self._ds[keys] + + out = tuple(self._getitem(d, keys) for d in self._ds) + if len(self._ds) == 1: + return out[-1] + if all(isinstance(o, Sup3rX) for o in out): + return type(self)(**dict(zip(self._ds._fields, out))) + return out + + @property + def shape(self): + """We use the shape of the largest data member. These are assumed to be + ordered as (low-res, high-res) if there are two members.""" + return self._ds[-1].shape + + @property + def features(self): + """The features are determined by the set of features from all data + members.""" + feats = [ + f for f in self._ds[0].features if f not in self._ds[-1].features + ] + feats += self._ds[-1].features + return feats + + @property + def size(self): + """Return number of elements in the largest data member.""" + return np.prod(self.shape) + + def __contains__(self, vals): + """Check for vals in all of the dset members.""" + return any(d.sx.__contains__(vals) for d in self._ds) + + def __setitem__(self, keys, data): + """Set dset member values. Check if values is a tuple / list and if + so interpret this as sending a tuple / list element to each dset + member. e.g. ``vals[0] -> dsets[0]``, ``vals[1] -> dsets[1]``, etc""" + if len(self._ds) == 1: + self._ds[-1].__setitem__(keys, data) + else: + for i, self_i in enumerate(self): + dat = data[i] if isinstance(data, (tuple, list)) else data + self_i.__setitem__(keys, dat) + +
+[docs] + def mean(self, **kwargs): + """Use the high_res members to compute the means. These are used for + normalization during training.""" + kwargs['skipna'] = kwargs.get('skipna', True) + return self._ds[-1].mean(**kwargs)
+ + +
+[docs] + def std(self, **kwargs): + """Use the high_res members to compute the stds. These are used for + normalization during training.""" + kwargs['skipna'] = kwargs.get('skipna', True) + return self._ds[-1].std(**kwargs)
+ + +
+[docs] + def normalize(self, means, stds): + """Normalize dataset using the given mean and stds. These are provided + as dictionaries.""" + _ = [d.normalize(means=means, stds=stds) for d in self._ds]
+ + +
+[docs] + def compute(self, **kwargs): + """Load data into memory for each data member.""" + _ = [d.compute(**kwargs) for d in self._ds]
+ + + @property + def loaded(self): + """Check if all data members have been loaded into memory.""" + return all(d.loaded for d in self._ds)
+ + + +
+[docs]
+class Container(metaclass=Sup3rMeta):
+    """Basic fundamental object used to build preprocessing objects. Contains
+    an xarray-like Dataset (:class:`~.accessor.Sup3rX`), wrapped tuple of
+    ``Sup3rX`` objects (:class:`.Sup3rDataset`), or a tuple of such objects.
+    """
+
+    __slots__ = ['_data']
+
+    def __init__(
+        self,
+        data: Union[
+            Sup3rX, Sup3rDataset, Tuple[Sup3rX, ...], Tuple[Sup3rDataset, ...]
+        ] = None,
+    ):
+        """
+        Parameters
+        ----------
+        data : Union[Sup3rX, Sup3rDataset, Tuple[Sup3rX, ...],
+               Tuple[Sup3rDataset, ...]]
+            Can be an ``xr.Dataset``, a :class:`~.accessor.Sup3rX` object, a
+            :class:`.Sup3rDataset` object, or a tuple of such objects.
+
+            Note
+            ----
+            ``.data`` will return a :class:`~.Sup3rDataset` object or tuple
+            of such. This is a tuple when the `.data` attribute belongs to a
+            :class:`~.collections.base.Collection` object like
+            :class:`~.batch_handlers.factory.BatchHandler`. Otherwise this is
+            a :class:`~.Sup3rDataset` object, which is either a wrapped
+            2-tuple or 1-tuple (e.g. ``len(data) == 2`` or ``len(data) ==
+            1``). This is a 2-tuple when ``.data`` belongs to a dual
+            container object like :class:`~.samplers.DualSampler` and a
+            1-tuple otherwise.
+        """
+        self.data = data
+
+    @property
+    def data(self):
+        """Return underlying data.
+
+        Returns
+        -------
+        :class:`.Sup3rDataset`
+
+        See Also
+        --------
+        :py:meth:`.wrap`
+        """
+        return self._data
+
+    @data.setter
+    def data(self, data):
+        """Set data value. Wrap given value depending on type.
+
+        See Also
+        --------
+        :py:meth:`.wrap`"""
+        self._data = self.wrap(data)
+
+[docs]
+    def wrap(self, data):
+        """
+        Return a :class:`~.Sup3rDataset` object or tuple of such. This is a
+        tuple when the `.data` attribute belongs to a
+        :class:`~.collections.base.Collection` object like
+        :class:`~.batch_handlers.factory.BatchHandler`. Otherwise this is a
+        :class:`~.Sup3rDataset` object, which is either a wrapped 2-tuple or
+        1-tuple (e.g. ``len(data) == 2`` or ``len(data) == 1``). This is a
+        2-tuple when ``.data`` belongs to a dual container object like
+        :class:`~.samplers.DualSampler` and a 1-tuple otherwise.
+        """
+        if data is None:
+            return data
+
+        if is_type_of(data, Sup3rDataset):
+            return data
+
+        if isinstance(data, tuple) and len(data) == 2:
+            msg = (
+                f'{self.__class__.__name__}.data is being set with a '
+                '2-tuple without explicit dataset names. We will assume '
+                'first tuple member is low-res and second is high-res.'
+            )
+            logger.warning(msg)
+            warn(msg)
+            data = Sup3rDataset(low_res=data[0], high_res=data[1])
+        elif not isinstance(data, Sup3rDataset):
+            name = getattr(data, 'name', None) or 'high_res'
+            data = Sup3rDataset(**{name: data})
+        return data
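A sketch of the auto-wrapping behavior for an unnamed 2-tuple (the dataset variables are hypothetical):

c = Container(data=(lr_xds, hr_xds))  # warns: assumes (low_res, high_res)
lr, hr = c.data.low_res, c.data.high_res  # members accessible by name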
+ + +
+[docs] + def post_init_log(self, args_dict=None): + """Log additional arguments after initialization.""" + if args_dict is not None: + logger.info( + f'Finished initializing {self.__class__.__name__} with:\n' + f'{pprint.pformat(args_dict, indent=2)}' + )
+ + + @property + def shape(self): + """Get shape of underlying data.""" + return self.data.shape + + def __contains__(self, vals): + return vals in self.data + + def __getitem__(self, keys): + """Get item from underlying data. ``.data`` is a ``Sup3rX`` or + ``Sup3rDataset`` object, so this uses those ``__getitem__`` methods. + + See Also + -------- + :py:meth:`.accessor.Sup3rX.__getitem__`, + :py:meth:`.Sup3rDataset.__getitem__` + """ + return self.data[keys] + + def __setitem__(self, keys, data): + """Set item in underlying data.""" + self.data.__setitem__(keys, data) + + def __getattr__(self, attr): + """Check if attribute is available from ``.data``""" + if attr in dir(self): + return self.__getattribute__(attr) + try: + data = self.__getattribute__('_data') + return getattr(data, attr) + except Exception as e: + msg = f'{self.__class__.__name__} object has no attribute "{attr}"' + raise AttributeError(msg) from e
+ +
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_handlers/dc.html b/_modules/sup3r/preprocessing/batch_handlers/dc.html
new file mode 100644
index 000000000..0936788f4
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_handlers/dc.html
@@ -0,0 +1,729 @@
+sup3r.preprocessing.batch_handlers.dc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_handlers.dc

+"""Data centric batch handlers. Sample contained data according to
+spatiotemporal weights, which are derived from losses on validation data during
+training and updated each epoch.
+
+TODO: Easy to implement dual dc batch handler - Just need to use DualBatchQueue
+and override SamplerDC get_sample_index method.
+"""
+
+import logging
+
+from ..batch_queues.dc import BatchQueueDC, ValBatchQueueDC
+from ..samplers.dc import SamplerDC
+from ..utilities import log_args
+from .factory import BatchHandlerFactory
+
+logger = logging.getLogger(__name__)
+
+
+BaseDC = BatchHandlerFactory(
+    BatchQueueDC, SamplerDC, ValBatchQueueDC, name='BaseDC'
+)
+
+
+
+[docs] +class BatchHandlerDC(BaseDC): + """Data-Centric BatchHandler. This is used to adaptively select data + from lower performing spatiotemporal extents during training. To do this, + validation data is required, as it is used to compute losses within fixed + spatiotemporal bins which are then used as sampling probabilities for those + same regions when building batches. + + See Also + -------- + :class:`~sup3r.preprocessing.batch_queues.dc.BatchQueueDC`, + :class:`~sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC`, + :class:`~sup3r.preprocessing.samplers.dc.SamplerDC`, + :func:`~.factory.BatchHandlerFactory` + """ + + @log_args + def __init__(self, train_containers, val_containers, **kwargs): + msg = ( + f'{self.__class__.__name__} requires validation data. If you ' + 'do not plan to sample training data based on performance ' + 'across validation data use another type of batch handler.' + ) + assert val_containers is not None and val_containers != [], msg + super().__init__( + train_containers=train_containers, + val_containers=val_containers, + **kwargs, + ) + max_space_bins = (self.data[0].shape[0] - self.sample_shape[0] + 1) * ( + self.data[0].shape[1] - self.sample_shape[1] + 1 + ) + max_time_bins = self.data[0].shape[2] - self.sample_shape[2] + 1 + msg = ( + f'The requested sample_shape {self.sample_shape} is too large ' + f'for the requested number of bins (space = {self.n_space_bins}, ' + f'time = {self.n_time_bins}) and the shape of the sample data ' + f'{self.data[0].shape[:3]}.' + ) + assert self.n_space_bins <= max_space_bins, msg + assert self.n_time_bins <= max_time_bins, msg + + _signature_objs = (__init__, BaseDC) + _skip_params = ('samplers', 'data', 'thread_name', 'kwargs')
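As a worked example of these limits: for container data shaped (10, 10, 100, n_features) and sample_shape (4, 4, 10), max_space_bins = (10 - 4 + 1) * (10 - 4 + 1) = 49 and max_time_bins = 100 - 10 + 1 = 91, so any request with n_space_bins <= 49 and n_time_bins <= 91 passes the assertions.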
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_handlers/factory.html b/_modules/sup3r/preprocessing/batch_handlers/factory.html
new file mode 100644
index 000000000..4ed83b106
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_handlers/factory.html
@@ -0,0 +1,1001 @@
+sup3r.preprocessing.batch_handlers.factory — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_handlers.factory

+"""BatchHandler factory. Builds BatchHandler objects from batch queues and
+samplers."""
+
+import logging
+from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union
+
+from sup3r.preprocessing.batch_queues.base import SingleBatchQueue
+from sup3r.preprocessing.batch_queues.conditional import (
+    QueueMom1,
+    QueueMom1SF,
+    QueueMom2,
+    QueueMom2Sep,
+    QueueMom2SepSF,
+    QueueMom2SF,
+)
+from sup3r.preprocessing.batch_queues.dual import DualBatchQueue
+from sup3r.preprocessing.collections.stats import StatsCollection
+from sup3r.preprocessing.samplers.base import Sampler
+from sup3r.preprocessing.samplers.cc import DualSamplerCC
+from sup3r.preprocessing.samplers.dual import DualSampler
+from sup3r.preprocessing.utilities import (
+    check_signatures,
+    get_class_kwargs,
+    log_args,
+)
+
+if TYPE_CHECKING:
+    from sup3r.preprocessing.base import Container
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+def BatchHandlerFactory(
+    MainQueueClass, SamplerClass, ValQueueClass=None, name='BatchHandler'
+):
+    """BatchHandler factory. Can build handlers from different queue classes
+    and sampler classes. For example, to build a standard
+    :class:`.BatchHandler` use
+    :class:`~sup3r.preprocessing.batch_queues.SingleBatchQueue` and
+    :class:`~sup3r.preprocessing.samplers.Sampler`. To build a
+    :class:`~.DualBatchHandler` use
+    :class:`~sup3r.preprocessing.batch_queues.DualBatchQueue` and
+    :class:`~sup3r.preprocessing.samplers.DualSampler`. To build a
+    :class:`~..dc.BatchHandlerDC` use a
+    :class:`~sup3r.preprocessing.batch_queues.BatchQueueDC`,
+    :class:`~sup3r.preprocessing.batch_queues.ValBatchQueueDC` and
+    :class:`~sup3r.preprocessing.samplers.SamplerDC`
+
+    Note
+    ----
+    (1) BatchHandlers include a queue for training samples and a queue for
+    validation samples.
+    (2) There is no need to generate "Spatial" batch handlers. Using
+    :class:`Sampler` objects with a single time step in the sample shape will
+    produce batches without a time dimension.
+    """
+
+    class BatchHandler(MainQueueClass):
+        """BatchHandler object built from two lists of
+        :class:`~sup3r.preprocessing.base.Container` objects, one with
+        training data and one with validation data. These lists will be used
+        to initialize lists of :class:`Sampler` objects that will then be
+        used to build batches at run time.
+
+        Notes
+        -----
+        These lists of containers can contain data from the same underlying
+        data source (e.g. CONUS WTK, with train / val containers initialized
+        for different time periods and / or regions), or they can be used to
+        sample from completely different data sources (e.g. train on CONUS
+        WTK while validating on Canada WTK).
+
+        See Also
+        --------
+        :class:`~sup3r.preprocessing.samplers.Sampler`,
+        :class:`~sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue`,
+        :class:`~sup3r.preprocessing.collections.StatsCollection`
+        """
+
+        TRAIN_QUEUE = MainQueueClass
+        VAL_QUEUE = ValQueueClass or MainQueueClass
+        SAMPLER = SamplerClass
+
+        __name__ = name
+
+        @log_args
+        def __init__(
+            self,
+            train_containers: List['Container'],
+            val_containers: Optional[List['Container']] = None,
+            sample_shape: Optional[tuple] = None,
+            batch_size: int = 16,
+            n_batches: int = 64,
+            s_enhance: int = 1,
+            t_enhance: int = 1,
+            means: Optional[Union[Dict, str]] = None,
+            stds: Optional[Union[Dict, str]] = None,
+            queue_cap: Optional[int] = None,
+            transform_kwargs: Optional[dict] = None,
+            max_workers: int = 1,
+            mode: str = 'lazy',
+            feature_sets: Optional[dict] = None,
+            **kwargs,
+        ):
+            """
+            Parameters
+            ----------
+            train_containers : List[Container]
+                List of objects with a `.data` attribute, which will be used
+                to initialize Sampler objects and then used to initialize a
+                batch queue of training data. The data can be a Sup3rX or
+                Sup3rDataset object.
+            val_containers : List[Container]
+                List of objects with a `.data` attribute, which will be used
+                to initialize Sampler objects and then used to initialize a
+                batch queue of validation data. The data can be a Sup3rX or a
+                Sup3rDataset object.
+            sample_shape : tuple
+                Size of the spatiotemporal samples to select from the
+                containers, as (south_north, west_east, time).
+            batch_size : int
+                Number of samples to get to build a single batch. A sample of
+                (sample_shape[0], sample_shape[1], batch_size *
+                sample_shape[2]) is first selected from underlying dataset
+                and then reshaped into (batch_size, *sample_shape) to get a
+                single batch. This is more efficient than getting N =
+                batch_size samples and then stacking.
+            n_batches : int
+                Number of batches in an epoch, this sets the iteration limit
+                for this object.
+            s_enhance : int
+                Integer factor by which the spatial axes are to be enhanced.
+            t_enhance : int
+                Integer factor by which the temporal axes are to be enhanced.
+            means : str | dict | None
+                Usually a file path for loading / saving results, or None for
+                just calculating stats and not saving. Can also be a dict.
+            stds : str | dict | None
+                Usually a file path for loading / saving results, or None for
+                just calculating stats and not saving. Can also be a dict.
+            queue_cap : int
+                Maximum number of batches the batch queue can store. Changing
+                this can affect the speed with which batches move through
+                training.
+            transform_kwargs : Union[Dict, None]
+                Dictionary of kwargs to be passed to `self.transform`. This
+                method performs smoothing / coarsening.
+            max_workers : int
+                Number of workers / threads to use for getting batches to
+                fill the queue.
+            mode : str
+                Loading mode. Default is 'lazy', which only loads data into
+                memory as batches are queued. 'eager' will load all data into
+                memory right away.
+            feature_sets : Optional[dict]
+                Optional dictionary describing how the full set of features
+                is split between `lr_only_features` and `hr_exo_features`.
+
+                features : list | tuple
+                    List of the full set of features to use for sampling. If
+                    no entry is provided then all data_vars from the
+                    container data will be used.
+                lr_only_features : list | tuple
+                    List of feature names or patterns (``*`` wildcards are
+                    allowed) that should only be included in the low-res
+                    training set and not the high-res observations.
+                hr_exo_features : list | tuple
+                    List of feature names or patterns (``*`` wildcards are
+                    allowed) that should be included in the high-resolution
+                    observation but not expected to be output from the
+                    generative model. An example is high-res topography that
+                    is to be injected mid-network.
+            kwargs : dict
+                Additional keyword arguments for the BatchQueue and / or
+                Samplers. These can vary depending on the type of BatchQueue
+                / Sampler given to the Factory. For example, to build a
+                :class:`~sup3r.preprocessing.batch_handlers.BatchHandlerDC`
+                object (data-centric batch handler) we use a queue and
+                sampler which take spatial and temporal weight / bin
+                arguments used to determine how to weight spatiotemporal
+                regions when sampling. Using
+                :class:`~sup3r.preprocessing.batch_queues.ConditionalBatchQueue`
+                will result in arguments for computing moments from batches
+                and for padding batch data to enable these calculations.
+            """  # pylint: disable=line-too-long
+
+            check_signatures((self.TRAIN_QUEUE, self.VAL_QUEUE, self.SAMPLER))
+
+            add_kwargs = {
+                's_enhance': s_enhance,
+                't_enhance': t_enhance,
+                **kwargs,
+            }
+
+            sampler_kwargs = get_class_kwargs(SamplerClass, add_kwargs)
+            val_kwargs = get_class_kwargs(self.VAL_QUEUE, add_kwargs)
+            main_kwargs = get_class_kwargs(MainQueueClass, add_kwargs)
+
+            bad_kwargs = (
+                set(kwargs)
+                - set(sampler_kwargs)
+                - set(val_kwargs)
+                - set(main_kwargs)
+            )
+
+            msg = (
+                f'{self.__class__.__name__} received bad '
+                f'kwargs = {bad_kwargs}.'
+            )
+            assert not bad_kwargs, msg
+
+            train_samplers, val_samplers = self.init_samplers(
+                train_containers,
+                val_containers,
+                sample_shape=sample_shape,
+                feature_sets=feature_sets,
+                batch_size=batch_size,
+                sampler_kwargs=sampler_kwargs,
+            )
+
+            logger.info('Normalizing training samplers')
+            stats = StatsCollection(
+                containers=train_samplers,
+                means=means,
+                stds=stds,
+            )
+            self.means = stats.means
+            self.stds = stats.stds
+
+            if not val_samplers:
+                self.val_data: Union[List, Type[self.VAL_QUEUE]] = []
+            else:
+                logger.info('Normalizing validation samplers.')
+                stats.normalize(val_samplers)
+                self.val_data = self.VAL_QUEUE(
+                    samplers=val_samplers,
+                    n_batches=n_batches,
+                    thread_name='validation',
+                    batch_size=batch_size,
+                    queue_cap=queue_cap,
+                    transform_kwargs=transform_kwargs,
+                    max_workers=max_workers,
+                    mode=mode,
+                    **val_kwargs,
+                )
+            super().__init__(
+                samplers=train_samplers,
+                n_batches=n_batches,
+                batch_size=batch_size,
+                queue_cap=queue_cap,
+                transform_kwargs=transform_kwargs,
+                max_workers=max_workers,
+                mode=mode,
+                **main_kwargs,
+            )
+
+        _skip_params = ('samplers', 'data', 'containers', 'thread_name')
+        _signature_objs = (__init__, SAMPLER, VAL_QUEUE, TRAIN_QUEUE)
+
+        def init_samplers(
+            self,
+            train_containers,
+            val_containers,
+            sample_shape,
+            feature_sets,
+            batch_size,
+            sampler_kwargs,
+        ):
+            """Initialize samplers from given data containers."""
+            train_samplers = [
+                self.SAMPLER(
+                    data=c.data,
+                    sample_shape=sample_shape,
+                    feature_sets=feature_sets,
+                    batch_size=batch_size,
+                    **sampler_kwargs,
+                )
+                for c in train_containers
+            ]
+            val_samplers = (
+                []
+                if val_containers is None
+                else [
+                    self.SAMPLER(
+                        data=c.data,
+                        sample_shape=sample_shape,
+                        feature_sets=feature_sets,
+                        batch_size=batch_size,
+                        **sampler_kwargs,
+                    )
+                    for c in val_containers
+                ]
+            )
+            return train_samplers, val_samplers
+
+        def start(self):
+            """Start the val data batch queue in addition to the train batch
+            queue."""
+            if hasattr(self.val_data, 'start'):
+                self.val_data.start()
+            super().start()
+
+        def stop(self):
+            """Stop the val data batch queue in addition to the train batch
+            queue."""
+            self._training_flag.clear()
+            if self.val_data != []:
+                self.val_data._training_flag.clear()
+            if hasattr(self.val_data, 'stop'):
+                self.val_data.stop()
+            super().stop()
+
+    return BatchHandler
+
+
+BatchHandler = BatchHandlerFactory(
+    SingleBatchQueue, Sampler, name='BatchHandler'
+)
+DualBatchHandler = BatchHandlerFactory(
+    DualBatchQueue, DualSampler, name='DualBatchHandler'
+)
+BatchHandlerCC = BatchHandlerFactory(
+    DualBatchQueue, DualSamplerCC, name='BatchHandlerCC'
+)
+BatchHandlerMom1 = BatchHandlerFactory(
+    QueueMom1, Sampler, name='BatchHandlerMom1'
+)
+BatchHandlerMom1SF = BatchHandlerFactory(
+    QueueMom1SF, Sampler, name='BatchHandlerMom1SF'
+)
+BatchHandlerMom2 = BatchHandlerFactory(
+    QueueMom2, Sampler, name='BatchHandlerMom2'
+)
+BatchHandlerMom2Sep = BatchHandlerFactory(
+    QueueMom2Sep, Sampler, name='BatchHandlerMom2Sep'
+)
+BatchHandlerMom2SF = BatchHandlerFactory(
+    QueueMom2SF, Sampler, name='BatchHandlerMom2SF'
+)
+BatchHandlerMom2SepSF = BatchHandlerFactory(
+    QueueMom2SepSF, Sampler, name='BatchHandlerMom2SepSF'
+)
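For orientation, here is a minimal sketch of how the factory above is meant to be used. The class names come from this module; the container objects and the shape / enhancement values are illustrative assumptions, not taken from the source.

.. code-block:: python

    # A minimal sketch, assuming `train_data` and `val_data` are objects
    # with a `.data` attribute (e.g. DataHandler instances, not shown here).
    from sup3r.preprocessing.batch_handlers.factory import BatchHandlerFactory
    from sup3r.preprocessing.batch_queues.base import SingleBatchQueue
    from sup3r.preprocessing.samplers import Sampler

    MyBatchHandler = BatchHandlerFactory(
        SingleBatchQueue, Sampler, name='MyBatchHandler'
    )
    handler = MyBatchHandler(
        train_containers=[train_data],
        val_containers=[val_data],
        sample_shape=(20, 20, 12),  # divisible by s_enhance / t_enhance
        batch_size=16,
        n_batches=64,
        s_enhance=5,
        t_enhance=4,
    )
    for batch in handler:  # namedtuples with low_res / high_res attributes
        ...                # train on batch.low_res / batch.high_res
    handler.stop()         # stop the training / validation queue threads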
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/abstract.html b/_modules/sup3r/preprocessing/batch_queues/abstract.html
new file mode 100644
index 000000000..edb30bae6
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/abstract.html
@@ -0,0 +1,1040 @@
+sup3r.preprocessing.batch_queues.abstract — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.abstract

+"""Abstract batch queue class used for multi-threaded batching / training.
+
+TODO:
+    (1) Figure out apparent "blocking" issue with threaded enqueue batches.
+        max_workers=1 is the fastest?
+    (2) Setup distributed data handling so this can work with data distributed
+    over multiple nodes.
+"""
+
+import logging
+import threading
+import time
+from abc import ABC, abstractmethod
+from collections import namedtuple
+from concurrent.futures import ThreadPoolExecutor
+from typing import TYPE_CHECKING, List, Optional, Union
+
+import numpy as np
+import tensorflow as tf
+
+from sup3r.preprocessing.collections.base import Collection
+from sup3r.utilities.utilities import RANDOM_GENERATOR, Timer
+
+if TYPE_CHECKING:
+    from sup3r.preprocessing.samplers import DualSampler, Sampler
+
+logger = logging.getLogger(__name__)
+
+
+
+class AbstractBatchQueue(Collection, ABC):
+    """Abstract BatchQueue class. This class gets batches from a dataset
+    generator and maintains a queue of batches in a dedicated thread so the
+    training routine can proceed as soon as batches are available."""
+
+    Batch = namedtuple('Batch', ['low_res', 'high_res'])
+
+    def __init__(
+        self,
+        samplers: Union[List['Sampler'], List['DualSampler']],
+        batch_size: int = 16,
+        n_batches: int = 64,
+        s_enhance: int = 1,
+        t_enhance: int = 1,
+        queue_cap: Optional[int] = None,
+        transform_kwargs: Optional[dict] = None,
+        max_workers: int = 1,
+        thread_name: str = 'training',
+        mode: str = 'lazy',
+    ):
+        """
+        Parameters
+        ----------
+        samplers : List[Sampler]
+            List of Sampler instances
+        batch_size : int
+            Number of observations / samples in a batch
+        n_batches : int
+            Number of batches in an epoch, this sets the iteration limit for
+            this object.
+        s_enhance : int
+            Integer factor by which the spatial axes are to be enhanced.
+        t_enhance : int
+            Integer factor by which the temporal axes are to be enhanced.
+        queue_cap : int
+            Maximum number of batches the batch queue can store.
+        transform_kwargs : Union[Dict, None]
+            Dictionary of kwargs to be passed to `self.transform`. This
+            method performs smoothing / coarsening.
+        max_workers : int
+            Number of workers / threads to use for getting batches to fill
+            the queue.
+        thread_name : str
+            Name of the queue thread. Default is 'training'. Used to set the
+            name to 'validation' for :class:`BatchHandler`, which has a
+            training and a validation queue.
+        mode : str
+            Loading mode. Default is 'lazy', which only loads data into
+            memory as batches are queued. 'eager' will load all data into
+            memory right away.
+        """
+        msg = (
+            f'{self.__class__.__name__} requires a list of samplers. '
+            f'Received type {type(samplers)}'
+        )
+        assert isinstance(samplers, list), msg
+        super().__init__(containers=samplers)
+        self._batch_count = 0
+        self._queue_thread = None
+        self._training_flag = threading.Event()
+        self._thread_name = thread_name
+        self.mode = mode
+        self.s_enhance = s_enhance
+        self.t_enhance = t_enhance
+        self.batch_size = batch_size
+        self.n_batches = n_batches
+        self.queue_cap = n_batches if queue_cap is None else queue_cap
+        self.max_workers = max_workers
+        self.container_index = self.get_container_index()
+        self.queue = self.get_queue()
+        self.transform_kwargs = transform_kwargs or {
+            'smoothing_ignore': [],
+            'smoothing': None,
+        }
+        self.timer = Timer()
+        self.preflight()
+
+    @property
+    @abstractmethod
+    def queue_shape(self):
+        """Shape of objects stored in the queue. e.g. for single dataset
+        queues this is (batch_size, *sample_shape, len(features)). For dual
+        dataset queues this is [(batch_size, *lr_shape),
+        (batch_size, *hr_shape)]"""
+[docs] + def get_queue(self): + """Return FIFO queue for storing batches.""" + return tf.queue.FIFOQueue( + self.queue_cap, + dtypes=[tf.float32] * len(self.queue_shape), + shapes=self.queue_shape, + )
+ + +
+[docs] + def preflight(self): + """Run checks before kicking off the queue.""" + self.check_features() + self.check_enhancement_factors() + _ = self.check_shared_attr('sample_shape') + + sampler_bs = self.check_shared_attr('batch_size') + msg = ( + f'Samplers have a different batch_size: {sampler_bs} than the ' + f'BatchQueue: {self.batch_size}' + ) + assert sampler_bs == self.batch_size, msg + + if self.mode == 'eager': + logger.info('Received mode = "eager".') + _ = [c.compute() for c in self.containers]
+ + + @property + def queue_thread(self): + """Get new queue thread.""" + if self._queue_thread is None or self._queue_thread._is_stopped: + self._queue_thread = threading.Thread( + target=self.enqueue_batches, + name=self._thread_name, + ) + return self._queue_thread + +
+[docs] + def check_features(self): + """Make sure all samplers have the same sets of features.""" + features = [list(c.features) for c in self.containers] + msg = 'Received samplers with different sets of features.' + assert all(feats == features[0] for feats in features), msg
+ + +
+[docs] + def check_enhancement_factors(self): + """Make sure the enhancement factors evenly divide the sample_shape.""" + msg = ( + f'The sample_shape {self.sample_shape} is not consistent with ' + f'the enhancement factors {self.s_enhance, self.t_enhance}.' + ) + assert all( + samp % enhance == 0 + for samp, enhance in zip( + self.sample_shape, + [self.s_enhance, self.s_enhance, self.t_enhance], + ) + ), msg
+ + +
+    @abstractmethod
+    def transform(self, samples, **kwargs):
+        """Apply transform on batch samples. This can include smoothing /
+        coarsening depending on the type of queue. e.g. coarsening could be
+        included for a single dataset queue, where low res samples are
+        coarsened from high res samples. For a dual dataset queue this will
+        just include smoothing."""
+ + +
+[docs] + def post_proc(self, samples) -> Batch: + """Performs some post proc on dequeued samples before sending out for + training. Post processing can include coarsening on high-res data (if + :class:`Collection` consists of :class:`Sampler` objects and not + :class:`DualSampler` objects), smoothing, etc + + Returns + ------- + Batch : namedtuple + namedtuple with `low_res` and `high_res` attributes + """ + lr, hr = self.transform(samples, **self.transform_kwargs) + return self.Batch(low_res=lr, high_res=hr)
+ + +
+[docs] + def start(self) -> None: + """Start thread to keep sample queue full for batches.""" + self._training_flag.set() + if ( + not self.queue_thread.is_alive() + and self.mode == 'lazy' + and self.queue_cap > 0 + ): + logger.info(f'Starting {self._thread_name} queue.') + self.queue_thread.start()
+ + +
+[docs] + def stop(self) -> None: + """Stop loading batches.""" + self._training_flag.clear() + if self.queue_thread.is_alive(): + logger.info(f'Stopping {self._thread_name} queue.') + self.queue_thread.join()
+ + + def __len__(self): + return self.n_batches + + def __iter__(self): + self._batch_count = 0 + self.start() + return self + +
+[docs] + def get_batch(self) -> Batch: + """Get batch from queue or directly from a ``Sampler`` through + ``sample_batch``.""" + if ( + self.mode == 'eager' + or self.queue_cap == 0 + or self.queue.size().numpy() == 0 + ): + return self.sample_batch() + return self.queue.dequeue()
+ + + @property + def running(self): + """Boolean to check whether to keep enqueueing batches.""" + return ( + self._training_flag.is_set() + and self.queue_thread.is_alive() + and not self.queue.is_closed() + ) + +
+[docs] + def enqueue_batches(self) -> None: + """Callback function for queue thread. While training, the queue is + checked for empty spots and filled. In the training thread, batches are + removed from the queue.""" + log_time = time.time() + while self.running: + needed = self.queue_cap - self.queue.size().numpy() + if needed == 1 or self.max_workers == 1: + self.enqueue_batch() + elif needed > 0: + with ThreadPoolExecutor(self.max_workers) as exe: + _ = [exe.submit(self.enqueue_batch) for _ in range(needed)] + logger.debug( + 'Added %s enqueue futures to %s queue.', + needed, + self._thread_name, + ) + if time.time() > log_time + 10: + logger.debug(self.log_queue_info()) + log_time = time.time()
+ + + def __next__(self) -> Batch: + """Dequeue batch samples, squeeze if for a spatial only model, perform + some post-proc like smoothing, coarsening, etc, and then send out for + training as a namedtuple of low_res / high_res arrays. + + Returns + ------- + batch : Batch + Batch object with batch.low_res and batch.high_res attributes + """ + if self._batch_count < self.n_batches: + self.timer.start() + samples = self.get_batch() + if self.sample_shape[2] == 1: + if isinstance(samples, (list, tuple)): + samples = tuple(s[..., 0, :] for s in samples) + else: + samples = samples[..., 0, :] + batch = self.post_proc(samples) + self.timer.stop() + self._batch_count += 1 + logger.debug( + 'Batch step %s finished in %s.', + self._batch_count, + self.timer.elapsed_str, + ) + else: + raise StopIteration + return batch + +
+[docs] + def get_container_index(self): + """Get random container index based on weights""" + indices = np.arange(0, len(self.containers)) + return RANDOM_GENERATOR.choice(indices, p=self.container_weights)
+ + +
+[docs] + def get_random_container(self): + """Get random container based on container weights""" + self.container_index = self.get_container_index() + return self.containers[self.container_index]
+ + +
+[docs] + def sample_batch(self): + """Get random sampler from collection and return a batch of samples + from that sampler. + + Notes + ----- + These samples are wrapped in an ``np.asarray`` call, so they have been + loaded into memory. + """ + return next(self.get_random_container())
+ + +
+[docs] + def log_queue_info(self): + """Log info about queue size.""" + return '{} queue length: {} / {}.'.format( + self._thread_name.title(), + self.queue.size().numpy(), + self.queue_cap, + )
+ + +
+[docs] + def enqueue_batch(self): + """Build batch and send to queue.""" + if self.running and self.queue.size().numpy() < self.queue_cap: + self.queue.enqueue(self.sample_batch())
+ + + @property + def lr_shape(self): + """Shape of low resolution sample in a low-res / high-res pair. (e.g. + (spatial_1, spatial_2, temporal, features))""" + return (*self.lr_sample_shape, len(self.lr_features)) + + @property + def hr_shape(self): + """Shape of high resolution sample in a low-res / high-res pair. (e.g. + (spatial_1, spatial_2, temporal, features))""" + return (*self.hr_sample_shape, len(self.hr_features))
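To make the queue life cycle concrete, a sketch of the intended usage with a concrete subclass (``SingleBatchQueue`` from the base module; the ``samplers`` list is an assumed input):

.. code-block:: python

    # A sketch, assuming `samplers` is a list of Sampler instances that
    # share the same features, batch_size, and sample_shape.
    from sup3r.preprocessing.batch_queues.base import SingleBatchQueue

    queue = SingleBatchQueue(samplers, batch_size=16, n_batches=64)
    for batch in queue:  # __iter__ calls start(): a background thread keeps
        lr = batch.low_res    # the FIFO queue full while __next__ dequeues
        hr = batch.high_res   # and post-processes samples
        ...
    queue.stop()  # clears the training flag and joins the queue thread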
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/base.html b/_modules/sup3r/preprocessing/batch_queues/base.html
new file mode 100644
index 000000000..313605f64
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/base.html
@@ -0,0 +1,753 @@
+sup3r.preprocessing.batch_queues.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.base

+"""Base objects which generate, build, and operate on batches. Also can
+interface with models."""
+
+import logging
+
+from sup3r.preprocessing.utilities import numpy_if_tensor
+from sup3r.utilities.utilities import spatial_coarsening, temporal_coarsening
+
+from .abstract import AbstractBatchQueue
+from .utilities import smooth_data
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class SingleBatchQueue(AbstractBatchQueue): + """Base BatchQueue class for single dataset containers + + Note + ---- + Here we use `len(self.features)` for the last dimension of samples, since + samples in :class:`SingleBatchQueue` queues are coarsened to produce + low-res samples, and then the `lr_only_features` are removed with + `hr_features_ind`. In contrast, for samples in :class:`DualBatchQueue` + queues there are low / high res pairs and the high-res only stores the + `hr_features`""" + + @property + def queue_shape(self): + """Shape of objects stored in the queue.""" + return [(self.batch_size, *self.hr_sample_shape, len(self.features))] + +
+[docs] + def transform( + self, + samples, + smoothing=None, + smoothing_ignore=None, + temporal_coarsening_method='subsample', + ): + """Coarsen high res data to get corresponding low res batch. + + Parameters + ---------- + samples : Union[np.ndarray, da.core.Array] + High resolution batch of samples. + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + smoothing : float | None + Standard deviation to use for gaussian filtering of the coarse + data. This can be tuned by matching the kinetic energy of a low + resolution simulation with the kinetic energy of a coarsened and + smoothed high resolution simulation. If None no smoothing is + performed. + smoothing_ignore : list | None + List of features to ignore for the smoothing filter. None will + smooth all features if smoothing kwarg is not None + temporal_coarsening_method : str + Method to use for temporal coarsening. Can be subsample, average, + min, max, or total + + Returns + ------- + low_res : Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + high_res : Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + """ + low_res = spatial_coarsening(samples, self.s_enhance) + low_res = ( + low_res + if self.t_enhance == 1 + else temporal_coarsening( + low_res, self.t_enhance, temporal_coarsening_method + ) + ) + smoothing_ignore = ( + smoothing_ignore if smoothing_ignore is not None else [] + ) + low_res = smooth_data( + low_res, self.features, smoothing_ignore, smoothing + ) + high_res = numpy_if_tensor(samples)[..., self.hr_features_ind] + return low_res, high_res
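As a concrete illustration of the shapes ``transform`` produces, a sketch using the same coarsening utilities imported at the top of this module (the array contents are random; the shape comments are what the calls should yield):

.. code-block:: python

    import numpy as np

    from sup3r.utilities.utilities import (
        spatial_coarsening,
        temporal_coarsening,
    )

    # 5D high-res batch: (batch_size, spatial_1, spatial_2, temporal, features)
    samples = np.random.rand(16, 20, 20, 12, 2).astype(np.float32)

    low_res = spatial_coarsening(samples, 5)                # (16, 4, 4, 12, 2)
    low_res = temporal_coarsening(low_res, 4, 'subsample')  # (16, 4, 4, 3, 2)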
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/conditional.html b/_modules/sup3r/preprocessing/batch_queues/conditional.html
new file mode 100644
index 000000000..1810eb2ac
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/conditional.html
@@ -0,0 +1,1002 @@
+sup3r.preprocessing.batch_queues.conditional — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.conditional

+"""Abstract batch queue class used for conditional moment estimation."""
+
+import logging
+from abc import abstractmethod
+from collections import namedtuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Union
+
+import numpy as np
+
+from sup3r.models.conditional import Sup3rCondMom
+from sup3r.preprocessing.utilities import numpy_if_tensor
+
+from .base import SingleBatchQueue
+from .utilities import spatial_simple_enhancing, temporal_simple_enhancing
+
+if TYPE_CHECKING:
+    from sup3r.preprocessing.samplers import DualSampler, Sampler
+
+logger = logging.getLogger(__name__)
+
+
+
+class ConditionalBatchQueue(SingleBatchQueue):
+    """BatchQueue class for conditional moment estimation."""
+
+    ConditionalBatch = namedtuple(
+        'ConditionalBatch', ['low_res', 'high_res', 'output', 'mask']
+    )
+
+    def __init__(
+        self,
+        samplers: Union[List['Sampler'], List['DualSampler']],
+        time_enhance_mode: str = 'constant',
+        lower_models: Optional[Dict[int, Sup3rCondMom]] = None,
+        s_padding: int = 0,
+        t_padding: int = 0,
+        end_t_padding: bool = False,
+        **kwargs,
+    ):
+        """
+        Parameters
+        ----------
+        samplers : List[Sampler] | List[DualSampler]
+            List of samplers to use for the queue.
+        time_enhance_mode : str
+            [constant, linear]
+            Method to enhance temporally when constructing the subfilter. At
+            every temporal location, the low-res temporal data is subtracted
+            from the predicted high-res temporal data. 'constant' will assume
+            that the low-res temporal data is constant between landmarks.
+            'linear' will linearly interpolate between landmarks to generate
+            the low-res data to remove from the high-res.
+        lower_models : Dict[int, Sup3rCondMom] | None
+            Dictionary of models that predict lower moments. For example, if
+            this queue is part of a handler to estimate the 3rd moment,
+            `lower_models` could include models that estimate the 1st and 2nd
+            moments. These lower moments can be required in higher order
+            moment calculations.
+        s_padding : int | None
+            Width of spatial padding used to predict only the middle part.
+            If None, no padding is used.
+        t_padding : int | None
+            Width of temporal padding used to predict only the middle part.
+            If None, no padding is used.
+        end_t_padding : bool | False
+            Zero pad the end of temporal space. Ensures that loss is
+            calculated only if a snapshot is surrounded by temporal
+            landmarks. False by default.
+        kwargs : dict
+            Keyword arguments for parent class.
+        """
+        self.low_res = None
+        self.high_res = None
+        self.output = None
+        self.s_padding = s_padding
+        self.t_padding = t_padding
+        self.end_t_padding = end_t_padding
+        self.time_enhance_mode = time_enhance_mode
+        self.lower_models = lower_models
+        super().__init__(samplers, **kwargs)
+
+    _signature_objs = (__init__, SingleBatchQueue)
+    def make_mask(self, high_res):
+        """Make mask for output. This is used to ensure consistency when
+        training conditional moments.
+
+        Note
+        ----
+        Consider the case of learning E(HR|LR) where HR is the high_res and
+        LR is the low_res. In theory, the conditional moment estimation works
+        if the full LR is passed as input and predicts the full HR. In
+        practice, only the LR data that overlaps and surrounds the HR data is
+        useful, i.e. E(HR|LR) = E(HR|LR_nei) where LR_nei is the LR data that
+        surrounds the HR data. Physically, this is equivalent to saying that
+        data far away from a region of interest does not matter. This allows
+        learning the conditional moments on spatial and temporal chunks only
+        if one restricts the high_res output to being overlapped and
+        surrounded by the input low_res. The role of the mask is to ensure
+        that the input low_res always surrounds the output high_res.
+
+        Parameters
+        ----------
+        high_res : Union[np.ndarray, da.core.Array]
+            4D | 5D array
+            (batch_size, spatial_1, spatial_2, features)
+            (batch_size, spatial_1, spatial_2, temporal, features)
+
+        Returns
+        -------
+        mask: Union[np.ndarray, da.core.Array]
+            4D | 5D array
+            (batch_size, spatial_1, spatial_2, features)
+            (batch_size, spatial_1, spatial_2, temporal, features)
+        """
+        mask = np.zeros(high_res.shape, dtype=high_res.dtype)
+        s_min = self.s_padding
+        t_min = self.t_padding
+        s_max = None if self.s_padding == 0 else -self.s_padding
+        t_max = None if self.t_padding == 0 else -self.t_padding
+        if self.end_t_padding and self.t_enhance > 1:
+            if t_max is None:
+                t_max = 1 - self.t_enhance
+            else:
+                t_max = 1 - self.t_enhance - self.t_padding
+
+        if len(high_res.shape) == 4:
+            mask[:, s_min:s_max, s_min:s_max, :] = 1.0
+        elif len(high_res.shape) == 5:
+            mask[:, s_min:s_max, s_min:s_max, t_min:t_max, :] = 1.0
+
+        return mask
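The masking logic above reduces to zeroing out the padded borders of each sample. A standalone numpy sketch of the same idea, with illustrative padding values (``end_t_padding`` handling omitted):

.. code-block:: python

    import numpy as np

    # Mirror of make_mask for a 5D batch with s_padding=2 and t_padding=1.
    high_res = np.random.rand(4, 10, 10, 8, 2).astype(np.float32)
    mask = np.zeros(high_res.shape, dtype=high_res.dtype)
    mask[:, 2:-2, 2:-2, 1:-1, :] = 1.0

    # Only the interior contributes to the conditional moment loss; the
    # borders, where the low-res input does not fully surround the high-res
    # output, are masked out.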
+ + +
+[docs] + @abstractmethod + def make_output(self, samples): + """Make custom batch output. This depends on the moment type being + estimated. e.g. This could be the 1st moment, which is just high_res + or the 2nd moment, which is (high_res - 1st moment) ** 2 + + Parameters + ---------- + samples : Tuple[Union[np.ndarray, da.core.Array], ...] + Tuple of low_res, high_res. Each array is: + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + + Returns + ------- + output: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + """
+ + +
+[docs] + def post_proc(self, samples): + """Returns normalized collection of samples / observations along with + mask and target output for conditional moment estimation. Performs + coarsening on high-res data if :class:`Collection` consists of + :class:`Sampler` objects and not :class:`DualSampler` objects + + Returns + ------- + namedtuple + Named tuple with `low_res`, `high_res`, `mask`, and `output` + attributes + """ + lr, hr = self.transform(samples, **self.transform_kwargs) + mask = self.make_mask(high_res=hr) + output = self.make_output(samples=(lr, hr)) + return self.ConditionalBatch( + low_res=lr, high_res=hr, output=output, mask=mask + )
+
+ + + +
+[docs] +class QueueMom1(ConditionalBatchQueue): + """Batch handling class for conditional estimation of first moment""" + +
+[docs] + def make_output(self, samples): + """For the 1st moment the output is simply the high_res""" + _, hr = samples + return hr
+
+ + + +
+[docs] +class QueueMom1SF(ConditionalBatchQueue): + """Batch handling class for conditional estimation of first moment + of subfilter velocity""" + +
+[docs] + def make_output(self, samples): + """ + Returns + ------- + SF: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + SF is subfilter, HR is high-res and LR is low-res + SF = HR - LR + """ + # Remove LR from HR + lr, hr = samples + enhanced_lr = spatial_simple_enhancing(lr, s_enhance=self.s_enhance) + enhanced_lr = temporal_simple_enhancing( + enhanced_lr, + t_enhance=self.t_enhance, + mode=self.time_enhance_mode, + ) + enhanced_lr = enhanced_lr[..., self.hr_features_ind] + + return hr - enhanced_lr
+
+ + + +
+[docs] +class QueueMom2(ConditionalBatchQueue): + """Batch handling class for conditional estimation of second moment""" + +
+[docs] + def make_output(self, samples): + """ + Returns + ------- + (HR - <HR|LR>)**2: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + HR is high-res and LR is low-res + """ + # Remove first moment from HR and square it + lr, hr = samples + exo_data = self.lower_models[1].get_high_res_exo_input(hr) + out = numpy_if_tensor(self.lower_models[1]._tf_generate(lr, exo_data)) + out = self.lower_models[1]._combine_loss_input(hr, out) + return (hr - out) ** 2
+
+ + + +
+[docs] +class QueueMom2Sep(QueueMom1): + """Batch handling class for conditional estimation of second moment + without subtraction of first moment""" + +
+[docs] + def make_output(self, samples): + """ + Returns + ------- + HR**2: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + HR is high-res + """ + return super().make_output(samples) ** 2
+
+ + + +
+[docs] +class QueueMom2SF(ConditionalBatchQueue): + """Batch handling class for conditional estimation of second moment of + subfilter velocity.""" + +
+[docs] + def make_output(self, samples): + """ + Returns + ------- + (SF - <SF|LR>)**2: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + SF is subfilter, HR is high-res and LR is low-res + SF = HR - LR + """ + # Remove LR and first moment from HR and square it + lr, hr = samples + exo_data = self.lower_models[1].get_high_res_exo_input(hr) + out = numpy_if_tensor(self.lower_models[1]._tf_generate(lr, exo_data)) + out = self.lower_models[1]._combine_loss_input(hr, out) + enhanced_lr = spatial_simple_enhancing(lr, s_enhance=self.s_enhance) + enhanced_lr = temporal_simple_enhancing( + enhanced_lr, t_enhance=self.t_enhance, mode=self.time_enhance_mode + ) + enhanced_lr = enhanced_lr[..., self.hr_features_ind] + return (hr - enhanced_lr - out) ** 2
+
+ + + +
+class QueueMom2SepSF(QueueMom1SF):
+    """Batch of low_res, high_res and output data when learning the second
+    moment of subfilter velocity separately from the first moment"""
+
+[docs] + def make_output(self, samples): + """ + Returns + ------- + SF**2: Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + SF is subfilter, HR is high-res and LR is low-res + SF = HR - LR + """ + # Remove LR from HR and square it + return super().make_output(samples) ** 2
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/dc.html b/_modules/sup3r/preprocessing/batch_queues/dc.html
new file mode 100644
index 000000000..1e70a2e91
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/dc.html
@@ -0,0 +1,799 @@
+sup3r.preprocessing.batch_queues.dc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.dc

+"""Data centric batch handler for dynamic batching strategy based on
+performance during training"""
+
+import logging
+
+import numpy as np
+
+from .base import SingleBatchQueue
+
+logger = logging.getLogger(__name__)
+
+
+
+class BatchQueueDC(SingleBatchQueue):
+    """Sample from data based on spatial and temporal weights. These weights
+    can be derived from validation training losses and updated during
+    training, or set a priori to construct a validation queue."""
+
+    def __init__(self, samplers, n_space_bins=1, n_time_bins=1, **kwargs):
+        """
+        Parameters
+        ----------
+        samplers : List[Sampler]
+            List of Sampler instances
+        n_space_bins : int
+            Number of spatial bins to use for weighted sampling. e.g. if this
+            is 4 the spatial domain will be divided into 4 equal regions and
+            losses will be calculated across these regions during training in
+            order to adaptively sample from lower performing regions.
+        n_time_bins : int
+            Number of time bins to use for weighted sampling. e.g. if this
+            is 4 the temporal domain will be divided into 4 equal periods and
+            losses will be calculated across these periods during training in
+            order to adaptively sample from lower performing time periods.
+        kwargs : dict
+            Keyword arguments for parent class.
+        """
+        self.n_space_bins = n_space_bins
+        self.n_time_bins = n_time_bins
+        self._spatial_weights = np.ones(n_space_bins) / n_space_bins
+        self._temporal_weights = np.ones(n_time_bins) / n_time_bins
+        super().__init__(samplers, **kwargs)
+
+    _signature_objs = (__init__, SingleBatchQueue)
+[docs] + def sample_batch(self): + """Update weights and get batch of samples from sampled container.""" + sampler = self.get_random_container() + sampler.update_weights(self.spatial_weights, self.temporal_weights) + return next(sampler)
+ + + @property + def spatial_weights(self): + """Get weights used to sample spatial bins.""" + return self._spatial_weights + + @property + def temporal_weights(self): + """Get weights used to sample temporal bins.""" + return self._temporal_weights + +
+[docs] + def update_weights(self, spatial_weights, temporal_weights): + """Set weights used to sample spatial and temporal bins. This is called + by :class:`Sup3rGanDC` after an epoch to update weights based on model + performance across validation samples.""" + self._spatial_weights = spatial_weights + self._temporal_weights = temporal_weights
+
+ + + +
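As an illustration of how these weights evolve (the numbers are arbitrary and ``queue`` stands in for a ``BatchQueueDC`` instance):

.. code-block:: python

    import numpy as np

    # With n_space_bins=4 and n_time_bins=2 the weights start uniform:
    #   spatial_weights  = [0.25, 0.25, 0.25, 0.25]
    #   temporal_weights = [0.5, 0.5]
    # After an epoch, Sup3rGanDC can shift probability mass toward the
    # worst performing bins, e.g.:
    queue.update_weights(
        spatial_weights=np.array([0.1, 0.4, 0.4, 0.1]),
        temporal_weights=np.array([0.7, 0.3]),
    )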
+class ValBatchQueueDC(BatchQueueDC):
+    """Queue to construct a single batch for each spatiotemporal validation
+    bin. e.g. If we have 4 time bins and 1 space bin this will get
+    `batch_size` samples for 4 batches, with `batch_size` samples from each
+    bin. The model performance across these batches will determine the
+    weights for how the training batch queue is sampled."""
+
+    def __init__(self, samplers, n_space_bins=1, n_time_bins=1, **kwargs):
+        """
+        Parameters
+        ----------
+        samplers : List[Sampler]
+            List of Sampler instances
+        n_space_bins : int
+            Number of spatial bins to use for weighted sampling. e.g. if this
+            is 4 the spatial domain will be divided into 4 equal regions and
+            losses will be calculated across these regions during training in
+            order to adaptively sample from lower performing regions.
+        n_time_bins : int
+            Number of time bins to use for weighted sampling. e.g. if this
+            is 4 the temporal domain will be divided into 4 equal periods and
+            losses will be calculated across these periods during training in
+            order to adaptively sample from lower performing time periods.
+        kwargs : dict
+            Keyword arguments for parent class.
+        """
+        super().__init__(
+            samplers,
+            n_space_bins=n_space_bins,
+            n_time_bins=n_time_bins,
+            **kwargs,
+        )
+        self.n_batches = n_space_bins * n_time_bins
+
+    _signature_objs = (__init__, BatchQueueDC)
+
+    @property
+    def spatial_weights(self):
+        """Sample entirely from this spatial bin determined by the batch
+        number."""
+        self._spatial_weights = np.eye(
+            1,
+            self.n_space_bins,
+            self._batch_count % self.n_space_bins,
+            dtype=np.float32,
+        )[0]
+        return self._spatial_weights
+
+    @property
+    def temporal_weights(self):
+        """Sample entirely from this temporal bin determined by the batch
+        number."""
+        self._temporal_weights = np.eye(
+            1,
+            self.n_time_bins,
+            self._batch_count % self.n_time_bins,
+            dtype=np.float32,
+        )[0]
+        return self._temporal_weights
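The ``np.eye`` construction above is a one-hot selector that walks through the bins as ``_batch_count`` increments:

.. code-block:: python

    import numpy as np

    n_time_bins = 4
    for batch_count in range(n_time_bins):
        print(np.eye(1, n_time_bins, batch_count % n_time_bins,
                     dtype=np.float32)[0])
    # [1. 0. 0. 0.]
    # [0. 1. 0. 0.]
    # [0. 0. 1. 0.]
    # [0. 0. 0. 1.]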
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/dual.html b/_modules/sup3r/preprocessing/batch_queues/dual.html
new file mode 100644
index 000000000..90724046b
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/dual.html
@@ -0,0 +1,743 @@
+sup3r.preprocessing.batch_queues.dual — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.dual

+"""Base objects which generate, build, and operate on batches. Also can
+interface with models."""
+
+import logging
+
+from scipy.ndimage import gaussian_filter
+
+from .abstract import AbstractBatchQueue
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DualBatchQueue(AbstractBatchQueue): + """Base BatchQueue for use with + :class:`~sup3r.preprocessing.samplers.DualSampler` objects.""" + + def __init__(self, samplers, **kwargs): + """ + See Also + -------- + :class:`~sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue` + """ + super().__init__(samplers, **kwargs) + self.check_enhancement_factors() + + _signature_objs = (AbstractBatchQueue,) + + @property + def queue_shape(self): + """Shape of objects stored in the queue.""" + return [ + (self.batch_size, *self.lr_shape), + (self.batch_size, *self.hr_shape), + ] + +
+    def check_enhancement_factors(self):
+        """Make sure each DualSampler has the same enhancement factors and
+        that they match those provided to the BatchQueue."""
+
+        s_factors = [c.s_enhance for c in self.containers]
+        msg = (
+            f'Received s_enhance = {self.s_enhance} but not all '
+            f'DualSamplers in the collection have the same value: '
+            f'{s_factors}.'
+        )
+        assert all(self.s_enhance == s for s in s_factors), msg
+        t_factors = [c.t_enhance for c in self.containers]
+        msg = (
+            f'Received t_enhance = {self.t_enhance} but not all '
+            f'DualSamplers in the collection have the same value: '
+            f'{t_factors}.'
+        )
+        assert all(self.t_enhance == t for t in t_factors), msg
+ + +
+[docs] + def transform(self, samples, smoothing=None, smoothing_ignore=None): + """Perform smoothing if requested. + + Note + ---- + This does not include temporal or spatial coarsening like + :class:`SingleBatchQueue` + """ + low_res, high_res = samples + + if smoothing is not None: + feat_iter = [ + j + for j in range(low_res.shape[-1]) + if self.features[j] not in smoothing_ignore + ] + for i in range(low_res.shape[0]): + for j in feat_iter: + low_res[i, ..., j] = gaussian_filter( + low_res[i, ..., j], smoothing, mode='nearest' + ) + return low_res, high_res
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/batch_queues/utilities.html b/_modules/sup3r/preprocessing/batch_queues/utilities.html
new file mode 100644
index 000000000..ff7032592
--- /dev/null
+++ b/_modules/sup3r/preprocessing/batch_queues/utilities.html
@@ -0,0 +1,842 @@
+sup3r.preprocessing.batch_queues.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.batch_queues.utilities

+"""Miscellaneous utilities shared across the batch_queues module"""
+
+import logging
+
+import numpy as np
+from scipy.interpolate import interp1d
+from scipy.ndimage import gaussian_filter, zoom
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +def temporal_simple_enhancing(data, t_enhance=4, mode='constant'): + """Upsample data according to t_enhance resolution + + Parameters + ---------- + data : Union[np.ndarray, da.core.Array] + 5D array with dimensions + (observations, spatial_1, spatial_2, temporal, features) + t_enhance : int + factor by which to enhance temporal dimension + mode : str + interpolation method for enhancement. + + Returns + ------- + enhanced_data : Union[np.ndarray, da.core.Array] + 5D array with same dimensions as data with new enhanced resolution + """ + + if t_enhance in [None, 1]: + enhanced_data = data + elif t_enhance not in [None, 1] and len(data.shape) == 5: + if mode == 'constant': + enhancement = [1, 1, 1, t_enhance, 1] + enhanced_data = zoom( + data, enhancement, order=0, mode='nearest', grid_mode=True + ) + elif mode == 'linear': + index_t_hr = np.array(list(range(data.shape[3] * t_enhance))) + index_t_lr = index_t_hr[::t_enhance] + enhanced_data = interp1d( + index_t_lr, data, axis=3, fill_value='extrapolate' + )(index_t_hr) + enhanced_data = np.array(enhanced_data, dtype=np.float32) + elif len(data.shape) != 5: + msg = ( + 'Data must be 5D to do temporal enhancing, but ' + f'received: {data.shape}' + ) + logger.error(msg) + raise ValueError(msg) + + return enhanced_data
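For example (a sketch with random input data):

.. code-block:: python

    import numpy as np

    from sup3r.preprocessing.batch_queues.utilities import (
        temporal_simple_enhancing,
    )

    data = np.random.rand(2, 4, 4, 3, 1)  # (obs, spatial_1, spatial_2, t, f)
    out = temporal_simple_enhancing(data, t_enhance=4, mode='constant')
    print(out.shape)  # (2, 4, 4, 12, 1)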
+ + + +
+[docs] +def smooth_data(low_res, training_features, smoothing_ignore, smoothing=None): + """Smooth data using a gaussian filter + + Parameters + ---------- + low_res : Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + training_features : list | None + Ordered list of training features input to the generative model + smoothing_ignore : list | None + List of features to ignore for the smoothing filter. None will + smooth all features if smoothing kwarg is not None + smoothing : float | None + Standard deviation to use for gaussian filtering of the coarse + data. This can be tuned by matching the kinetic energy of a low + resolution simulation with the kinetic energy of a coarsened and + smoothed high resolution simulation. If None no smoothing is + performed. + + Returns + ------- + low_res : Union[np.ndarray, da.core.Array] + 4D | 5D array + (batch_size, spatial_1, spatial_2, features) + (batch_size, spatial_1, spatial_2, temporal, features) + """ + + if smoothing is not None: + feat_iter = [ + j + for j in range(low_res.shape[-1]) + if training_features[j] not in smoothing_ignore + ] + for i in range(low_res.shape[0]): + for j in feat_iter: + if len(low_res.shape) == 5: + for t in range(low_res.shape[-2]): + low_res[i, ..., t, j] = gaussian_filter( + low_res[i, ..., t, j], smoothing, mode='nearest' + ) + else: + low_res[i, ..., j] = gaussian_filter( + low_res[i, ..., j], smoothing, mode='nearest' + ) + return low_res
+ + + +
+[docs] +def spatial_simple_enhancing(data, s_enhance=2, obs_axis=True): + """Simple enhancing according to s_enhance resolution + + Parameters + ---------- + data : Union[np.ndarray, da.core.Array] + 5D | 4D | 3D array with dimensions: + (n_obs, spatial_1, spatial_2, temporal, features) (obs_axis=True) + (n_obs, spatial_1, spatial_2, features) (obs_axis=True) + (spatial_1, spatial_2, temporal, features) (obs_axis=False) + (spatial_1, spatial_2, temporal_or_features) (obs_axis=False) + s_enhance : int + factor by which to enhance spatial dimensions + obs_axis : bool + Flag for if axis=0 is the observation axis. If True (default) + spatial axis=(1, 2) (zero-indexed), if False spatial axis=(0, 1) + + Returns + ------- + enhanced_data : Union[np.ndarray, da.core.Array] + 3D | 4D | 5D array with same dimensions as data with new enhanced + resolution + """ + + if len(data.shape) < 3: + msg = ( + 'Data must be 3D, 4D, or 5D to do spatial enhancing, but ' + f'received: {data.shape}' + ) + logger.error(msg) + raise ValueError(msg) + + if s_enhance is not None and s_enhance > 1: + if obs_axis and len(data.shape) == 5: + enhancement = [1, s_enhance, s_enhance, 1, 1] + enhanced_data = zoom( + data, enhancement, order=0, mode='nearest', grid_mode=True + ) + + elif obs_axis and len(data.shape) == 4: + enhancement = [1, s_enhance, s_enhance, 1] + enhanced_data = zoom( + data, enhancement, order=0, mode='nearest', grid_mode=True + ) + + elif not obs_axis and len(data.shape) == 4: + enhancement = [s_enhance, s_enhance, 1, 1] + enhanced_data = zoom( + data, enhancement, order=0, mode='nearest', grid_mode=True + ) + + elif not obs_axis and len(data.shape) == 3: + enhancement = [s_enhance, s_enhance, 1] + enhanced_data = zoom( + data, enhancement, order=0, mode='nearest', grid_mode=True + ) + else: + msg = ( + 'Data must be 3D, 4D, or 5D to do spatial enhancing, but ' + f'received: {data.shape}' + ) + logger.error(msg) + raise ValueError(msg) + + else: + enhanced_data = data + + return enhanced_data
+ +
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/cachers/base.html b/_modules/sup3r/preprocessing/cachers/base.html
new file mode 100644
index 000000000..91889ae0d
--- /dev/null
+++ b/_modules/sup3r/preprocessing/cachers/base.html
@@ -0,0 +1,1189 @@
+sup3r.preprocessing.cachers.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.cachers.base

+"""Basic objects that can cache rasterized / derived data."""
+
+# netCDF4 has to be imported before h5py
+# isort: skip_file
+import pandas as pd
+import copy
+import itertools
+import logging
+import os
+from typing import Dict, Optional, Union, TYPE_CHECKING
+import netCDF4 as nc4  # noqa
+import h5py
+import dask
+import dask.array as da
+import numpy as np
+from warnings import warn
+from sup3r.preprocessing.base import Container
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import _mem_check, log_args, _lowered
+from sup3r.utilities.utilities import safe_cast, safe_serialize
+from rex.utilities.utilities import to_records_array
+
+from .utilities import _check_for_cache
+
+if TYPE_CHECKING:
+    from sup3r.preprocessing.accessor import Sup3rX
+    from sup3r.preprocessing.base import Sup3rDataset
+
+
+logger = logging.getLogger(__name__)
+
+
+
+class Cacher(Container):
+    """Base cacher object. Simply writes given data to H5 or NETCDF files.
+    By default every feature will be written to a separate file. To write
+    multiple features to the same file call :meth:`write_netcdf` or
+    :meth:`write_h5` directly"""
+
+    @log_args
+    def __init__(
+        self,
+        data: Union['Sup3rX', 'Sup3rDataset'],
+        cache_kwargs: Optional[Dict] = None,
+    ):
+        """
+        Parameters
+        ----------
+        data : Union[Sup3rX, Sup3rDataset]
+            Data to write to file
+        cache_kwargs : dict
+            Dictionary with kwargs for caching wrangled data. This should at
+            minimum include a 'cache_pattern' key / value pair. This pattern
+            must have a {feature} format key and either an h5 or nc file
+            extension, based on desired output type.
+
+            Can also include a ``max_workers`` key and ``chunks`` key.
+            ``max_workers`` is an integer specifying the number of threads to
+            use for writing chunks to output files and ``chunks`` is a
+            dictionary of dictionaries for each feature (or a single
+            dictionary to use for all features). e.g.
+
+            .. code-block:: python
+
+                {'cache_pattern': ...,
+                 'chunks': {
+                     'u_10m': {
+                         'time': 20,
+                         'south_north': 100,
+                         'west_east': 100
+                     }
+                 }
+                }
+
+        Note
+        ----
+        This is only for saving cached data. If you want to reload the
+        cached files load them with a ``Loader`` object. ``DataHandler``
+        objects can cache and reload from cache automatically.
+        """
+        super().__init__(data=data)
+        if (
+            cache_kwargs is not None
+            and cache_kwargs.get('cache_pattern', None) is not None
+        ):
+            self.out_files = self.cache_data(**cache_kwargs)
+
+    def _write_single(
+        self,
+        feature,
+        out_file,
+        chunks,
+        max_workers=None,
+        mode='w',
+        attrs=None,
+        verbose=False,
+    ):
+        """Write a single NETCDF or H5 cache file."""
+        if os.path.exists(out_file):
+            logger.info(
+                f'{out_file} already exists. Delete if you want to overwrite.'
+            )
+        else:
+            _, ext = os.path.splitext(out_file)
+            os.makedirs(os.path.dirname(out_file), exist_ok=True)
+            tmp_file = out_file + '.tmp'
+            logger.info(
+                'Writing %s to %s. %s', feature, tmp_file, _mem_check()
+            )
+            if ext == '.h5':
+                func = self.write_h5
+            elif ext == '.nc':
+                func = self.write_netcdf
+            else:
+                msg = (
+                    'cache_pattern must have either h5 or nc extension. '
+                    f'Received {ext}.'
+                )
+                logger.error(msg)
+                raise ValueError(msg)
+            func(
+                out_file=tmp_file,
+                data=self.data,
+                features=[feature],
+                chunks=chunks,
+                max_workers=max_workers,
+                mode=mode,
+                attrs=attrs,
+                verbose=verbose,
+            )
+            os.replace(tmp_file, out_file)
+            logger.info('Moved %s to %s', tmp_file, out_file)
+[docs] + def cache_data( + self, + cache_pattern, + chunks=None, + max_workers=None, + mode='w', + attrs=None, + verbose=False, + ): + """Cache data to file with file type based on user provided + cache_pattern. + + Parameters + ---------- + cache_pattern : str + Cache file pattern. Must have a {feature} format key. The extension + (.h5 or .nc) specifies which format to use for caching. + chunks : dict | None + Chunk sizes for coordinate dimensions. e.g. + ``{'u_10m': {'time': 10, 'south_north': 100, 'west_east': 100}}`` + max_workers : int | None + Number of workers to use for parallel writing of chunks + mode : str + Write mode for ``out_file``. Defaults to write. + attrs : dict | None + Optional attributes to write to file. Can specify dataset specific + attributes by adding a dictionary with the dataset name as a key. + e.g. {**global_attrs, dset: {...}} + verbose : bool + Whether to log progress for each chunk written to output files. + """ + msg = 'cache_pattern must have {feature} format key.' + assert '{feature}' in cache_pattern, msg + + cached_files, _, missing_files, missing_features = _check_for_cache( + features=self.features, + cache_kwargs={'cache_pattern': cache_pattern}, + ) + + if any(cached_files): + logger.info( + 'Cache files %s already exist. Delete to overwrite.', + cached_files, + ) + + if any(missing_files): + for feature, out_file in zip(missing_features, missing_files): + self._write_single( + feature=feature, + out_file=out_file, + chunks=chunks, + max_workers=max_workers, + mode=mode, + verbose=verbose, + attrs=attrs, + ) + logger.info('Finished writing %s', missing_files) + return missing_files + cached_files
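A minimal usage sketch (the ``handler`` providing ``.data`` is an assumed input; the pattern and chunk values are illustrative):

.. code-block:: python

    from sup3r.preprocessing.cachers.base import Cacher

    cacher = Cacher(
        data=handler.data,  # hypothetical DataHandler / Container
        cache_kwargs={
            'cache_pattern': './cache_{feature}.h5',
            'max_workers': 2,
            'chunks': {
                'u_10m': {'time': 20, 'south_north': 100, 'west_east': 100}
            },
        },
    )
    print(cacher.out_files)  # one file per feature, e.g. ./cache_u_10m.h5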
+ + +
+[docs] + @staticmethod + def parse_chunks(feature, chunks, dims): + """Parse chunks input to Cacher. Needs to be a dictionary of dimensions + and chunk values but parsed to a tuple for H5 caching.""" + + if isinstance(chunks, dict) and feature.lower() in _lowered(chunks): + chunks = {k.lower(): v for k, v in chunks.items()} + fchunks = chunks.get(feature, {}) + else: + fchunks = copy.deepcopy(chunks) + if isinstance(fchunks, dict): + fchunks = {d: fchunks.get(d, None) for d in dims} + if isinstance(fchunks, int): + fchunks = {feature: fchunks} + if any(chk is None for chk in fchunks): + fchunks = 'auto' + return fchunks
+ + +
+[docs] + @classmethod + def get_chunksizes(cls, dset, data, chunks): + """Get chunksizes after rechunking (could be undetermined beforehand + if ``chunks == 'auto'``) and return rechunked data.""" + data_var = data.coords[dset] if dset in data.coords else data[dset] + fchunk = cls.parse_chunks(dset, chunks, data_var.dims) + if isinstance(fchunk, dict): + fchunk = {k: v for k, v in fchunk.items() if k in data_var.dims} + + data_var = data_var.chunk(fchunk) + data_var = data_var.unify_chunks() + + chunksizes = tuple(d[0] for d in data_var.chunksizes.values()) + chunksizes = chunksizes if chunksizes else None + if chunksizes is not None: + chunkmem = np.prod(chunksizes) * data_var.dtype.itemsize / 1e9 + chunkmem = round(chunkmem, 3) + if chunkmem > 4: + msg = ( + 'Chunks cannot be larger than 4GB. Given chunksizes %s ' + 'result in %sGB. Will use chunksizes = None' + ) + logger.warning(msg, chunksizes, chunkmem) + warn(msg % (chunksizes, chunkmem)) + chunksizes = None + return data_var, chunksizes
+ + +
+[docs] + @classmethod + def add_coord_meta(cls, out_file, data): + """Add flattened coordinate meta to out_file. This is used for h5 + caching.""" + meta = pd.DataFrame() + for coord in Dimension.coords_2d(): + if coord in data: + meta[coord] = data[coord].data.flatten() + logger.info('Adding coordinate meta to %s', out_file) + with h5py.File(out_file, 'a') as f: + meta = to_records_array(meta) + f.create_dataset( + '/meta', shape=meta.shape, dtype=meta.dtype, data=meta + )
+ + +
+[docs] + @classmethod + def write_h5( + cls, + out_file, + data, + features='all', + chunks=None, + max_workers=None, + mode='w', + attrs=None, + verbose=False, # noqa # pylint: disable=W0613 + ): + """Cache data to h5 file using user provided chunks value. + + Parameters + ---------- + out_file : str + Name of file to write. Must have a .h5 extension. + data : Sup3rDataset | Sup3rX | xr.Dataset + Data to write to file. Comes from ``self.data``, so an + ``xr.Dataset`` like object with ``.dims`` and ``.coords`` + features : str | list + Name of feature(s) to write to file. + chunks : dict | None + Chunk sizes for coordinate dimensions. e.g. + ``{'u_10m': {'time': 10, 'south_north': 100, 'west_east': 100}}`` + max_workers : int | None + Number of workers to use for parallel writing of chunks + mode : str + Write mode for ``out_file``. Defaults to write. + attrs : dict | None + Optional attributes to write to file. Can specify dataset specific + attributes by adding a dictionary with the dataset name as a key. + e.g. {**global_attrs, dset: {...}} + verbose : bool + Dummy arg to match ``write_netcdf`` signature + """ + if len(data.dims) == 3: + data = data.transpose(Dimension.TIME, *Dimension.dims_2d()) + if features == 'all': + features = list(data.data_vars) + features = features if isinstance(features, list) else [features] + chunks = chunks or 'auto' + global_attrs = data.attrs.copy() + global_attrs.update(attrs or {}) + attrs = {k: safe_cast(v) for k, v in global_attrs.items()} + with h5py.File(out_file, mode) as f: + for k, v in attrs.items(): + f.attrs[k] = v + + coord_names = [ + crd for crd in data.coords if crd in Dimension.coords_4d() + ] + for dset in [*coord_names, *features]: + data_var, chunksizes = cls.get_chunksizes(dset, data, chunks) + + if dset == Dimension.TIME: + data_var = da.asarray(data_var.astype(int).data) + else: + data_var = data_var.data + + dset_name = dset + if dset == Dimension.TIME: + dset_name = 'time_index' + + logger.debug( + 'Adding %s to %s with chunks=%s and max_workers=%s', + dset, + out_file, + chunksizes, + max_workers, + ) + + d = f.create_dataset( + f'/{dset_name}', + dtype=data_var.dtype, + shape=data_var.shape, + chunks=chunksizes, + ) + if max_workers == 1: + da.store(data_var, d, scheduler='single-threaded') + else: + da.store( + data_var, + d, + scheduler='threads', + num_workers=max_workers, + ) + cls.add_coord_meta(out_file=out_file, data=data)
+ + +
+[docs] + @staticmethod + def get_chunk_slices(chunks, shape): + """Get slices used to write xarray data to netcdf file in chunks.""" + slices = [] + for i in range(len(shape)): + slice_ranges = [ + (slice(k, min(k + chunks[i], shape[i]))) + for k in range(0, shape[i], chunks[i]) + ] + slices.append(slice_ranges) + return list(itertools.product(*slices))
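A worked example of the slice generation (``get_chunk_slices`` is a staticmethod, so it can be called without instantiating ``Cacher``):

.. code-block:: python

    from sup3r.preprocessing.cachers.base import Cacher

    slices = Cacher.get_chunk_slices(chunks=(2, 3), shape=(4, 6))
    # [(slice(0, 2), slice(0, 3)), (slice(0, 2), slice(3, 6)),
    #  (slice(2, 4), slice(0, 3)), (slice(2, 4), slice(3, 6))]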
+ + +
+[docs] + @staticmethod + def write_chunk(out_file, dset, chunk_slice, chunk_data, msg=None): + """Add chunk to netcdf file.""" + if msg is not None: + logger.debug(msg) + with nc4.Dataset(out_file, 'a') as ds: + var = ds.variables[dset] + var[chunk_slice] = chunk_data
+ + +
+[docs] + @classmethod + def write_netcdf_chunks( + cls, + out_file, + feature, + data, + chunks=None, + max_workers=None, + verbose=False, + ): + """Write netcdf chunks with delayed dask tasks.""" + tasks = [] + data_var = data[feature] + data_var, chunksizes = cls.get_chunksizes(feature, data, chunks) + chunksizes = data_var.shape if chunksizes is None else chunksizes + chunk_slices = cls.get_chunk_slices(chunksizes, data_var.shape) + logger.info( + 'Adding %s to %s with %s chunks and max_workers=%s. %s', + feature, + out_file, + len(chunk_slices), + max_workers, + _mem_check(), + ) + for i, chunk_slice in enumerate(chunk_slices): + msg = f'Writing chunk {i} / {len(chunk_slices)} to {out_file}' + msg = None if not verbose else msg + chunk = data_var.data[chunk_slice] + task = dask.delayed(cls.write_chunk)( + out_file, feature, chunk_slice, chunk, msg + ) + tasks.append(task) + if max_workers == 1: + dask.compute(*tasks, scheduler='single-threaded') + else: + dask.compute(*tasks, scheduler='threads', num_workers=max_workers)
+ + +
+[docs] + @classmethod + def write_netcdf( + cls, + out_file, + data, + features='all', + chunks=None, + max_workers=None, + mode='w', + attrs=None, + verbose=False, + ): + """Cache data to a netcdf file. + + Parameters + ---------- + out_file : str + Name of file to write. Must have a ``.nc`` extension. + data : Sup3rDataset + Data to write to file. Comes from ``self.data``, so a + ``Sup3rDataset`` with coords attributes + features : str | list + Names of feature(s) to write to file. + chunks : dict | None + Chunk sizes for coordinate dimensions. e.g. ``{'south_north': 100, + 'west_east': 100, 'time': 10}`` Can also include dataset specific + values. e.g. ``{'windspeed': {'south_north': 100, 'west_east': 100, + 'time': 10}}`` + max_workers : int | None + Number of workers to use for parallel writing of chunks + mode : str + Write mode for ``out_file``. Defaults to write. + attrs : dict | None + Optional attributes to write to file. Can specify dataset specific + attributes by adding a dictionary with the dataset name as a key. + e.g. {**global_attrs, dset: {...}} + verbose : bool + Whether to log output after each chunk is written. + """ + chunks = chunks or 'auto' + global_attrs = data.attrs.copy() + global_attrs.update(attrs or {}) + attrs = global_attrs.copy() + if features == 'all': + features = list(data.data_vars) + features = features if isinstance(features, list) else [features] + with nc4.Dataset(out_file, mode, format='NETCDF4') as ncfile: + for dim_name, dim_size in data.sizes.items(): + ncfile.createDimension(dim_name, dim_size) + + coord_names = [ + crd for crd in data.coords if crd in Dimension.coords_4d() + ] + for dset in [*coord_names, *features]: + data_var, chunksizes = cls.get_chunksizes(dset, data, chunks) + + if dset == Dimension.TIME: + data_var = data_var.astype(int) + + dout = ncfile.createVariable( + dset, data_var.dtype, data_var.dims, chunksizes=chunksizes + ) + var_attrs = data_var.attrs.copy() + var_attrs.update(attrs.pop(dset, {})) + for attr_name, attr_value in var_attrs.items(): + dout.setncattr(attr_name, safe_cast(attr_value)) + + dout.coordinates = ' '.join(list(coord_names)) + + logger.debug( + 'Adding %s to %s with chunks=%s', + dset, + out_file, + chunksizes, + ) + + if dset in data.coords: + ncfile.variables[dset][:] = np.asarray(data_var.data) + + for attr_name, attr_value in attrs.items(): + attr_value = safe_cast(attr_value) + try: + ncfile.setncattr(attr_name, attr_value) + except Exception as e: + msg = (f'Could not write {attr_name} as attribute, ' + f'serializing with json dumps, ' + f'received error: "{e}"') + logger.warning(msg) + warn(msg) + ncfile.setncattr(attr_name, safe_serialize(attr_value)) + + for feature in features: + cls.write_netcdf_chunks( + out_file=out_file, + feature=feature, + data=data, + chunks=chunks, + max_workers=max_workers, + verbose=verbose, + ) + + logger.info('Finished writing %s to %s', features, out_file)
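A matching sketch for the netcdf writer, again with synthetic data and the same ``Cacher`` assumption; the per-feature ``chunks`` entry and ``max_workers=2`` exercise the delayed chunk writes in ``write_netcdf_chunks``:

```python
import dask.array as da
import numpy as np
import pandas as pd
import xarray as xr

from sup3r.preprocessing.cachers import Cacher  # assumed home of write_netcdf

times = pd.date_range('2020-01-01', periods=48, freq='h')
ds = xr.Dataset(
    data_vars={
        'windspeed': (
            ('time', 'south_north', 'west_east'),
            # dask-backed feature so chunk writes stay lazy until computed
            da.random.random((48, 8, 8), chunks=(24, 8, 8)),
        )
    },
    coords={
        'time': times,
        'latitude': (('south_north', 'west_east'), np.full((8, 8), 40.0)),
        'longitude': (('south_north', 'west_east'), np.full((8, 8), -105.0)),
    },
)

Cacher.write_netcdf(
    'windspeed_cache.nc',
    ds,
    features=['windspeed'],
    chunks={'windspeed': {'time': 24, 'south_north': 8, 'west_east': 8}},
    max_workers=2,  # chunks written as delayed dask tasks in threads
)
```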
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/collections/base.html b/_modules/sup3r/preprocessing/collections/base.html
new file mode 100644
index 000000000..c1add78a7
--- /dev/null
+++ b/_modules/sup3r/preprocessing/collections/base.html
@@ -0,0 +1,746 @@
+sup3r.preprocessing.collections.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.collections.base

+"""Base collection classes. These are objects that contain sets / lists of
+containers like batch handlers. Of course these also contain data so they're
+containers too!
+
+TODO: https://github.com/xarray-contrib/datatree could unify Sup3rDataset and
+collections of data. Consider migrating once datatree has been fully
+integrated into xarray (in progress as of 8/8/2024)
+"""
+
+from typing import TYPE_CHECKING, List, Union
+
+import numpy as np
+
+from sup3r.preprocessing.base import Container
+
+if TYPE_CHECKING:
+    from sup3r.preprocessing.samplers.base import Sampler
+    from sup3r.preprocessing.samplers.dual import DualSampler
+
+
+
+[docs] +class Collection(Container): + """Object consisting of a set of containers. These objects are distinct + from :class:`~sup3r.preprocessing.base.Sup3rDataset` objects, which also + contain multiple data members, because these members are completely + independent of each other. They are collected together for the purpose of + expanding a training dataset (e.g. BatchHandlers).""" + + def __init__( + self, + containers: Union[ + List['Container'], + List['Sampler'], + List['DualSampler'], + ], + ): + super().__init__() + self.data = tuple(c.data for c in containers) + self.containers = containers + self._features: List = [] + + @property + def features(self): + """Get all features contained in data.""" + if not self._features: + _ = [ + self._features.append(f) + for f in np.concatenate([d.features for d in self.data]) + if f not in self._features + ] + return self._features + + @property + def container_weights(self): + """Get weights used to sample from different containers based on + relative sizes""" + sizes = [c.size for c in self.containers] + weights = sizes / np.sum(sizes) + return weights.astype(np.float32) + + def __getattr__(self, attr): + """Get attributes from self or the first container in the + collection.""" + if attr in dir(self): + return self.__getattribute__(attr) + return self.check_shared_attr(attr) + +
+[docs] + def check_shared_attr(self, attr): + """Check if all containers have the same value for `attr`. If they do + the collection effectively inherits those attributes.""" + msg = f'Not all collection containers have the same value for {attr}' + out = getattr(self.containers[0], attr, None) + if isinstance(out, (np.ndarray, list, tuple)): + check = all( + np.array_equal(getattr(c, attr, None), out) + for c in self.containers + ) + else: + check = all(getattr(c, attr, None) == out for c in self.containers) + assert check, msg + return out
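The ``container_weights`` property above is what batch handlers use to sample proportionally from the members of a collection; a standalone sketch of the computation:

```python
import numpy as np

# Containers are sampled proportionally to their sizes: a container with
# 300 samples is drawn three times as often as one with 100.
sizes = np.array([100, 300])
weights = (sizes / np.sum(sizes)).astype(np.float32)
print(weights)  # [0.25 0.75]
```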
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/collections/stats.html b/_modules/sup3r/preprocessing/collections/stats.html
new file mode 100644
index 000000000..48184a923
--- /dev/null
+++ b/_modules/sup3r/preprocessing/collections/stats.html
@@ -0,0 +1,844 @@
+sup3r.preprocessing.collections.stats — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.collections.stats

+"""Collection object with methods to compute and save stats."""
+
+import logging
+import os
+import pprint
+from warnings import warn
+
+import numpy as np
+import xarray as xr
+from rex import safe_json_load
+
+from sup3r.preprocessing.utilities import log_args
+from sup3r.utilities.utilities import safe_serialize
+
+from .base import Collection
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class StatsCollection(Collection): + """Extended collection object with methods for computing means and stds and + saving these to files. + + Note + ---- + We write stats as float64 because float32 is not json serializable + """ + + @log_args + def __init__(self, containers, means=None, stds=None): + """ + Parameters + ---------- + containers: List[Rasterizer] + List of containers to compute stats for. + means : str | dict | None + Usually a file path for saving results, or None for just + calculating stats and not saving. Can also be a dict, which will + just get returned as the "result". + stds : str | dict | None + Usually a file path for saving results, or None for just + calculating stats and not saving. Can also be a dict, which will + just get returned as the "result". + """ + super().__init__(containers=containers) + self.means = self.get_means(means) + self.stds = self.get_stds(stds) + self.save_stats(stds=stds, means=means) + self.normalize(containers) + + def _get_stat(self, stat_type, needed_features='all'): + """Get either mean or std for all features and all containers.""" + all_feats = ( + self.features if needed_features == 'all' else needed_features + ) + hr_feats = set(self.containers[0].high_res.features).intersection( + all_feats + ) + lr_feats = set(all_feats) - set(hr_feats) + + cstats = [ + getattr(c.high_res[hr_feats], stat_type)(skipna=True) + for c in self.containers + ] + if any(lr_feats): + cstats_lr = [ + getattr(c.low_res[lr_feats], stat_type)(skipna=True) + for c in self.containers + ] + cstats = [ + xr.merge([c._ds, c_lr._ds]) + for c, c_lr in zip(cstats, cstats_lr) + ] + return cstats + + def _init_stats_dict(self, stats): + """Initialize dictionary for stds or means from given input. Check if + any existing stats are provided.""" + if isinstance(stats, str) and os.path.exists(stats): + stats = safe_json_load(stats) + elif stats is None or isinstance(stats, str): + stats = {} + else: + msg = ( + f'Received incompatible type {type(stats)}. Need a file ' + 'path or dictionary' + ) + assert isinstance(stats, dict), msg + if ( + isinstance(stats, dict) + and stats != {} + and any(f not in stats for f in self.features) + ): + msg = ( + f'Not all features ({self.features}) are found in the given ' + f'stats dictionary {stats}. This is obviously from a prior ' + 'run so you better be sure these stats carry over.' + ) + logger.warning(msg) + warn(msg) + return stats + +
+[docs] + def get_means(self, means): + """Dictionary of means for each feature, computed across all data + handlers.""" + means = self._init_stats_dict(means) + needed_features = set(self.features) - set(means) + if any(needed_features): + logger.info(f'Getting means for {needed_features}.') + cmeans = [ + cm * w + for cm, w in zip( + self._get_stat('mean', needed_features), + self.container_weights, + ) + ] + for f in needed_features: + logger.info(f'Computing mean for {f}.') + means[f] = np.float32(np.sum([cm[f] for cm in cmeans])) + return means
+ + +
+[docs]
+    def get_stds(self, stds):
+        """Dictionary of standard deviations for each feature, computed across
+        all data handlers."""
+        stds = self._init_stats_dict(stds)
+        needed_features = set(self.features) - set(stds)
+        if any(needed_features):
+            logger.info(f'Getting stds for {needed_features}.')
+            cstds = [
+                w * cm**2
+                for cm, w in zip(
+                    self._get_stat('std', needed_features),
+                    self.container_weights,
+                )
+            ]
+            for f in needed_features:
+                logger.info(f'Computing std for {f}.')
+                stds[f] = np.float32(np.sqrt(np.sum([cs[f] for cs in cstds])))
+        return stds
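The combination rule in ``get_means`` / ``get_stds`` is size-weighted pooling: means are weighted sums, and stds are the square root of the weighted sum of squared per-container stds (note this pooling does not account for differences between container means). A numeric sketch:

```python
import numpy as np

# Two containers with weights 0.25 / 0.75 (from container_weights)
weights = np.array([0.25, 0.75])
means = np.array([1.0, 3.0])   # per-container feature means
stds = np.array([0.5, 1.0])    # per-container feature stds

combined_mean = np.sum(weights * means)            # 2.5
combined_std = np.sqrt(np.sum(weights * stds**2))  # ~0.90
```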
+ + + @staticmethod + def _added_stats(fp, stat_dict): + """Check if stats were added to the given file or not.""" + return any(f not in safe_json_load(fp) for f in stat_dict) + +
+[docs] + def save_stats(self, stds, means): + """Save stats to json files.""" + if isinstance(stds, str) and ( + not os.path.exists(stds) or self._added_stats(stds, self.stds) + ): + with open(stds, 'w') as f: + f.write(safe_serialize(self.stds)) + logger.info( + f'Saved standard deviations {self.stds} to {stds}.' + ) + if isinstance(means, str) and ( + not os.path.exists(means) or self._added_stats(means, self.means) + ): + with open(means, 'w') as f: + f.write(safe_serialize(self.means)) + logger.info(f'Saved means {self.means} to {means}.')
+ + +
+[docs] + def normalize(self, containers): + """Normalize container data with computed stats.""" + logger.debug( + 'Normalizing containers with:\n' + f'means: {pprint.pformat(self.means, indent=2)}\n' + f'stds: {pprint.pformat(self.stds, indent=2)}' + ) + for i, c in enumerate(containers): + logger.info(f'Normalizing container {i + 1}') + c.normalize(means=self.means, stds=self.stds)
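A usage sketch, with hypothetical handler instances standing in for real data handlers:

```python
from sup3r.preprocessing.collections.stats import StatsCollection

# handler_a / handler_b are hypothetical DataHandler instances created
# elsewhere; the json paths are written on first run and re-loaded after.
stats = StatsCollection(
    [handler_a, handler_b],
    means='means.json',
    stds='stds.json',
)
# the containers are normalized in place during __init__
print(stats.means, stats.stds)
```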
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/data_handlers/exo.html b/_modules/sup3r/preprocessing/data_handlers/exo.html
new file mode 100644
index 000000000..1dc1a9c31
--- /dev/null
+++ b/_modules/sup3r/preprocessing/data_handlers/exo.html
@@ -0,0 +1,1196 @@
+sup3r.preprocessing.data_handlers.exo — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.data_handlers.exo

+"""Exogenous data handler and related objects. The ExoDataHandler performs
+exogenous data rasterization for one or more model steps for requested
+features."""
+
+import logging
+import pathlib
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Optional, Union
+
+import numpy as np
+
+from sup3r.preprocessing.rasterizers import ExoRasterizer
+from sup3r.preprocessing.utilities import _lowered, log_args
+
+if TYPE_CHECKING:
+    from sup3r.models import MultiStepGan, Sup3rGan
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class SingleExoDataStep(dict):
+    """Special dictionary class for exogenous_data step"""
+
+    def __init__(self, feature, combine_type, model, data):
+        """exogenous_data step dictionary for a given model step
+
+        Parameters
+        ----------
+        feature : str
+            Name of feature corresponding to ``data``.
+        combine_type : str
+            Specifies how the exogenous_data should be used for this step.
+            e.g. "input", "layer", "output". For example, if this equals
+            "input" the ``data`` will be used as input to the forward pass
+            for the model step given by ``model``
+        model : int
+            Specifies the model index which will use the ``data``. For
+            example, if ``model`` == 1 then the ``data`` will be used
+            according to ``combine_type`` in the 2nd model step in a
+            MultiStepGan.
+        data : Union[np.ndarray, da.core.Array]
+            The data to be used for the given model step.
+        """
+        step = {'model': model, 'combine_type': combine_type, 'data': data}
+        for k, v in step.items():
+            self.__setitem__(k, v)
+        self.feature = feature
+
+    @property
+    def shape(self):
+        """Shape of data array for this model step."""
+        return self['data'].shape
+ + + +
+[docs]
+class ExoData(dict):
+    """Special dictionary class for multiple exogenous_data steps
+
+    TODO: Can we simplify this by relying more on xr.Dataset meta data instead
+    of storing enhancement factors for each step? Seems like we could take the
+    highest res data and coarsen based on s/t enhance, also.
+    """
+
+    def __init__(self, steps):
+        """Combine multiple SingleExoDataStep objects
+
+        Parameters
+        ----------
+        steps : dict
+            Dictionary with feature keys each with entries describing whether
+            features should be combined at input, a mid network layer, or with
+            output. e.g.::
+
+                \b
+                {
+                    'topography': {
+                        'steps': [
+                            {'combine_type': 'input', 'model': 0, 'data': ...},
+                            {'combine_type': 'layer', 'model': 0, 'data': ...}]
+                    }
+                }
+
+            Each array under the 'data' key has 3D or 4D shape:
+            (spatial_1, spatial_2, 1) or
+            (spatial_1, spatial_2, n_temporal, 1)
+        """  # noqa : D301
+        if isinstance(steps, dict):
+            self.update(steps)
+
+        else:
+            msg = 'ExoData must be initialized with a dictionary of features.'
+            logger.error(msg)
+            raise ValueError(msg)
+[docs] + def get_model_step_exo(self, model_step): + """Get the exogenous data for the given model_step from the full list + of steps + + Parameters + ---------- + model_step : int + Index of the model to get exogenous data for. + + Returns + ------- + model_step_exo : dict + Dictionary of features each with list of steps which match the + given model_step + """ + model_step_exo = {} + for feature, entry in self.items(): + steps = [ + step for step in entry['steps'] if step['model'] == model_step + ] + if steps: + model_step_exo[feature] = {'steps': steps} + return ExoData(model_step_exo)
+ + + @staticmethod + def _get_bounded_steps(steps, min_step, max_step=None): + """Get the steps within `steps` which have a model index between min + and max step.""" + if max_step is not None: + return [ + s + for s in steps + if (s['model'] < max_step and min_step <= s['model']) + ] + return [s for s in steps if min_step <= s['model']] + +
+[docs] + def split(self, split_steps): + """Split ``self`` into multiple ``ExoData`` objects based on + ``split_steps``. The splits are done such that the steps in the ith + entry of the returned list all have a + ``model number < split_steps[i].`` + + Note + ---- + This is used for multi-step models to correctly distribute the set of + all exo data steps to the appropriate models. For example, + :class:`~sup3r.models.MultiStepGan` models with some + temporal (spatial) steps followed by some spatial (temporal) steps. The + temporal (spatial) models might take the first N exo data steps and + then the spatial (temporal) models will take the remaining exo data + steps. + + Parameters + ---------- + split_steps : list + Step index list to use for splitting. To split this into exo data + for spatial models and temporal models split_steps should be + ``[len(spatial_models)]``. If this is for a + :class:`~sup3r.models.MultiStepGan` model with temporal steps + first, ``split_steps`` should be ``[len(temporal_models)]``. If + this is for a multi step model composed of more than two models + (e.g. :class:`~sup3r.models.SolarMultiStepGan`) ``split_steps`` + should be ``[len(spatial_solar_models), len(spatial_solar_models) + + len(spatial_wind_models)]`` + + Returns + ------- + split_list : List[ExoData] + List of ``ExoData`` objects coming from the split of ``self``, + according to ``split_steps`` + """ + split_dict = {i: {} for i in range(len(split_steps) + 1)} + split_steps = [0, *split_steps] if split_steps[0] != 0 else split_steps + for feature, entry in self.items(): + for i, min_step in enumerate(split_steps): + max_step = ( + None if min_step == split_steps[-1] else split_steps[i + 1] + ) + steps_i = self._get_bounded_steps( + steps=entry['steps'], min_step=min_step, max_step=max_step + ) + for s in steps_i: + s.update({'model': s['model'] - min_step}) + if any(steps_i): + split_dict[i][feature] = {'steps': steps_i} + return [ExoData(split) for split in split_dict.values()]
+ + +
+[docs] + def get_combine_type_data(self, feature, combine_type, model_step=None): + """Get exogenous data for given feature which is used according to the + given combine_type (input/output/layer) for this model_step. + + Parameters + ---------- + feature : str + Name of exogenous feature to get data for + combine_type : str + Usage type for requested data. e.g input/output/layer + model_step : int | None + Model step the data will be used for. If this is not None then + only steps with self[feature]['steps'][:]['model'] == model_step + will be searched for data. + + Returns + ------- + data : tf.Tensor | np.ndarray + Exogenous data for given parameters + """ + tmp = self[feature] + if model_step is not None: + tmp = {k: v for k, v in tmp.items() if v['model'] == model_step} + combine_types = [step['combine_type'] for step in tmp['steps']] + msg = ( + 'Received exogenous_data without any combine_type ' + f'= "{combine_type}" steps' + ) + assert combine_type in combine_types, msg + return tmp['steps'][combine_types.index(combine_type)]['data']
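A small sketch of the ``ExoData`` layout and the ``get_combine_type_data`` lookup, with hypothetical shapes and enhancement factors:

```python
import numpy as np

from sup3r.preprocessing.data_handlers.exo import ExoData

# Hypothetical two-step topography entry: used as model input for step 0
# (low-res) and again as a mid-network layer (3x spatially enhanced).
steps = {
    'topography': {
        'steps': [
            {'model': 0, 'combine_type': 'input',
             's_enhance': 1, 't_enhance': 1,
             'data': np.zeros((10, 10, 1))},
            {'model': 0, 'combine_type': 'layer',
             's_enhance': 3, 't_enhance': 1,
             'data': np.zeros((30, 30, 1))},
        ]
    }
}
exo = ExoData(steps)
print(exo.get_combine_type_data('topography', 'layer').shape)  # (30, 30, 1)
```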
+ + + @staticmethod + def _get_enhanced_slices(lr_slices, step): + """Get lr_slices enhanced by the ratio of exo_data_shape to + input_data_shape. Used to slice exo data for each model step.""" + exo_slices = [] + for enhance, lr_slice in zip( + [step['s_enhance'], step['s_enhance'], step['t_enhance']], + lr_slices, + ): + exo_slc = slice(lr_slice.start * enhance, lr_slice.stop * enhance) + exo_slices.append(exo_slc) + return exo_slices + +
+[docs]
+    def get_chunk(self, lr_slices):
+        """Get the data for all model steps corresponding to the low res
+        extent selected by `lr_slices`
+
+        Parameters
+        ----------
+        lr_slices : list
+            List of spatiotemporal slices which specify the extent of the
+            low-resolution input data.
+
+        Returns
+        -------
+        exo_data : ExoData
+            :class:`ExoData` object composed of multiple
+            :class:`SingleExoDataStep` objects. This is the sliced exo data
+            for the extent specified by `lr_slices`.
+        """
+        logger.debug(f'Getting exo data chunk for lr_slices={lr_slices}.')
+        exo_chunk = {f: {'steps': []} for f in self}
+        for feature in self:
+            for step in self[feature]['steps']:
+                exo_slices = self._get_enhanced_slices(
+                    lr_slices=lr_slices, step=step
+                )
+                chunk_step = {}
+                for k, v in step.items():
+                    if k == 'data':
+                        # last dimension is feature channel, so we use only
+                        # the spatial slices if data is 2d and all slices
+                        # otherwise
+                        chunk_step[k] = v[
+                            tuple(exo_slices)[: len(v.shape) - 1]
+                        ]
+                    else:
+                        chunk_step[k] = v
+                exo_chunk[feature]['steps'].append(chunk_step)
+        return exo_chunk
+
+ + + +
+[docs] +@dataclass +class ExoDataHandler: + """Class to rasterize exogenous features for multistep forward passes. e.g. + Multiple topography arrays at different resolutions for multiple spatial + enhancement steps. + + This takes a list of models and uses the different sets of models features + to retrieve and rasterize exogenous data according to the requested target + coordinate and grid shape, for each model step. + + Parameters + ---------- + file_paths : str | list + A single source h5 file or netcdf file to extract raster data from. The + string can be a unix-style file path which will be passed through + glob.glob. This is typically low-res WRF output or GCM netcdf data that + is source low-resolution data intended to be sup3r resolved. + feature : str + Exogenous feature to extract from file_paths + model : Sup3rGan | MultiStepGan + Model used to get exogenous data. If a ``MultiStepGan`` + ``lr_features``, ``hr_exo_features``, and ``hr_out_features`` will be + checked for each model in ``model.models`` and exogenous data will be + retrieved based on the resolution required for that type of feature. + e.g. If a model has topography as a lr and hr_exo feature, and the + model performs 5x spatial enhancement with an input resolution of 30km + then topography at 30km and at 6km will be retrieved. Either this or + list of steps needs to be provided. + steps : list + List of dictionaries containing info on which models to use for a given + step index and what type of exo data the step requires. e.g.:: + [{'model': 0, 'combine_type': 'input'}, + {'model': 0, 'combine_type': 'layer'}] + Each step entry can also contain enhancement factors. e.g.:: + [{'model': 0, 'combine_type': 'input', 's_enhance': 1, 't_enhance': 1}, + {'model': 0, 'combine_type': 'layer', 's_enhance': 3, 't_enhance': 1}] + source_file : str + Filepath to source wtk, nsrdb, or netcdf file to get hi-res data from + which will be mapped to the enhanced grid of the file_paths input. + Pixels from this file will be mapped to their nearest low-res pixel in + the file_paths input. Accordingly, the input should be a significantly + higher resolution than file_paths. Warnings will be raised if the + low-resolution pixels in file_paths do not have unique nearest pixels + from this exo source data. + input_handler_name : str + data handler class used by the exo handler. Provide a string name to + match a :class:`~sup3r.preprocessing.rasterizers.Rasterizer`. If None + the correct handler will be guessed based on file type and time series + properties. This is passed directly to the exo handler, along with + input_handler_kwargs + input_handler_kwargs : dict | None + Any kwargs for initializing the ``input_handler_name`` class used by + the exo handler. + cache_dir : str | None + Directory for storing cache data. Default is './exo_cache'. If None + then no data will be cached. + chunks : str | dict + Dictionary of dimension chunk sizes for returned exo data. e.g. + {'time': 100, 'south_north': 100, 'west_east': 100}. This can also just + be "auto". This is passed to ``.chunk()`` before returning exo data + through ``.data`` attribute + distance_upper_bound : float | None + Maximum distance to map high-resolution data from source_file to the + low-resolution file_paths input. 
None (default) will calculate this
+        based on the median distance between points in source_file
+    """
+
+    file_paths: Union[str, list, pathlib.Path]
+    feature: str
+    model: Optional[Union['Sup3rGan', 'MultiStepGan']] = None
+    steps: Optional[list] = None
+    source_file: Optional[str] = None
+    input_handler_name: Optional[str] = None
+    input_handler_kwargs: Optional[dict] = None
+    cache_dir: str = './exo_cache'
+    chunks: Optional[Union[str, dict]] = 'auto'
+    distance_upper_bound: Optional[float] = None
+
+    @log_args
+    def __post_init__(self):
+        """Get list of steps with types of exogenous data needed for
+        retrieval, initialize `self.data`, and update `self.data` for each
+        model step with rasterized exo data."""
+        self.models = getattr(self.model, 'models', [self.model])
+        if self.steps is None:
+            self.steps = self.get_exo_steps(self.feature, self.models)
+        else:
+            en_check = all('s_enhance' in v for v in self.steps)
+            en_check = en_check and all('t_enhance' in v for v in self.steps)
+            en_check = en_check or self.models is not None
+            msg = (
+                f'{self.__class__.__name__} needs s_enhance and t_enhance '
+                'provided in each step in steps list or models'
+            )
+            assert en_check, msg
+        self.s_enhancements, self.t_enhancements = self._get_all_enhancement()
+        msg = (
+            'Need to provide s_enhance and t_enhance for each model '
+            'step. If the step is temporal only (spatial only) then '
+            's_enhance = 1 (t_enhance = 1).'
+        )
+        assert not any(s is None for s in self.s_enhancements), msg
+        assert not any(t is None for t in self.t_enhancements), msg
+        self.data = self.get_all_step_data()
+[docs] + @classmethod + def get_exo_steps(cls, feature, models): + """Get list of steps describing how to use exogenous data for the given + feature in the list of given models. This checks the input and + exo feature lists for each model step and adds that step if the + given feature is found in the list.""" + steps = [] + for i, model in enumerate(models): + is_sfc_model = model.__class__.__name__ == 'SurfaceSpatialMetModel' + if feature.lower() in _lowered(model.lr_features) or is_sfc_model: + steps.append({'model': i, 'combine_type': 'input'}) + if feature.lower() in _lowered(model.hr_exo_features): + steps.append({'model': i, 'combine_type': 'layer'}) + if ( + feature.lower() in _lowered(model.hr_out_features) + or is_sfc_model + ): + steps.append({'model': i, 'combine_type': 'output'}) + return steps
+ + +
+[docs] + def get_exo_rasterizer(self, s_enhance, t_enhance): + """Get exo rasterizer instance for given enhancement factors""" + return ExoRasterizer( + file_paths=self.file_paths, + source_file=self.source_file, + feature=self.feature, + s_enhance=s_enhance, + t_enhance=t_enhance, + input_handler_name=self.input_handler_name, + input_handler_kwargs=self.input_handler_kwargs, + cache_dir=self.cache_dir, + chunks=self.chunks, + distance_upper_bound=self.distance_upper_bound, + )
+ + +
+[docs] + def get_single_step_data(self, s_enhance, t_enhance): + """Get exo data for a single model step, with specific enhancement + factors.""" + return self.get_exo_rasterizer(s_enhance, t_enhance).data
+ + + @property + def cache_files(self): + """Get exo data cache file for all enhancement factors""" + return [ + self.get_exo_rasterizer(s_en, t_en).cache_file + for s_en, t_en in zip(self.s_enhancements, self.t_enhancements) + ] + +
+[docs] + def get_all_step_data(self): + """Get exo data for each model step.""" + data = {self.feature: {'steps': []}} + for i, (s_enhance, t_enhance) in enumerate( + zip(self.s_enhancements, self.t_enhancements) + ): + step_data = self.get_single_step_data( + s_enhance=s_enhance, t_enhance=t_enhance + ) + step = SingleExoDataStep( + self.feature, + self.steps[i]['combine_type'], + self.steps[i]['model'], + data=step_data.as_array(), + ) + step['s_enhance'] = s_enhance + step['t_enhance'] = t_enhance + data[self.feature]['steps'].append(step) + shapes = [ + None if step is None else step.shape + for step in data[self.feature]['steps'] + ] + logger.info( + 'Got exogenous_data of length {} with shapes: {}'.format( + len(data[self.feature]['steps']), shapes + ) + ) + return data
+
+
+    def _get_single_step_enhance(self, step):
+        """Get enhancement factors for exogenous data extraction using a
+        single exo_kwargs model step. These factors are computed using
+        stored enhance attributes of each model and the model step provided.
+        If enhancement factors are already provided in step they are not
+        overwritten.
+
+        Parameters
+        ----------
+        step : dict
+            Model step dictionary. e.g. {'model': 0, 'combine_type': 'input'}
+
+        Returns
+        -------
+        updated_step : dict
+            Same as input dictionary with s_enhance, t_enhance added
+        """
+        if all(key in step for key in ['s_enhance', 't_enhance']):
+            return step
+
+        mstep = step['model']
+        combine_type = step.get('combine_type', None)
+        msg = (
+            f'Model index from exo_kwargs ({mstep}) exceeds number '
+            f'of model steps ({len(self.models)})'
+        )
+        assert len(self.models) > mstep, msg
+        msg = (
+            'Received exo_kwargs entry without valid combine_type '
+            '(input/layer/output)'
+        )
+        assert combine_type.lower() in ('input', 'output', 'layer'), msg
+        if combine_type.lower() == 'input':
+            if mstep == 0:
+                s_enhance = 1
+                t_enhance = 1
+            else:
+                s_enhance = int(np.prod(self.model.s_enhancements[:mstep]))
+                t_enhance = int(np.prod(self.model.t_enhancements[:mstep]))
+
+        else:
+            s_enhance = int(np.prod(self.model.s_enhancements[: mstep + 1]))
+            t_enhance = int(np.prod(self.model.t_enhancements[: mstep + 1]))
+        step.update({'s_enhance': s_enhance, 't_enhance': t_enhance})
+        return step
+
+    def _get_all_enhancement(self):
+        """Compute enhancement factors for all model steps.
+
+        Returns
+        -------
+        s_enhancements: list
+            List of s_enhance factors for all model steps
+        t_enhancements: list
+            List of t_enhance factors for all model steps
+        """
+        for i, step in enumerate(self.steps):
+            out = self._get_single_step_enhance(step)
+            self.steps[i] = out
+        s_enhancements = [step['s_enhance'] for step in self.steps]
+        t_enhancements = [step['t_enhance'] for step in self.steps]
+        return s_enhancements, t_enhancements
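The cumulative-product logic in ``_get_single_step_enhance`` reduces to a couple of lines; a numeric sketch for a hypothetical two-model setup:

```python
import numpy as np

s_enhancements = [5, 3]  # hypothetical per-model spatial factors
mstep = 1

# 'input' exo data enters model `mstep`, so it is enhanced by all prior
# steps only; 'layer'/'output' data matches the model's own output.
s_in = int(np.prod(s_enhancements[:mstep]))        # 5
s_out = int(np.prod(s_enhancements[:mstep + 1]))   # 15
```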
+ +
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/data_handlers/factory.html b/_modules/sup3r/preprocessing/data_handlers/factory.html
new file mode 100644
index 000000000..ad17ad577
--- /dev/null
+++ b/_modules/sup3r/preprocessing/data_handlers/factory.html
@@ -0,0 +1,1050 @@
+sup3r.preprocessing.data_handlers.factory — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.data_handlers.factory

+"""DataHandler objects, which are built through composition of
+:class:`~sup3r.preprocessing.rasterizers.Rasterizer`,
+:class:`~sup3r.preprocessing.loaders.Loader`,
+:class:`~sup3r.preprocessing.derivers.Deriver`, and
+:class:`~sup3r.preprocessing.cachers.Cacher` classes.
+
+TODO: If ``.data`` is a ``Sup3rDataset`` with more than one member only the
+high resolution data is cached. We could extend caching and loading to write /
+load both with groups
+"""
+
+import logging
+from typing import Callable, Dict, Optional, Union
+
+from rex import MultiFileNSRDBX
+
+from sup3r.preprocessing.base import (
+    Sup3rDataset,
+)
+from sup3r.preprocessing.cachers import Cacher
+from sup3r.preprocessing.cachers.utilities import _check_for_cache
+from sup3r.preprocessing.derivers import Deriver
+from sup3r.preprocessing.derivers.methods import (
+    RegistryH5SolarCC,
+    RegistryH5WindCC,
+)
+from sup3r.preprocessing.loaders import Loader
+from sup3r.preprocessing.rasterizers import Rasterizer
+from sup3r.preprocessing.utilities import (
+    expand_paths,
+    log_args,
+    parse_to_list,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DataHandler(Deriver): + """Base DataHandler. Composes + :class:`~sup3r.preprocessing.rasterizers.Rasterizer`, + :class:`~sup3r.preprocessing.loaders.Loader`, + :class:`~sup3r.preprocessing.derivers.Deriver`, and + :class:`~sup3r.preprocessing.cachers.Cacher` classes.""" + + @log_args + def __init__( + self, + file_paths, + features='all', + res_kwargs: Optional[dict] = None, + chunks: Union[str, Dict[str, int]] = 'auto', + target: Optional[tuple] = None, + shape: Optional[tuple] = None, + time_slice: Union[slice, tuple, list, None] = slice(None), + threshold: Optional[float] = None, + time_roll: int = 0, + time_shift: Optional[int] = None, + hr_spatial_coarsen: int = 1, + nan_method_kwargs: Optional[dict] = None, + BaseLoader: Optional[Callable] = None, + FeatureRegistry: Optional[dict] = None, + interp_kwargs: Optional[dict] = None, + cache_kwargs: Optional[dict] = None, + **kwargs, + ): + """ + Parameters + ---------- + file_paths : str | list | pathlib.Path + file_paths input to LoaderClass + features : list | str + Features to load and / or derive. If 'all' then all available raw + features will be loaded. Specify explicit feature names for + derivations. + res_kwargs : dict + Additional keyword arguments passed through to the ``BaseLoader``. + BaseLoader is usually xr.open_mfdataset for NETCDF files and + MultiFileResourceX for H5 files. + chunks : dict | str + Dictionary of chunk sizes to pass through to + ``dask.array.from_array()`` or ``xr.Dataset().chunk()``. Will be + converted to a tuple when used in ``from_array()``. These are the + methods for H5 and NETCDF data, respectively. This argument can + be "auto" or None in addition to a dictionary. None will not do + any chunking and load data into memory as ``np.array`` + target : tuple + (lat, lon) lower left corner of raster. Either need target+shape + or raster_file. + shape : tuple + (rows, cols) grid size. Either need target+shape or raster_file. + time_slice : slice | list + Slice specifying extent and step of temporal extraction. e.g. + slice(start, stop, step). If equal to slice(None, None, 1) the full + time dimension is selected. Can be also be a list ``[start, stop, + step]`` + threshold : float + Nearest neighbor euclidean distance threshold. If the coordinates + are more than this value away from the target lat/lon, an error is + raised. + time_roll : int + Number of steps to roll along the time axis. `Passed to + xr.Dataset.roll()` + time_shift : int | None + Number of minutes to shift time axis. This can be used, for + example, to shift the time index for daily data so that the time + stamp for a given day starts at the zeroth minute instead of at + noon, as is the case for most GCM data. + hr_spatial_coarsen : int + Spatial coarsening factor. Passed to `xr.Dataset.coarsen()` + nan_method_kwargs : str | dict | None + Keyword arguments for nan handling. If 'mask', time steps with nans + will be dropped. Otherwise this should be a dict of kwargs which + will be passed to + :py:meth:`sup3r.preprocessing.accessor.Sup3rX.interpolate_na`. + BaseLoader : Callable + Base level file loader wrapped by + :class:`~sup3r.preprocessing.loaders.Loader`. This is usually + xr.open_mfdataset for NETCDF files and MultiFileResourceX for H5 + files. + FeatureRegistry : dict + Dictionary of + :class:`~sup3r.preprocessing.derivers.methods.DerivedFeature` + objects used for derivations + interp_kwargs : dict | None + Dictionary of kwargs for level interpolation. Can include "method" + and "run_level_check" keys. 
Method specifies how to perform height
+            interpolation. e.g. Deriving u_20m from u_10m and u_100m. Options
+            are "linear" and "log". See
+            :py:meth:`sup3r.preprocessing.derivers.Deriver.do_level_interpolation`
+        cache_kwargs : dict | None
+            Dictionary with kwargs for caching wrangled data. This should at
+            minimum include a `cache_pattern` key-value pair. This pattern
+            must have a {feature} format key and either an h5 or nc file
+            extension, based on desired output type. See :class:`Cacher` for
+            a description of more arguments.
+        kwargs : dict
+            Dictionary of additional keyword args for
+            :class:`~sup3r.preprocessing.rasterizers.Rasterizer`, used
+            specifically for rasterizing flattened data
+        """  # pylint: disable=line-too-long
+
+        features = parse_to_list(features=features)
+        cached_files, cached_features, _, missing_features = _check_for_cache(
+            features=features, cache_kwargs=cache_kwargs
+        )
+
+        just_coords = not features
+        raster_feats = 'all' if any(missing_features) else []
+        self.rasterizer = self.loader = self.cache = None
+
+        if any(cached_features):
+            self.cache = Loader(
+                file_paths=cached_files,
+                res_kwargs=res_kwargs,
+                chunks=chunks,
+                BaseLoader=BaseLoader,
+            )
+            self.rasterizer = self.loader = self.cache
+
+        if any(missing_features) or just_coords:
+            self.rasterizer = Rasterizer(
+                file_paths=file_paths,
+                res_kwargs=res_kwargs,
+                features=raster_feats,
+                chunks=chunks,
+                target=target,
+                shape=shape,
+                time_slice=time_slice,
+                threshold=threshold,
+                BaseLoader=BaseLoader,
+                **kwargs,
+            )
+            self.loader = self.rasterizer.loader
+
+        self.time_slice = self.rasterizer.time_slice
+        self.lat_lon = self.rasterizer.lat_lon
+        self._rasterizer_hook()
+        self.data = self.rasterizer
+
+        if missing_features:
+            super().__init__(
+                data=self.data,
+                features=features,
+                time_roll=time_roll,
+                time_shift=time_shift,
+                hr_spatial_coarsen=hr_spatial_coarsen,
+                nan_method_kwargs=nan_method_kwargs,
+                FeatureRegistry=FeatureRegistry,
+                interp_kwargs=interp_kwargs,
+            )
+
+        if self.cache is not None:
+            self.data[cached_features] = self.cache.data[cached_features]
+            self.rasterizer.file_paths = (
+                expand_paths(file_paths) + cached_files
+            )
+
+        if cache_kwargs is not None and 'cache_pattern' in cache_kwargs:
+            _ = Cacher(data=self.data, cache_kwargs=cache_kwargs)
+        self._deriver_hook()
+
+    def _rasterizer_hook(self):
+        """Hook in after rasterizer initialization. Implement this to
+        extend class functionality with operations after default rasterizer
+        initialization. e.g. If special methods are required to add more
+        data to the rasterized data or to perform some pre-processing
+        before derivations.
+
+        Examples
+        --------
+        - adding a special method to extract / regrid clearsky_ghi from an
+          nsrdb source file prior to derivation of clearsky_ratio.
+        - apply bias correction to rasterized data before deriving new
+          features
+        """
+
+    def _deriver_hook(self):
+        """Hook in after deriver initialization. Implement this to extend
+        class functionality with operations after default deriver
+        initialization. e.g. If special methods are required to derive
+        additional features which might depend on non-standard inputs (e.g.
+        other source files than those used by the loader)."""
+ + + +
+[docs] +class DailyDataHandler(DataHandler): + """General data handler class with daily data as an additional attribute. + xr.Dataset coarsen method employed to compute averages / mins / maxes over + daily windows. Special treatment of clearsky_ratio, which requires + derivation from total clearsky_ghi and total ghi. + + TODO: + (1) Not a fan of manually adding cs_ghi / ghi and then removing. Maybe + this could be handled through a derivation instead + + (2) We assume daily and hourly data here but we could generalize this to + go from daily -> any time step. This would then enable the CC models to do + arbitrary temporal enhancement. + """ + + def __init__(self, file_paths, features, **kwargs): + """Add features required for daily cs ratio derivation if not + requested.""" + + features = parse_to_list(features=features) + self.requested_features = features.copy() + if 'clearsky_ratio' in features: + needed = [ + f + for f in self.FEATURE_REGISTRY['clearsky_ratio'].inputs + if f not in features + ] + features.extend(needed) + super().__init__(file_paths=file_paths, features=features, **kwargs) + + _signature_objs = (__init__, DataHandler) + + def _deriver_hook(self): + """Hook to run daily coarsening calculations after derivations of + hourly variables. Replaces data with daily averages / maxes / mins + / sums""" + msg = ( + 'Data needs to be hourly with at least 24 hours, but data ' + 'shape is {}.'.format(self.data.shape) + ) + + day_steps = int(24 * 3600 / self.time_step) + assert len(self.time_index) % day_steps == 0, msg + assert len(self.time_index) > day_steps, msg + + n_data_days = int(len(self.time_index) / day_steps) + + logger.info( + 'Calculating daily average datasets for {} training ' + 'data days.'.format(n_data_days) + ) + daily_data = self.data.coarsen(time=day_steps).mean() + feats = [f for f in self.features if 'clearsky_ratio' not in f] + feats = ( + feats + if 'clearsky_ratio' not in self.features + else [*feats, 'total_clearsky_ghi', 'total_ghi'] + ) + for fname in feats: + if '_max_' in fname: + daily_data[fname] = ( + self.data[fname].coarsen(time=day_steps).max() + ) + if '_min_' in fname: + daily_data[fname] = ( + self.data[fname].coarsen(time=day_steps).min() + ) + if 'total_' in fname: + daily_data[fname] = ( + self.data[fname.split('total_')[-1]] + .coarsen(time=day_steps) + .sum() + ) + + if 'clearsky_ratio' in self.features: + daily_data['clearsky_ratio'] = ( + daily_data['total_ghi'] / daily_data['total_clearsky_ghi'] + ) + + logger.info( + 'Finished calculating daily average datasets for {} ' + 'training data days.'.format(n_data_days) + ) + hourly_data = self.data[self.requested_features] + daily_data = daily_data[self.requested_features] + hourly_data.attrs.update({'name': 'hourly'}) + daily_data.attrs.update({'name': 'daily'}) + self.data = Sup3rDataset(daily=daily_data, hourly=hourly_data)
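The hourly-to-daily bookkeeping in ``_deriver_hook`` reduces to simple arithmetic; a sketch:

```python
# time_step is seconds between steps; hourly data gives day_steps = 24, so
# one year of hourly data (8760 steps) coarsens to 365 daily values.
time_step = 3600
day_steps = int(24 * 3600 / time_step)  # 24
n_data_days = 8760 // day_steps         # 365
```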
+ + + +
+[docs]
+def DataHandlerFactory(cls, BaseLoader=None, FeatureRegistry=None, name=None):
+    """Build composite objects that load from file_paths, rasterize a
+    specified region, derive new features, and cache derived data.
+
+    Parameters
+    ----------
+    cls : Type[DataHandler]
+        DataHandler class to partially initialize, e.g. :class:`DataHandler`
+        or :class:`DailyDataHandler`
+    BaseLoader : Callable
+        Optional base loader update. The default for H5 is MultiFileWindX and
+        for NETCDF the default is xarray
+    FeatureRegistry : Dict[str, DerivedFeature]
+        Dictionary of compute methods for features. This is used to look up
+        how to derive features that are not contained in the raw loaded data.
+    name : str
+        Optional class name, used to resolve `repr(Class)` and distinguish
+        partially initialized DataHandlers with different FeatureRegistrys
+    """
+
+    class FactoryDataHandler(cls):
+        """FactoryDataHandler object. Is a partially initialized instance
+        with `BaseLoader`, `FeatureRegistry`, and `name` set."""
+
+        FEATURE_REGISTRY = FeatureRegistry or None
+        BASE_LOADER = BaseLoader or None
+        __name__ = name or 'FactoryDataHandler'
+
+        def __init__(self, file_paths, features='all', **kwargs):
+            """
+            Parameters
+            ----------
+            file_paths : str | list | pathlib.Path
+                file_paths input to LoaderClass
+            features : list | str
+                Features to load and / or derive. If 'all' then all available
+                raw features will be loaded. Specify explicit feature names
+                for derivations.
+            kwargs : dict
+                kwargs for parent class, except for FeatureRegistry and
+                BaseLoader
+            """
+            super().__init__(
+                file_paths,
+                features=features,
+                BaseLoader=self.BASE_LOADER,
+                FeatureRegistry=self.FEATURE_REGISTRY,
+                **kwargs,
+            )
+
+        _signature_objs = (cls,)
+        _skip_params = ('FeatureRegistry', 'BaseLoader')
+
+    return FactoryDataHandler
+ + + +DataHandlerH5SolarCC = DataHandlerFactory( + DailyDataHandler, + BaseLoader=MultiFileNSRDBX, + FeatureRegistry=RegistryH5SolarCC, + name='DataHandlerH5SolarCC', +) + + +DataHandlerH5WindCC = DataHandlerFactory( + DailyDataHandler, + BaseLoader=MultiFileNSRDBX, + FeatureRegistry=RegistryH5WindCC, + name='DataHandlerH5WindCC', +) +
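A sketch of building a specialized handler with the factory, reusing one of the registries imported above (the file path is a placeholder):

```python
from sup3r.preprocessing.data_handlers.factory import (
    DataHandler,
    DataHandlerFactory,
)
from sup3r.preprocessing.derivers.methods import RegistryH5WindCC

MyWindHandler = DataHandlerFactory(
    DataHandler,
    FeatureRegistry=RegistryH5WindCC,  # reuse an existing registry
    name='MyWindHandler',
)
# '/path/to/wtk*.h5' is a placeholder for real source files
handler = MyWindHandler('/path/to/wtk*.h5', features=['u_10m', 'v_10m'])
```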
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/data_handlers/nc_cc.html b/_modules/sup3r/preprocessing/data_handlers/nc_cc.html
new file mode 100644
index 000000000..c40d58d55
--- /dev/null
+++ b/_modules/sup3r/preprocessing/data_handlers/nc_cc.html
@@ -0,0 +1,912 @@
+sup3r.preprocessing.data_handlers.nc_cc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.data_handlers.nc_cc

+"""NETCDF DataHandler for climate change applications."""
+
+import logging
+import os
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+from scipy.spatial import KDTree
+from scipy.stats import mode
+
+from sup3r.preprocessing.derivers.methods import (
+    RegistryNCforCC,
+    RegistryNCforCCwithPowerLaw,
+)
+from sup3r.preprocessing.loaders import Loader
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import log_args
+
+from .factory import DataHandler, DataHandlerFactory
+
+logger = logging.getLogger(__name__)
+
+
+BaseNCforCC = DataHandlerFactory(DataHandler, FeatureRegistry=RegistryNCforCC)
+
+
+
+[docs]
+class DataHandlerNCforCC(BaseNCforCC):
+    """Extended NETCDF data handler. This implements a rasterizer hook to add
+    "clearsky_ghi" to the rasterized data if "clearsky_ghi" is requested."""
+
+    @log_args
+    def __init__(
+        self,
+        file_paths,
+        features='all',
+        nsrdb_source_fp=None,
+        nsrdb_agg=1,
+        nsrdb_smoothing=0,
+        **kwargs,
+    ):
+        """
+        Parameters
+        ----------
+        file_paths : str | list | pathlib.Path
+            file_paths input to
+            :class:`~sup3r.preprocessing.rasterizers.Rasterizer`
+        features : list
+            Features to derive from loaded data.
+        nsrdb_source_fp : str | None
+            Optional NSRDB source h5 file to retrieve clearsky_ghi from to
+            calculate CC clearsky_ratio along with rsds (ghi) from the CC
+            netcdf file.
+        nsrdb_agg : int
+            Optional number of NSRDB source pixels to aggregate clearsky_ghi
+            from to a single climate change netcdf pixel. This can be used if
+            the CC.nc data is at a much coarser resolution than the source
+            nsrdb data.
+        nsrdb_smoothing : float
+            Optional gaussian filter smoothing factor to smooth out
+            clearsky_ghi from high-resolution nsrdb source data. This is
+            typically done because spatially aggregated nsrdb data is still
+            usually rougher than CC irradiance data.
+        kwargs : dict
+            Same optional keyword arguments as parent class.
+        """
+        self._nsrdb_source_fp = nsrdb_source_fp
+        self._nsrdb_agg = nsrdb_agg
+        self._nsrdb_smoothing = nsrdb_smoothing
+        self._features = features
+        super().__init__(file_paths=file_paths, features=features, **kwargs)
+
+    _signature_objs = (__init__, BaseNCforCC)
+    _skip_params = ('name', 'FeatureRegistry')
+
+    def _rasterizer_hook(self):
+        """Rasterizer hook implementation to add 'clearsky_ghi' data to
+        rasterized data, which will then be used when the :class:`Deriver` is
+        called."""
+        cs_feats = ['clearsky_ratio', 'clearsky_ghi']
+        need_ghi = any(
+            f in self._features and f not in self.rasterizer for f in cs_feats
+        )
+        if need_ghi:
+            self.rasterizer.data['clearsky_ghi'] = self.get_clearsky_ghi()
+[docs]
+    def run_input_checks(self):
+        """Run checks on the files provided for extracting clearsky_ghi. Make
+        sure the loaded data is daily data and the step size is one day."""
+
+        msg = (
+            'Need nsrdb_source_fp input arg as a valid filepath to '
+            'retrieve clearsky_ghi (maybe for clearsky_ratio) but '
+            'received: {}'.format(self._nsrdb_source_fp)
+        )
+        assert self._nsrdb_source_fp is not None and os.path.exists(
+            self._nsrdb_source_fp
+        ), msg
+
+        msg = (
+            'Can only handle source CC data in daily frequency but '
+            'received frequency of {}hrs (should be 24) '
+            'with raw time index: {}'.format(
+                self.loader.time_step / 3600, self.rasterizer.time_index
+            )
+        )
+        assert self.loader.time_step / 3600 == 24.0, msg
+
+        msg = (
+            'Can only handle source CC data with time_slice.step == 1 '
+            'but received: {}'.format(self.rasterizer.time_slice.step)
+        )
+        assert (self.rasterizer.time_slice.step is None) | (
+            self.rasterizer.time_slice.step == 1
+        ), msg
+ + +
+[docs] + def run_wrap_checks(self, cs_ghi): + """Run check on rasterized data from clearsky_ghi source.""" + logger.info( + 'Reshaped clearsky_ghi data to final shape {} to ' + 'correspond with CC daily average data over source ' + 'time_slice {} with (lat, lon) grid shape of {}'.format( + cs_ghi.shape, + self.rasterizer.time_slice, + self.rasterizer.grid_shape, + ) + ) + msg = ( + 'nsrdb clearsky GHI time dimension {} ' + 'does not match the GCM time dimension {}'.format( + cs_ghi.shape[2], len(self.rasterizer.time_index) + ) + ) + assert cs_ghi.shape[2] == len(self.rasterizer.time_index), msg
+ + +
+[docs] + def get_time_slice(self, ti_nsrdb): + """Get nsrdb data time slice consistent with self.time_index.""" + t_start = np.where( + (self.rasterizer.time_index[0].month == ti_nsrdb.month) + & (self.rasterizer.time_index[0].day == ti_nsrdb.day) + )[0][0] + t_end = ( + 1 + + np.where( + (self.rasterizer.time_index[-1].month == ti_nsrdb.month) + & (self.rasterizer.time_index[-1].day == ti_nsrdb.day) + )[0][-1] + ) + t_slice = slice(t_start, t_end) + return t_slice
+ + +
+[docs] + def get_clearsky_ghi(self): + """Get clearsky ghi from an exogenous NSRDB source h5 file at the + target CC meta data and time index. + + TODO: Replace some of this with call to Regridder? Perform daily + means with self.loader.coarsen? + + Returns + ------- + cs_ghi : Union[np.ndarray, da.core.Array] + Clearsky ghi (W/m2) from the nsrdb_source_fp h5 source file. Data + shape is (lat, lon, time) where time is daily average values. + """ + self.run_input_checks() + + res = Loader(self._nsrdb_source_fp) + ti_nsrdb = res.time_index + t_slice = self.get_time_slice(ti_nsrdb) + cc_meta = self.lat_lon.reshape((-1, 2)) + + tree = KDTree(res.lat_lon) + _, i = tree.query(cc_meta, k=self._nsrdb_agg) + i = np.expand_dims(i, axis=1) if len(i.shape) == 1 else i + + logger.info( + 'Extracting clearsky_ghi data from "{}" with time slice ' + '{} and {} locations with agg factor {}.'.format( + os.path.basename(self._nsrdb_source_fp), + t_slice, + i.shape[0], + i.shape[1], + ) + ) + + cs_ghi = ( + res.data[['clearsky_ghi']] + .isel( + { + Dimension.FLATTENED_SPATIAL: i.flatten(), + Dimension.TIME: t_slice, + } + ) + .coarsen({Dimension.FLATTENED_SPATIAL: self._nsrdb_agg}) + .mean() + ) + time_freq = float( + mode( + (ti_nsrdb[1:] - ti_nsrdb[:-1]).seconds / 3600, keepdims=False + ).mode + ) + + cs_ghi = cs_ghi.coarsen({Dimension.TIME: int(24 // time_freq)}).mean() + lat_idx, lon_idx = ( + np.arange(self.rasterizer.grid_shape[0]), + np.arange(self.rasterizer.grid_shape[1]), + ) + ind = pd.MultiIndex.from_product( + (lat_idx, lon_idx), names=Dimension.dims_2d() + ) + cs_ghi = cs_ghi.assign({Dimension.FLATTENED_SPATIAL: ind}).unstack( + Dimension.FLATTENED_SPATIAL + ) + + cs_ghi = cs_ghi.transpose(*Dimension.dims_3d()) + + cs_ghi = cs_ghi['clearsky_ghi'].data + if cs_ghi.shape[-1] < len(self.rasterizer.time_index): + n = int( + da.ceil(len(self.rasterizer.time_index) / cs_ghi.shape[-1]) + ) + cs_ghi = da.repeat(cs_ghi, n, axis=2) + + cs_ghi = cs_ghi[..., : len(self.rasterizer.time_index)] + + self.run_wrap_checks(cs_ghi) + + return cs_ghi
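The nearest-neighbor aggregation at the core of ``get_clearsky_ghi`` can be sketched in isolation, with random coordinates standing in for the NSRDB and GCM grids:

```python
import numpy as np
from scipy.spatial import KDTree

rng = np.random.default_rng(0)
nsrdb_latlon = rng.random((1000, 2))  # high-res source coordinates
cc_latlon = rng.random((25, 2))       # coarse GCM pixel coordinates
cs_ghi = rng.random(1000)             # clearsky_ghi at one time step

# Each coarse pixel averages its nsrdb_agg nearest high-res neighbors
tree = KDTree(nsrdb_latlon)
_, idx = tree.query(cc_latlon, k=4)   # nsrdb_agg = 4
agg = cs_ghi[idx].mean(axis=1)
print(agg.shape)  # (25,)
```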
+
+ + + +
+[docs] +class DataHandlerNCforCCwithPowerLaw(DataHandlerNCforCC): + """Add power law wind methods to feature registry.""" + + FEATURE_REGISTRY = RegistryNCforCCwithPowerLaw
+ +
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/derivers/base.html b/_modules/sup3r/preprocessing/derivers/base.html
new file mode 100644
index 000000000..e510969c7
--- /dev/null
+++ b/_modules/sup3r/preprocessing/derivers/base.html
@@ -0,0 +1,1144 @@
+sup3r.preprocessing.derivers.base — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.derivers.base

+"""Basic objects that can perform derivations of new features from loaded /
+rasterized features."""
+
+import logging
+import re
+from inspect import signature
+from typing import Type, Union
+
+import dask.array as da
+import numpy as np
+import xarray as xr
+
+from sup3r.preprocessing.accessor import Sup3rX
+from sup3r.preprocessing.base import Container, Sup3rDataset
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import (
+    _rechunk_if_dask,
+    parse_to_list,
+)
+from sup3r.utilities.interpolation import Interpolator
+
+from .methods import DerivedFeature, RegistryBase
+from .utilities import parse_feature
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class BaseDeriver(Container): + """Container subclass with additional methods for transforming / deriving + data exposed through an :class:`Rasterizer` object.""" + + FEATURE_REGISTRY = RegistryBase + + def __init__( + self, + data: Union[Sup3rX, Sup3rDataset], + features, + FeatureRegistry=None, + interp_kwargs=None, + ): + """ + Parameters + ---------- + data : Union[Sup3rX, Sup3rDataset] + Data to use for derivations. Usually comes from the `.data` + attribute of a :class:`Rasterizer` object. + features : list + List of feature names to derive from the :class:`Rasterizer` data. + The :class:`Rasterizer` object contains the features available to + use in the derivation. e.g. rasterizer.features = ['windspeed', + 'winddirection'] with self.features = ['U', 'V'] + FeatureRegistry : Dict + Optional FeatureRegistry dictionary to use for derivation method + lookups. When the :class:`Deriver` is asked to derive a feature + that is not found in the :class:`Rasterizer` data it will look for + a method to derive the feature in the registry. + interp_kwargs : dict | None + Dictionary of kwargs for level interpolation. Can include "method" + and "run_level_check" keys. Method specifies how to perform height + interpolation. e.g. Deriving u_20m from u_10m and u_100m. Options + are "linear" and "log". See + :py:meth:`sup3r.preprocessing.derivers.Deriver.do_level_interpolation` + """ # pylint: disable=line-too-long + if FeatureRegistry is not None: + self.FEATURE_REGISTRY = FeatureRegistry + + super().__init__(data=data) + self.interp_kwargs = interp_kwargs + features = parse_to_list(data=data, features=features) + new_features = [f for f in features if f not in self.data] + for f in new_features: + self.data[f] = self.derive(f) + logger.info('Finished deriving %s.', f) + self.data = ( + self.data[list(self.data.coords)] + if not features + else self.data + if features == 'all' + else self.data[features] + ) + + def _check_registry(self, feature) -> Union[Type[DerivedFeature], None]: + """Check if feature or matching pattern is in the feature registry + keys. Return the corresponding value if found.""" + if feature.lower() in self.FEATURE_REGISTRY: + return self.FEATURE_REGISTRY[feature.lower()] + for pattern in self.FEATURE_REGISTRY: + if re.match(pattern.lower(), feature.lower()): + return self.FEATURE_REGISTRY[pattern] + return None + + def _get_inputs(self, feature): + """Get method inputs and map any wildcards to height or pressure + (depending on the name of "feature")""" + method = self._check_registry(feature) + fstruct = parse_feature(feature) + return [fstruct.map_wildcard(i) for i in getattr(method, 'inputs', [])] + +
+[docs] + def get_inputs(self, feature): + """Get inputs for the given feature and inputs for those inputs.""" + inputs = self._get_inputs(feature) + more_inputs = [] + for inp in inputs: + more_inputs.extend(self._get_inputs(inp)) + return inputs + more_inputs
+[docs] + def no_overlap(self, feature): + """Check if any of the nested inputs for 'feature' contain 'feature'""" + return feature not in self.get_inputs(feature)
+[docs] + def check_registry( + self, feature + ) -> Union[np.ndarray, da.core.Array, str, None]: + """Get compute method from the registry if available. Will check for + pattern feature match in feature registry. e.g. if u_100m matches a + feature registry entry of u_(.*)m + """ + method = self._check_registry(feature) + if isinstance(method, str): + return method + if hasattr(method, 'inputs'): + fstruct = parse_feature(feature) + inputs = [fstruct.map_wildcard(i) for i in method.inputs] + missing = [f for f in inputs if f not in self.data] + can_derive = all( + (self.no_overlap(m) or self.has_interp_variables(m)) + for m in missing + ) + logger.debug('Found compute method (%s) for %s.', method, feature) + msg = 'Missing required features %s. Trying to derive these first.' + if any(missing) and can_derive: + logger.debug(msg, missing) + for f in missing: + self.data[f] = self.derive(f) + return self._run_compute(feature, method) + msg = 'All required features %s found. Proceeding.' + if not missing: + logger.debug(msg, inputs) + return self._run_compute(feature, method) + msg = ('Some of the method inputs reference %s itself. We will ' + 'try height interpolation instead.') + if not can_derive: + logger.debug(msg, feature) + return None
+ + + def _run_compute(self, feature, method): + """If we have all the inputs we can run the compute method.""" + compute = method.compute + params = signature(compute).parameters + fstruct = parse_feature(feature) + kwargs = { + k: getattr(fstruct, k) for k in params if hasattr(fstruct, k) + } + return compute(self.data, **kwargs) + +
+[docs]
+    def map_new_name(self, feature, pattern):
+        """If the search for a derivation method first finds an alternative
+        name for the feature we want to derive, by matching a wildcard pattern,
+        we need to replace the wildcard with the specific height or pressure we
+        want and continue the search for a derivation method with this new
+        name."""
+        fstruct = parse_feature(feature)
+        pstruct = parse_feature(pattern)
+        if '*' not in pattern:
+            new_feature = pattern
+        elif fstruct.height is not None:
+            new_feature = pstruct.basename + f'_{fstruct.height}m'
+        elif fstruct.pressure is not None:
+            new_feature = pstruct.basename + f'_{fstruct.pressure}pa'
+        else:
+            msg = (
+                'Found matching pattern "%s" for feature "%s" but could not '
+                'construct a valid new feature name'
+            )
+            logger.error(msg, pattern, feature)
+            raise RuntimeError(msg % (pattern, feature))
+        logger.debug(
+            'Found alternative name "%s" for "%s". Continuing derivation '
+            'for %s.',
+            new_feature,
+            feature,
+            new_feature,
+        )
+        return new_feature
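For example (an illustrative sketch; ``deriver`` stands in for any ``BaseDeriver`` instance, and the registry entries are hypothetical), a name-map entry from ``u_(.*)`` to a raw ERA5-style name ``ua_(.*)`` resolves like this:

# hypothetical inputs for illustration
deriver.map_new_name('u_100m', 'ua_(.*)')        # -> 'ua_100m'
deriver.map_new_name('u_850pa', 'ua_(.*)')       # -> 'ua_850pa'
deriver.map_new_name('windspeed', 'wind_speed')  # no wildcard -> 'wind_speed'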
+[docs] + def has_interp_variables(self, feature): + """Check if the given feature can be interpolated from values at nearby + heights or from pressure level data. e.g. If ``u_10m`` and ``u_50m`` + exist then ``u_30m`` can be interpolated from these. If a pressure + level array ``u`` is available this can also be used, in conjunction + with height data.""" + fstruct = parse_feature(feature) + count = 0 + for feat in self.data.features: + fstruct_check = parse_feature(feat) + height = fstruct_check.height + + if ( + fstruct_check.basename == fstruct.basename + and height is not None + ): + count += 1 + return count > 1 or fstruct.basename in self.data
+[docs]
+    def derive(self, feature) -> Union[np.ndarray, da.core.Array]:
+        """Routine to derive requested features. Employs a little recursion to
+        locate differently named features with a name map in the feature
+        registry. i.e. if `FEATURE_REGISTRY` contains a key, value pair like
+        "windspeed": "wind_speed" then requesting "windspeed" will ultimately
+        return a compute method (or fetch from raw data) for "wind_speed".
+
+        Note
+        ----
+        Features are all saved as lower case names and __contains__ checks will
+        use feature.lower()
+        """
+        if feature not in self.data:
+            compute_check = self.check_registry(feature)
+            if compute_check is not None and isinstance(compute_check, str):
+                new_feature = self.map_new_name(feature, compute_check)
+                return self.derive(new_feature)
+
+            if compute_check is not None:
+                return compute_check
+
+            if self.has_interp_variables(feature):
+                logger.debug(
+                    'Attempting level interpolation for "%s"', feature
+                )
+                return self.do_level_interpolation(
+                    feature, interp_kwargs=self.interp_kwargs
+                )
+
+            msg = (
+                'Could not find "%s" in contained data or in the available '
+                'compute methods.'
+            )
+            logger.error(msg, feature)
+            raise RuntimeError(msg % feature)
+
+        return self.data[feature]
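As a usage sketch (the ``rasterizer`` object and feature names are illustrative, not part of this module): a request for 'windspeed_100m' matches the 'windspeed_(.*)' registry pattern, and the matched compute method consumes the already-loaded u_100m / v_100m inputs, deriving them first if they are missing.

# minimal sketch, assuming `rasterizer.data` contains u_100m and v_100m
deriver = BaseDeriver(rasterizer.data, features=['windspeed_100m'])
ws = deriver.data['windspeed_100m']  # derived via the registered compute method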
+[docs] + def get_single_level_data(self, feature): + """When doing level interpolation we should include the single level + data available. e.g. If we have u_100m already and want to interpolate + u_40m from multi-level data U we should add u_100m at height 100m + before doing interpolation, since 100 could be a closer level to 40m + than those available in U.""" + fstruct = parse_feature(feature) + pattern = fstruct.basename + '_(.*)' + var_list = [] + lev_list = [] + lev_array = None + var_array = None + for f in list(self.data.data_vars): + if re.match(pattern.lower(), f): + var_list.append(self.data[f]) + pstruct = parse_feature(f) + lev = ( + pstruct.height + if pstruct.height is not None + else pstruct.pressure + ) + lev_list.append(np.float32(lev)) + + if len(var_list) > 0: + var_array = da.stack(var_list, axis=-1) + sl_shape = (*var_array.shape[:-1], len(lev_list)) + lev_array = da.broadcast_to(da.from_array(lev_list), sl_shape) + + return var_array, lev_array
+[docs] + def get_multi_level_data(self, feature): + """Get data stored in multi-level arrays, like u stored on pressure + levels.""" + fstruct = parse_feature(feature) + var_array = None + lev_array = None + + if fstruct.basename in self.data: + var_array = self.data[fstruct.basename].data.astype(np.float32) + + if fstruct.height is not None and var_array is not None: + msg = ( + f'To interpolate {fstruct.basename} to {feature} the loaded ' + 'data needs to include "zg" and "topography" or have a ' + f'"{Dimension.HEIGHT}" dimension.' + ) + can_calc_height = ( + 'zg' in self.data.features + and 'topography' in self.data.features + ) + have_height = Dimension.HEIGHT in self.data.dims + assert can_calc_height or have_height, msg + + if can_calc_height: + lev_array = self.data[['zg', 'topography']].as_array() + lev_array = lev_array[..., 0] - lev_array[..., 1] + else: + lev_array = da.broadcast_to( + self.data[Dimension.HEIGHT].astype(np.float32), + var_array.shape, + ) + elif var_array is not None: + msg = ( + f'To interpolate {fstruct.basename} to {feature} the loaded ' + 'data needs to include "level" (a.k.a pressure at multiple ' + 'levels).' + ) + assert Dimension.PRESSURE_LEVEL in self.data, msg + lev_array = da.broadcast_to( + self.data[Dimension.PRESSURE_LEVEL].astype(np.float32), + var_array.shape, + ) + return var_array, lev_array
+[docs] + def do_level_interpolation( + self, feature, interp_kwargs=None + ) -> xr.DataArray: + """Interpolate over height or pressure to derive the given feature.""" + ml_var, ml_levs = self.get_multi_level_data(feature) + sl_var, sl_levs = self.get_single_level_data(feature) + + fstruct = parse_feature(feature) + attrs = {} + for feat in self.data.features: + if parse_feature(feat).basename == fstruct.basename: + attrs = self.data[feat].attrs + + level = ( + [fstruct.height] + if fstruct.height is not None + else [fstruct.pressure] + ) + + if ml_var is not None and sl_var is None: + var_array = ml_var + lev_array = ml_levs + elif sl_var is not None and ml_var is None: + var_array = sl_var + lev_array = sl_levs + elif ml_var is not None and sl_var is not None: + var_array = np.concatenate([ml_var, sl_var], axis=-1) + lev_array = np.concatenate([ml_levs, sl_levs], axis=-1) + else: + msg = 'Neither single level nor multi level data was found for %s' + logger.error(msg, feature) + raise RuntimeError(msg % feature) + + out = Interpolator.interp_to_level( + lev_array=lev_array, + var_array=var_array, + level=np.float32(level), + interp_kwargs=interp_kwargs, + ) + return xr.DataArray( + data=_rechunk_if_dask(out), + dims=Dimension.dims_3d()[: len(out.shape)], + attrs=attrs, + )
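In the linear case, the interpolation this method delegates to ``Interpolator.interp_to_level`` reduces to a weighted average of the two bracketing levels. A small standalone sketch of that arithmetic (illustrative values, not this module's API):

import numpy as np

u_10m, u_100m = 4.0, 7.0  # winds at the two available heights (m/s)
level = 40.0              # target height (m)
weight = (level - 10.0) / (100.0 - 10.0)
u_40m = (1 - weight) * u_10m + weight * u_100m
print(u_40m)  # 5.0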
+
+[docs]
+class Deriver(BaseDeriver):
+    """Extends base :class:`BaseDeriver` class with time_roll and
+    hr_spatial_coarsen args."""
+
+    def __init__(
+        self,
+        data: Union[Sup3rX, Sup3rDataset],
+        features,
+        time_roll=0,
+        time_shift=None,
+        hr_spatial_coarsen=1,
+        nan_method_kwargs=None,
+        FeatureRegistry=None,
+        interp_kwargs=None,
+    ):
+        """
+        Parameters
+        ----------
+        data : Union[Sup3rX, Sup3rDataset]
+            Data used for derivations
+        features: list
+            List of features to derive
+        time_roll: int
+            Number of steps to roll along the time axis. Passed to
+            `xr.Dataset.roll()`
+        time_shift: int | None
+            Number of minutes to shift time axis. This can be used, for
+            example, to shift the time index for daily data so that the time
+            stamp for a given day starts at the zeroth minute instead of at
+            noon, as is the case for most GCM data.
+        hr_spatial_coarsen: int
+            Spatial coarsening factor. Passed to `xr.Dataset.coarsen()`
+        nan_method_kwargs: dict | None
+            Keyword arguments for nan handling. If method='mask', time steps
+            with nans will be dropped. Otherwise this should be a dict of
+            kwargs which will be passed to :meth:`Sup3rX.interpolate_na`.
+        FeatureRegistry : dict
+            Dictionary of :class:`DerivedFeature` objects used for derivations
+        interp_kwargs : dict | None
+            Dictionary of kwargs for level interpolation. Can include "method"
+            and "run_level_check" keys. Method specifies how to perform height
+            interpolation. e.g. Deriving u_20m from u_10m and u_100m. Options
+            are "linear" and "log". See
+            :py:meth:`sup3r.preprocessing.derivers.Deriver.do_level_interpolation`
+        """  # pylint: disable=line-too-long
+
+        super().__init__(
+            data=data,
+            features=features,
+            FeatureRegistry=FeatureRegistry,
+            interp_kwargs=interp_kwargs,
+        )
+
+        if time_roll != 0:
+            logger.debug('Applying time_roll=%s to data array', time_roll)
+            self.data = self.data.roll(**{Dimension.TIME: time_roll})
+
+        if time_shift is not None:
+            logger.debug('Applying time_shift=%s to time index', time_shift)
+            self.data.time_index = self.data.time_index.shift(
+                time_shift, freq='min'
+            )
+
+        if hr_spatial_coarsen > 1:
+            logger.debug(
+                'Applying hr_spatial_coarsen=%s to data.', hr_spatial_coarsen
+            )
+            self.data = self.data.coarsen(
+                {
+                    Dimension.SOUTH_NORTH: hr_spatial_coarsen,
+                    Dimension.WEST_EAST: hr_spatial_coarsen,
+                },
+                boundary='trim',
+            ).mean()
+
+        if nan_method_kwargs is not None:
+            if nan_method_kwargs['method'] == 'mask':
+                dim = nan_method_kwargs.get('dim', Dimension.TIME)
+                arr = self.data.to_dataarray()
+                dims = set(arr.dims) - {dim}
+                mask = np.isnan(arr).any(dims).data
+                self.data = self.data.drop_isel(**{dim: mask})
+
+            elif np.isnan(self.data.as_array()).any():
+                logger.info(
+                    f'Filling nan values with nan_method_kwargs='
+                    f'{nan_method_kwargs}'
+                )
+                self.data = self.data.interpolate_na(**nan_method_kwargs)
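A usage sketch under assumed inputs (the ``rasterizer`` object and feature names are illustrative):

# derive hub-height winds, coarsen 2x in space, and drop time steps with nans
deriver = Deriver(
    rasterizer.data,
    features=['u_100m', 'v_100m'],
    hr_spatial_coarsen=2,
    nan_method_kwargs={'method': 'mask'},
)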
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/derivers/methods.html b/_modules/sup3r/preprocessing/derivers/methods.html
new file mode 100644
index 000000000..29fb6d900
--- /dev/null
+++ b/_modules/sup3r/preprocessing/derivers/methods.html
@@ -0,0 +1,1213 @@

Source code for sup3r.preprocessing.derivers.methods

+"""Derivation methods for deriving features from raw data."""
+
+import copy
+import logging
+from abc import ABC, abstractmethod
+from typing import Tuple, Union
+
+import numpy as np
+
+from sup3r.preprocessing.accessor import Sup3rX
+from sup3r.preprocessing.base import Sup3rDataset
+
+from .utilities import SolarZenith, invert_uv, transform_rotate_wind
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DerivedFeature(ABC): + """Abstract class for special features which need to be derived from raw + features + + Note + ---- + `inputs` list will be used to search already derived / loaded data so this + should include all features required for a successful `.compute` call. + """ + + inputs: Tuple[str, ...] = () + +
+[docs] + @classmethod + @abstractmethod + def compute(cls, data: Union[Sup3rX, Sup3rDataset], **kwargs): + """Compute method for derived feature. This can use any of the features + contained in the xr.Dataset data and the attributes (e.g. + `.lat_lon`, `.time_index` accessed through Sup3rX accessor). + + Parameters + ---------- + data : Union[Sup3rX, Sup3rDataset] + Initialized and standardized through a :class:`Loader` with a + specific spatiotemporal extent rasterized for the features + contained using a :class:`Rasterizer`. + kwargs : dict + Optional keyword arguments used in derivation. height is a typical + example. Could also be pressure. + """
+
+[docs] +class SurfaceRH(DerivedFeature): + """Surface Relative humidity feature for computing rh from dewpoint + temperature and ambient temperature. This is in a 0 - 100 scale to match + the ERA5 pressure level relative humidity scale. + + https://earthscience.stackexchange.com/questions/24156/era5-single-level-calculate-relative-humidity + + https://journals.ametsoc.org/view/journals/bams/86/2/bams-86-2-225.xml?tab_body=pdf + """ + + inputs = ('d2m', 'temperature_2m') + +
+[docs] + @classmethod + def compute(cls, data): + """Compute surface relative humidity.""" + water_vapor_pressure = 6.1078 * np.exp( + 17.1 * data['d2m'] / (235 + data['d2m']) + ) + saturation_water_vapor_pressure = 6.1078 * np.exp( + 17.1 * data['temperature_2m'] / (235 + data['temperature_2m']) + ) + return 100 * water_vapor_pressure / saturation_water_vapor_pressure
+
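For a quick numeric check of the Magnus-style formula above (values chosen for illustration): a 2m dewpoint of 10 C with a 2m temperature of 20 C gives vapor pressures of roughly 12.3 and 23.4 hPa, i.e. RH near 53%.

import numpy as np

d2m, t2m = 10.0, 20.0  # dewpoint and air temperature in Celsius
e = 6.1078 * np.exp(17.1 * d2m / (235 + d2m))   # ~12.3 hPa
es = 6.1078 * np.exp(17.1 * t2m / (235 + t2m))  # ~23.4 hPa
print(100 * e / es)  # ~52.6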
+[docs] +class ClearSkyRatio(DerivedFeature): + """Clear Sky Ratio feature class. Inputs here are typically found in H5 + data like the NSRDB""" + + inputs = ('ghi', 'clearsky_ghi') + +
+[docs]
+    @classmethod
+    def compute(cls, data):
+        """Compute the clearsky ratio
+
+        Returns
+        -------
+        cs_ratio : ndarray
+            Clearsky ratio, e.g. the all-sky ghi / the clearsky ghi. NaN where
+            nighttime.
+        """
+        # need to use a nighttime threshold of 1 W/m2 because cs_ghi is stored
+        # in integer format and weird binning patterns happen in the clearsky
+        # ratio and cloud mask between 0 and 1 W/m2 and sunrise/sunset
+        night_mask = data['clearsky_ghi'] <= 1
+
+        # set any timestep with any nighttime equal to NaN to avoid weird
+        # sunrise/sunset artifacts.
+        night_mask = np.asarray(night_mask.any(axis=(0, 1)))
+
+        cs_ratio = data['ghi'] / data['clearsky_ghi']
+        cs_ratio[..., night_mask] = np.nan
+        return cs_ratio.astype(np.float32)
+
+[docs] +class ClearSkyRatioCC(DerivedFeature): + """Clear Sky Ratio feature class for computing from climate change netcdf + data + """ + + inputs = ('rsds', 'clearsky_ghi') + +
+[docs]
+    @classmethod
+    def compute(cls, data):
+        """Compute the daily average climate change clearsky ratio
+
+        Parameters
+        ----------
+        data : Union[Sup3rX, Sup3rDataset]
+            xarray dataset used for this computation, must include
+            clearsky_ghi and rsds (rsds==ghi for cc datasets)
+
+        Returns
+        -------
+        cs_ratio : ndarray
+            Clearsky ratio, e.g. the all-sky ghi / the clearsky ghi. This is
+            assumed to be daily average data for climate change source data.
+        """
+        cs_ratio = data['rsds'] / data['clearsky_ghi']
+        cs_ratio = np.minimum(cs_ratio, 1)
+        return np.maximum(cs_ratio, 0)
+
+[docs]
+class CloudMask(DerivedFeature):
+    """Cloud Mask feature class. Inputs here are typically found in H5 data
+    like the NSRDB."""
+
+    inputs = ('ghi', 'clearsky_ghi')
+
+[docs]
+    @classmethod
+    def compute(cls, data):
+        """
+        Returns
+        -------
+        cloud_mask : ndarray
+            Cloud mask, e.g. 1 where cloudy, 0 where clear. NaN where
+            nighttime. Data is float32 so it can be normalized without any
+            integer weirdness.
+        """
+        # need to use a nighttime threshold of 1 W/m2 because cs_ghi is stored
+        # in integer format and weird binning patterns happen in the clearsky
+        # ratio and cloud mask between 0 and 1 W/m2 and sunrise/sunset
+        night_mask = data['clearsky_ghi'] <= 1
+
+        # set any timestep with any nighttime equal to NaN to avoid weird
+        # sunrise/sunset artifacts.
+        night_mask = np.asarray(night_mask.any(axis=(0, 1)))
+
+        cloud_mask = data['ghi'] < data['clearsky_ghi']
+        cloud_mask = cloud_mask.astype(np.float32)
+        cloud_mask[night_mask] = np.nan
+        return cloud_mask.astype(np.float32)
+
+[docs] +class PressureWRF(DerivedFeature): + """Pressure feature class for WRF data. Needed since P is perturbation + pressure. + """ + + inputs = ('p_(.*)', 'pb_(.*)') + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute pressure from NETCDF data""" + return data[f'p_{height}m'] + data[f'pb_{height}m']
+
+[docs] +class Windspeed(DerivedFeature): + """Windspeed feature from rasterized data""" + + inputs = ('u_(.*)', 'v_(.*)') + +
+[docs] + @classmethod + def compute(cls, data, height): + """Compute windspeed""" + + ws, _ = invert_uv( + data[f'u_{height}m'], + data[f'v_{height}m'], + data.lat_lon, + ) + return ws
+
+[docs] +class Winddirection(DerivedFeature): + """Winddirection feature from rasterized data""" + + inputs = ('u_(.*)', 'v_(.*)') + +
+[docs] + @classmethod + def compute(cls, data, height): + """Compute winddirection""" + _, wd = invert_uv( + data[f'u_{height}m'], + data[f'v_{height}m'], + data.lat_lon, + ) + return wd
+
+[docs] +class UWindPowerLaw(DerivedFeature): + """U wind component feature class with needed inputs method and compute + method. Uses power law extrapolation to get values above surface + + https://csl.noaa.gov/projects/lamar/windshearformula.html + https://www.tandfonline.com/doi/epdf/10.1080/00022470.1977.10470503 + """ + + ALPHA = 0.2 + NEAR_SFC_HEIGHT = 10 + + inputs = ('uas',) + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute U wind component from data + + Parameters + ---------- + data : Union[Sup3rX, Sup3rDataset] + Initialized and standardized through a :class:`Loader` with a + specific spatiotemporal extent rasterized for the features + contained using a :class:`Rasterizer`. + height : str | int + Height at which to compute the derived feature + + Returns + ------- + ndarray + Derived feature array + + """ + return data['uas'] * (float(height) / cls.NEAR_SFC_HEIGHT) ** cls.ALPHA
+
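Numerically, the power-law extrapolation above multiplies the 10m wind by (height / 10) ** 0.2, e.g. about a 1.58x amplification at 100m (a standalone sketch with illustrative values):

uas = 5.0                         # 10m u-wind in m/s
u_100m = uas * (100 / 10) ** 0.2  # 10**0.2 ~ 1.585 -> ~7.92 m/s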
+[docs] +class VWindPowerLaw(DerivedFeature): + """V wind component feature class with needed inputs method and compute + method. Uses power law extrapolation to get values above surface + + https://csl.noaa.gov/projects/lamar/windshearformula.html + https://www.tandfonline.com/doi/epdf/10.1080/00022470.1977.10470503 + """ + + ALPHA = 0.2 + NEAR_SFC_HEIGHT = 10 + + inputs = ('vas',) + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute V wind component from data""" + + return data['vas'] * (float(height) / cls.NEAR_SFC_HEIGHT) ** cls.ALPHA
+
+[docs] +class UWind(DerivedFeature): + """U wind component feature class with needed inputs method and compute + method + """ + + inputs = ('windspeed_(.*)', 'winddirection_(.*)') + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute U wind component from data""" + u, _ = transform_rotate_wind( + data[f'windspeed_{height}m'], + data[f'winddirection_{height}m'], + data.lat_lon, + ) + return u
+
+[docs] +class VWind(DerivedFeature): + """V wind component feature class with needed inputs method and compute + method + """ + + inputs = ('windspeed_(.*)', 'winddirection_(.*)') + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute V wind component from data""" + + _, v = transform_rotate_wind( + data[f'windspeed_{height}m'], + data[f'winddirection_{height}m'], + data.lat_lon, + ) + return v
+
+[docs] +class USolar(DerivedFeature): + """U wind component feature class with needed inputs method and compute + method for NSRDB data (which has just a single windspeed hub height) + """ + + inputs = ('wind_speed', 'wind_direction') + +
+[docs] + @classmethod + def compute(cls, data): + """Method to compute U wind component from data""" + u, _ = transform_rotate_wind( + data['wind_speed'], + data['wind_direction'], + data.lat_lon, + ) + return u
+
+[docs] +class VSolar(DerivedFeature): + """V wind component feature class with needed inputs method and compute + method for NSRDB data (which has just a single windspeed hub height) + """ + + inputs = ('wind_speed', 'wind_direction') + +
+[docs]
+    @classmethod
+    def compute(cls, data):
+        """Method to compute V wind component from data"""
+        _, v = transform_rotate_wind(
+            data['wind_speed'],
+            data['wind_direction'],
+            data.lat_lon,
+        )
+        return v
+
+[docs] +class TempNCforCC(DerivedFeature): + """Air temperature variable from climate change nc files""" + + inputs = ('ta_(.*)',) + +
+[docs] + @classmethod + def compute(cls, data, height): + """Method to compute ta in Celsius from ta source in Kelvin""" + out = data[f'ta_{height}m'] + units = out.attrs.get('units', 'K') + if units == 'K': + out -= 273.15 + out.attrs['units'] = 'C' + return out
+
+[docs] +class Tas(DerivedFeature): + """Air temperature near surface variable from climate change nc files""" + + inputs = ('tas',) + +
+[docs] + @classmethod + def compute(cls, data): + """Method to compute tas in Celsius from tas source in Kelvin""" + out = data[cls.inputs[0]] + units = out.attrs.get('units', 'K') + if units == 'K': + out -= 273.15 + out.attrs['units'] = 'C' + return out
+
+[docs] +class TasMin(Tas): + """Daily min air temperature near surface variable from climate change nc + files + """ + + inputs = ('tasmin',)
+[docs] +class TasMax(Tas): + """Daily max air temperature near surface variable from climate change nc + files + """ + + inputs = ('tasmax',)
+[docs] +class Sza(DerivedFeature): + """Solar zenith angle derived feature.""" + + inputs = () + +
+[docs] + @classmethod + def compute(cls, data): + """Compute method for sza.""" + sza = SolarZenith.get_zenith(data.time_index, data.lat_lon) + return sza.astype(np.float32)
+
+ + + +RegistryBase = { + 'u_(.*)': UWind, + 'v_(.*)': VWind, + 'relativehumidity_2m': SurfaceRH, + 'windspeed_(.*)': Windspeed, + 'winddirection_(.*)': Winddirection, + 'cloud_mask': CloudMask, + 'clearsky_ratio': ClearSkyRatio, + 'sza': Sza, +} + +RegistryH5WindCC = { + **RegistryBase, + 'temperature_max_(.*)m': 'temperature_(.*)m', + 'temperature_min_(.*)m': 'temperature_(.*)m', + 'relativehumidity_max_(.*)m': 'relativehumidity_(.*)m', + 'relativehumidity_min_(.*)m': 'relativehumidity_(.*)m', +} + +RegistryH5SolarCC = { + **RegistryH5WindCC, + 'windspeed': 'wind_speed', + 'winddirection': 'wind_direction', + 'U': USolar, + 'V': VSolar, +} + +RegistryNCforCC = copy.deepcopy(RegistryBase) +RegistryNCforCC.update( + { + 'u_(.*)': 'ua_(.*)', + 'v_(.*)': 'va_(.*)', + 'relativehumidity_2m': 'hurs', + 'relativehumidity_min_2m': 'hursmin', + 'relativehumidity_max_2m': 'hursmax', + 'clearsky_ratio': ClearSkyRatioCC, + 'temperature_(.*)': TempNCforCC, + 'temperature_2m': Tas, + 'temperature_max_2m': TasMax, + 'temperature_min_2m': TasMin, + 'pressure_(.*)': 'level_(.*)', + } +) + + +RegistryNCforCCwithPowerLaw = { + **RegistryNCforCC, + 'u_(.*)': UWindPowerLaw, + 'v_(.*)': VWindPowerLaw, +} +
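The registry values above are either :class:`DerivedFeature` classes (supplying a compute method) or plain strings (a name map that triggers ``map_new_name``). A minimal sketch of both lookup outcomes (illustrative only):

import re

# class entry: 'u_100m' matches the 'u_(.*)' pattern -> UWind.compute is used
assert re.match('u_(.*)', 'u_100m')

# string entry: 'relativehumidity_2m' maps to the raw ERA5 name 'hurs',
# so the deriver re-runs the search for 'hurs' instead
assert RegistryNCforCC['relativehumidity_2m'] == 'hurs'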
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/derivers/utilities.html b/_modules/sup3r/preprocessing/derivers/utilities.html
new file mode 100644
index 000000000..ba4b6f1bb
--- /dev/null
+++ b/_modules/sup3r/preprocessing/derivers/utilities.html
@@ -0,0 +1,941 @@

Source code for sup3r.preprocessing.derivers.utilities

+"""Miscellaneous utilities shared across the derivers module"""
+
+import logging
+import re
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+from rex.utilities.solar_position import SolarPosition
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class SolarZenith: + """ + Class to compute solar zenith angle. Use SPA from rex and wrap some of + those methods in ``dask.array.map_blocks`` so this can be computed in + parallel across chunks. + """ + + @staticmethod + def _get_zenith(n, zulu, lat_lon): + """ + Compute solar zenith angle from days, hours, and location + + Parameters + ---------- + n : da.core.Array + Days since Greenwich Noon + zulu : da.core.Array + Decimal hour in UTC (Zulu Hour) + lat_lon : da.core.Array + (latitude, longitude, 2) for site(s) of interest + + Returns + ------- + zenith : ndarray + Solar zenith angle in degrees + """ + lat, lon = lat_lon[..., 0], lat_lon[..., 1] + lat = lat.flatten()[..., None] + lon = lon.flatten()[..., None] + ra, dec = SolarPosition._calc_sun_pos(n) + zen = da.map_blocks(SolarPosition._calc_hour_angle, n, zulu, ra, lon) + zen = da.map_blocks(SolarPosition._calc_elevation, dec, zen, lat) + zen = da.map_blocks(SolarPosition._atm_correction, zen) + zen = np.degrees(np.pi / 2 - zen) + return zen + +
+[docs] + @staticmethod + def get_zenith(time_index, lat_lon, ll_chunks=(10, 10, 1)): + """ + Compute solar zenith angle from time_index and location + + Parameters + ---------- + time_index : ndarray | pandas.DatetimeIndex | str + Datetime stamps of interest + lat_lon : ndarray, da.core.Array + (latitude, longitude, 2) for site(s) of interest + ll_chunks : tuple + Chunks for lat_lon array. To run this on a large domain, even with + delayed computations through dask, we need to use small chunks for + the lat lon array. + + Returns + ------- + zenith : da.core.Array + Solar zenith angle in degrees + """ + if not isinstance(time_index, pd.DatetimeIndex): + if isinstance(time_index, str): + time_index = [time_index] + + time_index = pd.to_datetime(time_index) + + out_shape = (*lat_lon.shape[:-1], len(time_index)) + lat_lon = da.asarray(lat_lon, chunks=ll_chunks) + n, zulu = SolarPosition._parse_time(time_index) + n = da.asarray(n).astype(np.float32) + zulu = da.asarray(zulu).astype(np.float32) + zen = SolarZenith._get_zenith(n, zulu, lat_lon) + return zen.reshape(out_shape)
+
+[docs] +def parse_feature(feature): + """Parse feature name to get the "basename" (i.e. U for u_100m), the height + (100 for u_100m), and pressure if available (1000 for u_1000pa).""" + + class FeatureStruct: + """Feature structure storing `basename`, `height`, and `pressure`.""" + + def __init__(self): + height = re.findall(r'_\d+m', feature) + press = re.findall(r'_\d+pa', feature) + self.basename = ( + feature.replace(height[0], '') + if height + else feature.replace(press[0], '') + if press + else feature.split('_(.*)')[0] + if '_(.*)' in feature + else feature + ) + self.height = ( + int(round(float(height[0][1:-1]))) if height else None + ) + self.pressure = ( + int(round(float(press[0][1:-2]))) if press else None + ) + + def map_wildcard(self, pattern): + """Return given pattern with wildcard replaced with height if + available, pressure if available, or just return the basename.""" + if '(.*)' not in pattern: + return pattern + return ( + f"{pattern.split('_(.*)')[0]}_{self.height}m" + if self.height is not None + else f"{pattern.split('_(.*)')[0]}_{self.pressure}pa" + if self.pressure is not None + else f"{pattern.split('_(.*)')[0]}" + ) + + return FeatureStruct()
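Usage sketch for ``parse_feature`` and ``map_wildcard``:

fstruct = parse_feature('u_100m')
assert fstruct.basename == 'u'
assert fstruct.height == 100
assert fstruct.map_wildcard('ua_(.*)') == 'ua_100m'

pstruct = parse_feature('temperature_850pa')
assert pstruct.pressure == 850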
+[docs] +def windspeed_log_law(z, a, b, c): + """Windspeed log profile. + + Parameters + ---------- + z : float + Height above ground in meters + a : float + Proportional to friction velocity + b : float + Related to zero-plane displacement in meters (height above the ground + at which zero mean wind speed is achieved as a result of flow obstacles + such as trees or buildings) + c : float + Proportional to stability term. + + Returns + ------- + ws : float + Value of windspeed at a given height. + """ + return a * np.log(z + b) + c
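For example, with illustrative coefficients a=1.0, b=0.0, c=2.0 the profile gives ws = ln(z) + 2, i.e. about 6.6 m/s at 100m:

import numpy as np

a, b, c = 1.0, 0.0, 2.0  # illustrative fit coefficients
ws_100m = a * np.log(100 + b) + c  # ln(100) + 2 ~ 6.61 m/s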
+[docs] +def transform_rotate_wind(ws, wd, lat_lon): + """Transform windspeed/direction to u and v and align u and v with grid + + Parameters + ---------- + ws : Union[np.ndarray, da.core.Array] + 3D array of high res windspeed data + (spatial_1, spatial_2, temporal) + wd : Union[np.ndarray, da.core.Array] + 3D array of high res winddirection data. Angle is in degrees and + measured relative to the south_north direction. + (spatial_1, spatial_2, temporal) + lat_lon : Union[np.ndarray, da.core.Array] + 3D array of lat lon + (spatial_1, spatial_2, 2) + Last dimension has lat / lon in that order + + Returns + ------- + u : Union[np.ndarray, da.core.Array] + 3D array of high res U data + (spatial_1, spatial_2, temporal) + v : Union[np.ndarray, da.core.Array] + 3D array of high res V data + (spatial_1, spatial_2, temporal) + """ + # get the dy/dx to the nearest vertical neighbor + invert_lat = False + if lat_lon[-1, 0, 0] > lat_lon[0, 0, 0]: + invert_lat = True + lat_lon = lat_lon[::-1] + ws = ws[::-1] + wd = wd[::-1] + dy = lat_lon[:, :, 0] - np.roll(lat_lon[:, :, 0], 1, axis=0) + dx = lat_lon[:, :, 1] - np.roll(lat_lon[:, :, 1], 1, axis=0) + dy = (dy + 90) % 180 - 90 + dx = (dx + 180) % 360 - 180 + + # calculate the angle from the vertical + theta = (np.pi / 2) - np.arctan2(dy, dx) + + if len(theta) > 1: + theta[0] = theta[1] # fix the roll row + wd = np.radians(wd) + + u_rot = np.cos(theta)[:, :, np.newaxis] * ws * np.sin(wd) + u_rot += np.sin(theta)[:, :, np.newaxis] * ws * np.cos(wd) + + v_rot = -np.sin(theta)[:, :, np.newaxis] * ws * np.sin(wd) + v_rot += np.cos(theta)[:, :, np.newaxis] * ws * np.cos(wd) + + if invert_lat: + u_rot = u_rot[::-1] + v_rot = v_rot[::-1] + return u_rot, v_rot
+[docs] +def invert_uv(u, v, lat_lon): + """Transform u and v back to windspeed and winddirection + + Parameters + ---------- + u : Union[np.ndarray, da.core.Array] + 3D array of high res U data + (spatial_1, spatial_2, temporal) + v : Union[np.ndarray, da.core.Array] + 3D array of high res V data + (spatial_1, spatial_2, temporal) + lat_lon : Union[np.ndarray, da.core.Array] + 3D array of lat lon + (spatial_1, spatial_2, 2) + Last dimension has lat / lon in that order + + Returns + ------- + ws : Union[np.ndarray, da.core.Array] + 3D array of high res windspeed data + (spatial_1, spatial_2, temporal) + wd : Union[np.ndarray, da.core.Array] + 3D array of high res winddirection data. Angle is in degrees and + measured relative to the south_north direction. + (spatial_1, spatial_2, temporal) + """ + invert_lat = False + if lat_lon[-1, 0, 0] > lat_lon[0, 0, 0]: + invert_lat = True + lat_lon = lat_lon[::-1] + u = u[::-1] + v = v[::-1] + dy = lat_lon[:, :, 0] - np.roll(lat_lon[:, :, 0], 1, axis=0) + dx = lat_lon[:, :, 1] - np.roll(lat_lon[:, :, 1], 1, axis=0) + dy = (dy + 90) % 180 - 90 + dx = (dx + 180) % 360 - 180 + + # calculate the angle from the vertical + theta = (np.pi / 2) - np.arctan2(dy, dx) + if len(theta) > 1: + theta[0] = theta[1] # fix the roll row + + u_rot = np.cos(theta)[:, :, np.newaxis] * u + u_rot -= np.sin(theta)[:, :, np.newaxis] * v + + v_rot = np.sin(theta)[:, :, np.newaxis] * u + v_rot += np.cos(theta)[:, :, np.newaxis] * v + + ws = np.hypot(u_rot, v_rot) + wd = (np.degrees(np.arctan2(u_rot, v_rot)) + 360) % 360 + + if invert_lat: + ws = ws[::-1] + wd = wd[::-1] + return ws, wd
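``invert_uv`` is the inverse of ``transform_rotate_wind``, so a round trip recovers the original speeds and directions. A minimal self-contained check on a tiny regular grid (illustrative values):

import numpy as np

lats, lons = np.meshgrid([40.0, 39.9], [-105.0, -104.9], indexing='ij')
lat_lon = np.dstack([lats, lons])  # (spatial_1, spatial_2, 2), lats descending
ws = np.full((2, 2, 1), 5.0, dtype=np.float32)
wd = np.full((2, 2, 1), 270.0, dtype=np.float32)

u, v = transform_rotate_wind(ws, wd, lat_lon)
ws2, wd2 = invert_uv(u, v, lat_lon)
assert np.allclose(ws, ws2, atol=1e-4) and np.allclose(wd, wd2, atol=1e-2)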
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/loaders.html b/_modules/sup3r/preprocessing/loaders.html
new file mode 100644
index 000000000..5ba880f8f
--- /dev/null
+++ b/_modules/sup3r/preprocessing/loaders.html
@@ -0,0 +1,694 @@

Source code for sup3r.preprocessing.loaders

+"""Container subclass with additional methods for loading the contained
+data."""
+
+from typing import ClassVar
+
+from sup3r.preprocessing.utilities import composite_info, get_source_type
+
+from .base import BaseLoader
+from .h5 import LoaderH5
+from .nc import LoaderNC
+
+
+
+[docs] +class Loader: + """`Loader` class which parses input file type and returns + appropriate `TypeSpecificLoader`.""" + + TypeSpecificClasses: ClassVar = {'nc': LoaderNC, 'h5': LoaderH5} + + def __new__(cls, file_paths, **kwargs): + """Override parent class to return type specific class based on + `source_file`""" + SpecificClass = cls.TypeSpecificClasses[get_source_type(file_paths)] + return SpecificClass(file_paths, **kwargs) + + __signature__, __doc__ = composite_info(list(TypeSpecificClasses.values())) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, trace): + self.res.close()
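Usage sketch (the file path is illustrative): the class returned depends on the input file type, so the same call works for H5 and NETCDF inputs.

# returns a LoaderNC for .nc inputs and a LoaderH5 for .h5 inputs
with Loader('/path/to/era5_2020.nc', chunks='auto') as loader:
    data = loader.data  # lazy, dask-backed dataset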
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/loaders/base.html b/_modules/sup3r/preprocessing/loaders/base.html
new file mode 100644
index 000000000..7022ed860
--- /dev/null
+++ b/_modules/sup3r/preprocessing/loaders/base.html
@@ -0,0 +1,818 @@

Source code for sup3r.preprocessing.loaders.base

+"""Abstract Loader class merely for loading data from file paths. This data is
+always loaded lazily."""
+
+import copy
+import logging
+from abc import ABC, abstractmethod
+from datetime import datetime as dt
+from typing import Callable
+
+import numpy as np
+
+from sup3r.preprocessing.base import Container
+from sup3r.preprocessing.names import FEATURE_NAMES
+from sup3r.preprocessing.utilities import (
+    expand_paths,
+    log_args,
+    lower_names,
+    ordered_dims,
+)
+from sup3r.utilities.utilities import xr_open_mfdataset
+
+from .utilities import (
+    standardize_names,
+    standardize_values,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class BaseLoader(Container, ABC):
+    """Base loader. "Loads" files so that a `.data` attribute provides access
+    to the data in the files as a dask array with shape (lats, lons, time,
+    features). This object provides a `__getitem__` method that can be used by
+    :class:`~sup3r.preprocessing.samplers.Sampler` objects to build batches or
+    by :class:`~sup3r.preprocessing.rasterizers.Rasterizer` objects to derive /
+    extract specific features / regions / time_periods."""
+
+    BASE_LOADER: Callable = xr_open_mfdataset
+
+    @log_args
+    def __init__(
+        self,
+        file_paths,
+        features='all',
+        res_kwargs=None,
+        chunks='auto',
+        BaseLoader=None,
+    ):
+        """
+        Parameters
+        ----------
+        file_paths : str | pathlib.Path | list
+            Location(s) of files to load
+        features : list | str
+            Features to return in loaded dataset. If 'all' then all available
+            features will be returned.
+        res_kwargs : dict
+            Additional keyword arguments passed through to the ``BaseLoader``.
+            BaseLoader is usually xr.open_mfdataset for NETCDF files and
+            MultiFileResourceX for H5 files.
+        chunks : dict | str | None
+            Dictionary of chunk sizes to pass through to
+            ``dask.array.from_array()`` or ``xr.Dataset().chunk()``. Will be
+            converted to a tuple when used in ``from_array()``. These are the
+            methods for H5 and NETCDF data, respectively. This argument can
+            be "auto" in addition to a dictionary. If this is None then the
+            data will not be chunked and instead loaded directly into memory.
+        BaseLoader : Callable
+            Optional base loader update. The default for H5 files is
+            MultiFileResourceX and for NETCDF is xarray.open_mfdataset
+        """
+        super().__init__()
+        self.res_kwargs = res_kwargs or {}
+        self.file_paths = file_paths
+        self.chunks = chunks
+        BASE_LOADER = BaseLoader or self.BASE_LOADER
+        self.res = BASE_LOADER(self.file_paths, **self.res_kwargs)
+        data = lower_names(self._load())
+        data = self._add_attrs(data)
+        data = standardize_values(data)
+        data = standardize_names(data, FEATURE_NAMES).astype(np.float32)
+        data = data.transpose(*ordered_dims(data.dims), ...)
+        features = list(data.dims) if features == [] else features
+        self.data = data[features] if features != 'all' else data
+
+        if 'meta' in self.res:
+            self.data.meta = self.res.meta
+
+        if self.chunks is None:
+            logger.info(f'Pre-loading data into memory for: {features}')
+            self.data.compute()
+
+    def _parse_chunks(self, dims, feature=None):
+        """Get chunks for given dimensions from ``self.chunks``."""
+        chunks = copy.deepcopy(self.chunks)
+        if (
+            isinstance(chunks, dict)
+            and feature is not None
+            and feature in chunks
+        ):
+            chunks = chunks[feature]
+        if isinstance(chunks, dict):
+            chunks = {k: v for k, v in chunks.items() if k in dims}
+        return chunks
+
+    def _add_attrs(self, data):
+        """Add meta data to dataset."""
+        attrs = {'source_files': self.file_paths}
+        attrs['global_attrs'] = getattr(self.res, 'global_attrs', [])
+        attrs.update(getattr(self.res, 'attrs', {}))
+        attrs['date_modified'] = attrs.get(
+            'date_modified', dt.utcnow().isoformat()
+        )
+        data.attrs.update(attrs)
+        return data
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, trace):
+        self.res.close()
+
+    @property
+    def file_paths(self):
+        """Get file paths for input data"""
+        return self._file_paths
+
+    @file_paths.setter
+    def file_paths(self, file_paths):
+        """Set file paths attr and do initial glob / sort
+
+        Parameters
+        ----------
+        file_paths : str | list
+            A list of files to extract raster data from. Each file must have
+            the same number of timesteps. Can also pass a string or list of
+            strings with a unix-style file path which will be passed through
+            glob.glob
+        """
+        self._file_paths = expand_paths(file_paths)
+        msg = (
+            f'No valid files provided to {self.__class__.__name__}. '
+            f'Received file_paths={file_paths}. Aborting.'
+        )
+        assert file_paths is not None and len(self._file_paths) > 0, msg
+
+    @abstractmethod
+    def _load(self):
+        """'Load' data into this container. Does not actually load from disk
+        into memory. Just wraps data from files in an xarray.Dataset.
+
+        Returns
+        -------
+        xr.Dataset
+        """
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/loaders/h5.html b/_modules/sup3r/preprocessing/loaders/h5.html
new file mode 100644
index 000000000..b0cfd9c12
--- /dev/null
+++ b/_modules/sup3r/preprocessing/loaders/h5.html
@@ -0,0 +1,891 @@

Source code for sup3r.preprocessing.loaders.h5

+"""Base loading class for H5 files.
+
+TODO: Explore replacing rex handlers with xarray. xarray should be able to
+load H5 files fine. We would still need get_raster_index method in Rasterizers
+though.
+"""
+
+import logging
+from typing import Dict, Tuple
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+import xarray as xr
+from rex import MultiFileWindX
+
+from sup3r.preprocessing.names import Dimension
+
+from .base import BaseLoader
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class LoaderH5(BaseLoader):
+    """Base H5 loader. "Loads" h5 files so that a `.data` attribute
+    provides access to the data in the files. This object provides a
+    `__getitem__` method that can be used by
+    :class:`~sup3r.preprocessing.samplers.Sampler` objects to build batches or
+    by :class:`~sup3r.preprocessing.rasterizers.Rasterizer` objects to derive /
+    extract specific features / regions / time_periods.
+    """
+
+    BASE_LOADER = MultiFileWindX
+
+    @property
+    def _time_independent(self):
+        return 'time_index' not in self.res
+
+    @property
+    def _time_steps(self):
+        return (
+            len(self.res['time_index']) if not self._time_independent else None
+        )
+
+    def _meta_shape(self):
+        """Get shape of spatial domain only."""
+        if 'latitude' in self.res.h5:
+            return self.res.h5['latitude'].shape
+        return self.res.h5['meta']['latitude'].shape
+
+    def _is_spatial_dset(self, data):
+        """Check if given data is spatial only. We compare against the size of
+        the meta."""
+        return len(data.shape) == 1 and len(data) == self._meta_shape()[0]
+
+    def _res_shape(self):
+        """Get shape of H5 file.
+
+        Note
+        ----
+        Flattened files are 2D but we have 3D H5 files available through
+        caching and bias correction factor calculations."""
+        return (
+            self._meta_shape()
+            if self._time_independent
+            else (self._time_steps, *self._meta_shape())
+        )
+
+    def _get_coords(self, dims):
+        """Get coords dict for xr.Dataset construction."""
+        coords: Dict[str, Tuple] = {}
+        if not self._time_independent:
+            coords[Dimension.TIME] = self.res['time_index']
+        coord_base = (
+            self.res.h5 if 'latitude' in self.res.h5 else self.res.h5['meta']
+        )
+        coord_dims = dims[-len(self._meta_shape()) :]
+        chunks = self._parse_chunks(coord_dims)
+        lats = da.asarray(
+            coord_base['latitude'], dtype=np.float32, chunks=chunks
+        )
+        lats = (coord_dims, lats)
+        lons = da.asarray(
+            coord_base['longitude'], dtype=np.float32, chunks=chunks
+        )
+        lons = (coord_dims, lons)
+        coords.update({Dimension.LATITUDE: lats, Dimension.LONGITUDE: lons})
+        return coords
+
+    def _get_dset_tuple(self, dset, dims, chunks):
+        """Get tuple of (dims, array, attrs) for given dataset. Used in
+        data_vars entries. This accounts for multiple data shapes and
+        dimensions. e.g. Data can be 2D spatial only, 2D flattened
+        spatiotemporal, 3D spatiotemporal, 4D spatiotemporal (with pressure
+        levels), etc
+        """
+        # if self.res includes time-dependent and time-independent variables
+        # and chunks is 3-tuple we only use the spatial chunk for
+        # time-independent variables
+        dset_chunks = chunks
+        if len(chunks) == 3 and len(self.res.h5[dset].shape) == 2:
+            dset_chunks = chunks[-len(self.res.h5[dset].shape) :]
+        arr = da.asarray(
+            self.res.h5[dset], dtype=np.float32, chunks=dset_chunks
+        )
+        arr /= self.scale_factor(dset)
+        if len(arr.shape) == 4:
+            msg = (
+                f'{dset} array is 4 dimensional. Assuming this is an array '
+                'of spatiotemporal quantiles.'
+            )
+            logger.warning(msg)
+            warn(msg)
+            arr_dims = Dimension.dims_4d_bc()
+        elif len(arr.shape) == 3 and self._time_independent:
+            msg = (
+                f'{dset} array is 3 dimensional but {self.file_paths} has '
+                f'no time index. Assuming this is an array of bias correction '
+                'factors.'
+            )
+            logger.warning(msg)
+            warn(msg)
+            if arr.shape[-1] == 1:
+                arr_dims = (*Dimension.dims_2d(), Dimension.GLOBAL_TIME)
+            else:
+                arr_dims = Dimension.dims_3d()
+        elif self._is_spatial_dset(arr):
+            arr_dims = (Dimension.FLATTENED_SPATIAL,)
+        elif len(arr.shape) == 2:
+            arr_dims = dims[-len(arr.shape) :]
+        elif len(arr.shape) == 1:
+            msg = (
+                f'Received 1D feature "{dset}" with shape that matches '
+                'neither the length of the meta nor the time_index.'
+            )
+            is_ts = not self._time_independent and len(arr) == self._time_steps
+            assert is_ts, msg
+            arr_dims = (Dimension.TIME,)
+        else:
+            arr_dims = dims[: len(arr.shape)]
+        return (arr_dims, arr, dict(self.res.h5[dset].attrs))
+
+    def _parse_chunks(self, dims, feature=None):
+        """Get chunks for given dimensions from ``self.chunks``."""
+        chunks = super()._parse_chunks(dims=dims, feature=feature)
+        if not isinstance(chunks, dict):
+            return chunks
+        return tuple(chunks.get(d, 'auto') for d in dims)
+
+    def _check_for_elevation(self, data_vars, dims, chunks):
+        """Check if this is a flattened h5 file with elevation data and add
+        elevation to data_vars if it is."""
+
+        flattened_with_elevation = (
+            len(self._meta_shape()) == 1
+            and hasattr(self.res, 'meta')
+            and 'elevation' in self.res.meta
+        )
+        if flattened_with_elevation:
+            elev = self.res.meta['elevation'].values.astype(np.float32)
+            elev = da.asarray(elev)
+            if not self._time_independent:
+                t_steps = len(self.res['time_index'])
+                elev = da.repeat(elev[None, ...], t_steps, axis=0)
+            elev = elev.rechunk(chunks)
+            data_vars['elevation'] = (dims, elev)
+        return data_vars
+
+    def _get_data_vars(self, dims):
+        """Define data_vars dict for xr.Dataset construction."""
+        data_vars = {}
+        logger.debug(f'Rechunking features with chunks: {self.chunks}')
+        chunks = self._parse_chunks(dims)
+        data_vars = self._check_for_elevation(
+            data_vars, dims=dims, chunks=chunks
+        )
+
+        feats = set(self.res.h5.datasets)
+        exclude = {
+            'meta',
+            'time_index',
+            'coordinates',
+            'latitude',
+            'longitude',
+        }
+        for f in feats - exclude:
+            data_vars[f] = self._get_dset_tuple(
+                dset=f, dims=dims, chunks=chunks
+            )
+        return data_vars
+
+    def _get_dims(self):
+        """Get tuple of named dims for dataset."""
+        if len(self._meta_shape()) == 2:
+            dims = Dimension.dims_2d()
+        else:
+            dims = (Dimension.FLATTENED_SPATIAL,)
+        if not self._time_independent:
+            dims = (Dimension.TIME, *dims)
+        return dims
+
+    def _load(self) -> xr.Dataset:
+        """Wrap data in xarray.Dataset(). Handle differences with flattened and
+        cached h5."""
+        dims = self._get_dims()
+        coords = self._get_coords(dims)
+        data_vars = {
+            k: v
+            for k, v in self._get_data_vars(dims).items()
+            if k not in coords
+        }
+        return xr.Dataset(coords=coords, data_vars=data_vars).astype(
+            np.float32
+        )
+[docs] + def scale_factor(self, feature): + """Get scale factor for given feature. Data is stored in scaled form to + reduce memory.""" + feat = feature if feature in self.res.datasets else feature.lower() + feat = self.res.h5[feat] + return np.float32( + 1.0 + if not hasattr(feat, 'attrs') + else feat.attrs.get('scale_factor', 1.0) + )
+
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/loaders/nc.html b/_modules/sup3r/preprocessing/loaders/nc.html
new file mode 100644
index 000000000..4e8a107ea
--- /dev/null
+++ b/_modules/sup3r/preprocessing/loaders/nc.html
@@ -0,0 +1,820 @@

Source code for sup3r.preprocessing.loaders.nc

+"""Base loading classes. These are containers which also load data from
+file_paths and include some sampling ability to interface with batcher
+classes."""
+
+import logging
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+
+from sup3r.preprocessing.names import COORD_NAMES, DIM_NAMES, Dimension
+from sup3r.preprocessing.utilities import lower_names, ordered_dims
+from sup3r.utilities.utilities import xr_open_mfdataset
+
+from .base import BaseLoader
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class LoaderNC(BaseLoader): + """Base NETCDF loader. "Loads" netcdf files so that a ``.data`` attribute + provides access to the data in the files. This object provides a + ``__getitem__`` method that can be used by Sampler objects to build batches + or by other objects to derive / extract specific features / regions / + time_periods.""" + +
+[docs] + def BASE_LOADER(self, file_paths, **kwargs): + """Lowest level interface to data.""" + return xr_open_mfdataset(file_paths, **kwargs)
+ + + def _enforce_descending_lats(self, dset): + """Make sure latitudes are in descending order so that min lat / lon is + at ``lat_lon[-1, 0]``.""" + invert_lats = ( + dset[Dimension.LATITUDE][-1, 0] > dset[Dimension.LATITUDE][0, 0] + ) + if invert_lats: + for var in [*list(Dimension.coords_2d()), *list(dset.data_vars)]: + if Dimension.SOUTH_NORTH in dset[var].dims: + new_var = dset[var].isel(south_north=slice(None, None, -1)) + dset.update({var: new_var}) + return dset + + def _enforce_descending_levels(self, dset): + """Make sure levels are in descending order so that max pressure is at + ``level[0]``.""" + invert_levels = ( + dset[Dimension.PRESSURE_LEVEL][-1] + > dset[Dimension.PRESSURE_LEVEL][0] + if Dimension.PRESSURE_LEVEL in dset + else False + ) + if invert_levels: + for var in list(dset.data_vars): + if Dimension.PRESSURE_LEVEL in dset[var].dims: + new_var = dset[var].isel( + {Dimension.PRESSURE_LEVEL: slice(None, None, -1)} + ) + dset.update( + {var: (dset[var].dims, new_var.data, dset[var].attrs)} + ) + new_press = dset[Dimension.PRESSURE_LEVEL][::-1] + dset.update({Dimension.PRESSURE_LEVEL: new_press}) + return dset + +
+[docs] + @staticmethod + def get_coords(res): + """Get coordinate dictionary to use in + ``xr.Dataset().assign_coords()``.""" + lats = res[Dimension.LATITUDE].data.astype(np.float32) + lons = res[Dimension.LONGITUDE].data.astype(np.float32) + + # remove time dimension if there's a single time step + if lats.ndim == 3: + lats = lats.squeeze() + if lons.ndim == 3: + lons = lons.squeeze() + + if len(lats.shape) == 1: + lons, lats = da.meshgrid(lons, lats) + + lats = ((Dimension.SOUTH_NORTH, Dimension.WEST_EAST), lats) + lons = ((Dimension.SOUTH_NORTH, Dimension.WEST_EAST), lons) + coords = {Dimension.LATITUDE: lats, Dimension.LONGITUDE: lons} + + if Dimension.TIME in res: + if Dimension.TIME in res.indexes: + times = res.indexes[Dimension.TIME] + else: + times = res[Dimension.TIME] + + if hasattr(times, 'to_datetimeindex'): + times = times.to_datetimeindex() + + coords[Dimension.TIME] = times + return coords
+ + +
+[docs]
+    @staticmethod
+    def get_dims(res):
+        """Get dimension name map using our standard mapping and the names
+        used for coordinate dimensions."""
+        rename_dims = {k: v for k, v in DIM_NAMES.items() if k in res.dims}
+        lat_dims = res[Dimension.LATITUDE].dims
+        lon_dims = res[Dimension.LONGITUDE].dims
+        if len(lat_dims) == 1 and len(lon_dims) == 1:
+            rename_dims[lat_dims[0]] = Dimension.SOUTH_NORTH
+            rename_dims[lon_dims[0]] = Dimension.WEST_EAST
+        else:
+            msg = (
+                'Latitude and Longitude dimension names are different. '
+                'This is weird.'
+            )
+            if lon_dims != lat_dims:
+                logger.warning(msg)
+                warn(msg)
+            else:
+                rename_dims.update(
+                    dict(zip(ordered_dims(lat_dims), Dimension.dims_2d()))
+                )
+        return rename_dims
+ + + def _rechunk_dsets(self, res): + """Apply given chunk values for each field in res.coords and + res.data_vars.""" + for dset in [*list(res.coords), *list(res.data_vars)]: + chunks = self._parse_chunks(dims=res[dset].dims, feature=dset) + if chunks != 'auto': + res[dset] = res[dset].chunk(chunks) + return res + + def _load(self): + """Load netcdf ``xarray.Dataset()``.""" + res = lower_names(self.res) + rename_coords = { + k: v for k, v in COORD_NAMES.items() if k in res and v not in res + } + res = res.rename(rename_coords) + + if not all(coord in res for coord in Dimension.coords_2d()): + err = 'Could not find valid coordinates in given files: %s' + logger.error(err, self.file_paths) + raise OSError(err % (self.file_paths)) + + res = res.swap_dims(self.get_dims(res)) + res = res.assign_coords(self.get_coords(res)) + res = self._enforce_descending_lats(res) + res = self._rechunk_dsets(res) + return self._enforce_descending_levels(res).astype(np.float32)
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/loaders/utilities.html b/_modules/sup3r/preprocessing/loaders/utilities.html
new file mode 100644
index 000000000..15e9350fc
--- /dev/null
+++ b/_modules/sup3r/preprocessing/loaders/utilities.html
@@ -0,0 +1,704 @@

Source code for sup3r.preprocessing.loaders.utilities

+"""Utilities used by Loaders."""
+
+import pandas as pd
+
+from sup3r.preprocessing.names import Dimension
+
+
+
+[docs] +def standardize_names(data, standard_names): + """Standardize fields in the dataset using the `standard_names` + dictionary.""" + data = data.rename({k: v for k, v in standard_names.items() if k in data}) + return data
+[docs]
+def standardize_values(data):
+    """Standardize units and coordinate values. e.g. All temperatures in
+    celsius, all longitudes between -180 and 180, etc.
+
+    Parameters
+    ----------
+    data : xr.Dataset
+        xarray dataset to be updated with standardized values.
+    """
+    for var in data.data_vars:
+        attrs = data[var].attrs
+        if 'units' in attrs and attrs['units'] == 'K':
+            data.update({var: data[var] - 273.15})
+            attrs['units'] = 'C'
+        data[var].attrs.update(attrs)
+
+    lons = (data[Dimension.LONGITUDE] + 180.0) % 360.0 - 180.0
+    data[Dimension.LONGITUDE] = lons
+
+    if Dimension.TIME in data.coords:
+        if isinstance(data[Dimension.TIME].values[0], bytes):
+            times = [t.decode('utf-8') for t in data[Dimension.TIME].values]
+            data[Dimension.TIME] = times
+        data[Dimension.TIME] = pd.to_datetime(data[Dimension.TIME])
+
+    return data
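The longitude wrap above maps a 0-360 convention onto -180 to 180, e.g. (a quick numeric check):

lon = 350.0
print((lon + 180.0) % 360.0 - 180.0)  # -10.0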
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/names.html b/_modules/sup3r/preprocessing/names.html
new file mode 100644
index 000000000..8d8b98924
--- /dev/null
+++ b/_modules/sup3r/preprocessing/names.html
@@ -0,0 +1,892 @@

Source code for sup3r.preprocessing.names

+"""Mappings from coord / dim / feature names to standard names and Dimension
+class for standardizing dimension orders and names."""
+
+from enum import Enum
+
+
+
+[docs] +class Dimension(str, Enum): + """Dimension names used for Sup3rX accessor.""" + + FLATTENED_SPATIAL = 'space' + SOUTH_NORTH = 'south_north' + WEST_EAST = 'west_east' + TIME = 'time' + PRESSURE_LEVEL = 'level' + HEIGHT = 'height' + VARIABLE = 'variable' + LATITUDE = 'latitude' + LONGITUDE = 'longitude' + QUANTILE = 'quantile' + GLOBAL_TIME = 'global_time' + + def __str__(self): + return self.value + +
+[docs] + @classmethod + def order(cls): + """Return standard dimension order.""" + return ( + cls.FLATTENED_SPATIAL, + cls.SOUTH_NORTH, + cls.WEST_EAST, + cls.TIME, + cls.PRESSURE_LEVEL, + cls.HEIGHT, + cls.VARIABLE, + )
+[docs] + @classmethod + def flat_2d(cls): + """Return ordered tuple for 2d flattened data.""" + return (cls.FLATTENED_SPATIAL, cls.TIME)
+[docs] + @classmethod + def dims_2d(cls): + """Return ordered tuple for 2d spatial dimensions. Usually + (south_north, west_east)""" + return (cls.SOUTH_NORTH, cls.WEST_EAST)
+[docs] + @classmethod + def coords_2d(cls): + """Return ordered tuple for 2d spatial coordinates.""" + return (cls.LATITUDE, cls.LONGITUDE)
+[docs] + @classmethod + def coords_3d(cls): + """Return ordered tuple for 3d spatiotemporal coordinates.""" + return (cls.LATITUDE, cls.LONGITUDE, cls.TIME)
+ + +
+[docs] + @classmethod + def dims_3d(cls): + """Return ordered tuple for 3d spatiotemporal dimensions.""" + return (cls.SOUTH_NORTH, cls.WEST_EAST, cls.TIME)
+ + +
+[docs] + @classmethod + def dims_4d(cls): + """Return ordered tuple for 4d spatiotemporal dimensions.""" + return (cls.SOUTH_NORTH, cls.WEST_EAST, cls.TIME, cls.HEIGHT)
+ + +
+[docs] + @classmethod + def coords_4d_pres(cls): + """Return ordered tuple for 4d coordinates, with a pressure level.""" + return (cls.LATITUDE, cls.LONGITUDE, cls.TIME, cls.PRESSURE_LEVEL)
+ + +
+[docs] + @classmethod + def coords_4d(cls): + """Return ordered tuple for 4d coordinates, with a height level.""" + return (cls.LATITUDE, cls.LONGITUDE, cls.TIME, cls.HEIGHT)
+ + +
+[docs] + @classmethod + def dims_4d_pres(cls): + """Return ordered tuple for 4d spatiotemporal dimensions with vertical + pressure levels""" + return (cls.SOUTH_NORTH, cls.WEST_EAST, cls.TIME, cls.PRESSURE_LEVEL)
+ + +
+[docs] + @classmethod + def dims_3d_bc(cls): + """Return ordered tuple for 3d spatiotemporal dimensions.""" + return (cls.SOUTH_NORTH, cls.WEST_EAST, cls.TIME)
+ + +
+[docs] + @classmethod + def dims_4d_bc(cls): + """Return ordered tuple for 4d spatiotemporal dimensions specifically + for bias correction factor files.""" + return (cls.SOUTH_NORTH, cls.WEST_EAST, cls.TIME, cls.QUANTILE)
+
+ + + +# mapping from common feature names to our standard ones +FEATURE_NAMES = { + 'elevation': 'topography', + 'orog': 'topography', + 'hgt': 'topography', +} + + +# mapping from common coordinate names to our standard names +COORD_NAMES = { + 'lat': Dimension.LATITUDE, + 'lon': Dimension.LONGITUDE, + 'xlat': Dimension.LATITUDE, + 'xlong': Dimension.LONGITUDE, + 'plev': Dimension.PRESSURE_LEVEL, + 'isobaricInhPa': Dimension.PRESSURE_LEVEL, + 'pressure_level': Dimension.PRESSURE_LEVEL, + 'xtime': Dimension.TIME, + 'valid_time': Dimension.TIME, + 'west_east': Dimension.LONGITUDE, + 'south_north': Dimension.LATITUDE +} + +# mapping from common dimension names to our standard names +DIM_NAMES = { + 'lat': Dimension.SOUTH_NORTH, + 'lon': Dimension.WEST_EAST, + 'xlat': Dimension.SOUTH_NORTH, + 'xlong': Dimension.WEST_EAST, + 'latitude': Dimension.SOUTH_NORTH, + 'longitude': Dimension.WEST_EAST, + 'plev': Dimension.PRESSURE_LEVEL, + 'isobaricInhPa': Dimension.PRESSURE_LEVEL, + 'pressure_level': Dimension.PRESSURE_LEVEL, + 'xtime': Dimension.TIME, + 'valid_time': Dimension.TIME +} + + +# ERA5 variable names + +# variables available on a single level (e.g. surface) +SFC_VARS = [ + 'surface_sensible_heat_flux', + '10m_u_component_of_wind', + '10m_v_component_of_wind', + '100m_u_component_of_wind', + '100m_v_component_of_wind', + 'surface_pressure', + '2m_temperature', + 'geopotential', + 'total_precipitation', + 'convective_available_potential_energy', + '2m_dewpoint_temperature', + 'convective_inhibition', + 'surface_latent_heat_flux', + 'instantaneous_moisture_flux', + 'mean_total_precipitation_rate', + 'mean_sea_level_pressure', + 'friction_velocity', + 'lake_cover', + 'high_vegetation_cover', + 'land_sea_mask', + 'k_index', + 'forecast_surface_roughness', + 'northward_turbulent_surface_stress', + 'eastward_turbulent_surface_stress', + 'sea_surface_temperature', +] + +# variables available on multiple pressure levels +LEVEL_VARS = [ + 'u_component_of_wind', + 'v_component_of_wind', + 'geopotential', + 'temperature', + 'relative_humidity', + 'specific_humidity', + 'divergence', + 'vertical_velocity', + 'pressure', + 'potential_vorticity', +] + +ERA_NAME_MAP = { + 'u10': 'u_10m', + 'v10': 'v_10m', + 'u100': 'u_100m', + 'v100': 'v_100m', + 't': 'temperature', + 't2m': 'temperature_2m', + 'sp': 'pressure_0m', + 'r': 'relativehumidity', + 'relative_humidity': 'relativehumidity', + 'q': 'specifichumidity', + 'd': 'divergence', +} +
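As a quick illustration of how these standard names are used downstream (a sketch; the array shapes here are arbitrary assumptions):

    import numpy as np
    import xarray as xr

    from sup3r.preprocessing.names import Dimension

    # Members of ``Dimension`` are plain strings, so they can be used
    # anywhere xarray expects a dimension or coordinate name.
    dims = Dimension.dims_3d()  # (south_north, west_east, time)
    ds = xr.Dataset(
        {'temperature_2m': (dims, np.zeros((4, 5, 10), dtype=np.float32))}
    )
    assert str(Dimension.TIME) == 'time'
    assert ds['temperature_2m'].dims == dims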
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/rasterizers/base.html b/_modules/sup3r/preprocessing/rasterizers/base.html
new file mode 100644
index 000000000..0b00cf871
--- /dev/null
+++ b/_modules/sup3r/preprocessing/rasterizers/base.html
@@ -0,0 +1,908 @@
Source code for sup3r.preprocessing.rasterizers.base

+"""Objects that can rasterize 3D spatiotemporal data. Examples include WRF,
+ERA5, and GCM data. Can also work with 3D H5 data, just not flattened H5 data
+like WTK and NSRDB."""
+
+import logging
+from warnings import warn
+
+import numpy as np
+
+from sup3r.preprocessing.base import Container
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import (
+    _parse_time_slice,
+    compute_if_dask,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+class BaseRasterizer(Container):
+    """Container subclass with additional methods for extracting a
+    spatiotemporal extent from contained data.
+
+    Note
+    ----
+    This `Rasterizer` base class is for 3D rasterized data. This usually
+    comes from NETCDF files but can also be cached H5 files saved from
+    previously rasterized data. For 3D, whether H5 or NETCDF, the full
+    domain will be rasterized automatically if no target / shape are
+    provided."""
+
+    def __init__(
+        self,
+        loader,
+        features='all',
+        target=None,
+        shape=None,
+        time_slice=slice(None),
+        threshold=None,
+    ):
+        """
+        Parameters
+        ----------
+        loader : Loader
+            Loader type container with `.data` attribute exposing data to
+            extract.
+        features : list | str
+            Features to return in loaded dataset. If 'all' then all
+            available features will be returned.
+        target : tuple
+            (lat, lon) lower left corner of raster. Either need
+            target+shape or raster_file.
+        shape : tuple
+            (rows, cols) grid size. Either need target+shape or
+            raster_file.
+        time_slice : slice | list
+            Slice specifying extent and step of temporal extraction. e.g.
+            slice(start, stop, step). If equal to slice(None, None, 1) the
+            full time dimension is selected. Can also be a list
+            ``[start, stop, step]``
+        threshold : float
+            Nearest neighbor euclidean distance threshold. If the
+            coordinates are more than this value away from the target
+            lat/lon, an error is raised.
+        """
+        super().__init__(data=loader.data)
+        self.loader = loader
+        self.threshold = threshold
+        self.time_slice = time_slice
+        self.grid_shape = shape
+        self.target = target
+        self.full_lat_lon = self.data.lat_lon
+        self.raster_index = self.get_raster_index()
+        self.time_index = (
+            loader.time_index[self.time_slice]
+            if 'time' in loader.data.indexes
+            else None
+        )
+        self._lat_lon = None
+        self.data = self.rasterize_data()
+        self.data = self.data[features]
+
+    @property
+    def time_slice(self):
+        """Return time slice for rasterized time period."""
+        return self._time_slice
+
+    @time_slice.setter
+    def time_slice(self, value):
+        """Set and sanitize the time slice."""
+        self._time_slice = _parse_time_slice(value)
+
+    @property
+    def target(self):
+        """Return the true value based on the closest lat lon instead of
+        the user provided value self._target, which is used to find the
+        closest lat lon."""
+        return np.asarray(self.lat_lon[-1, 0])
+
+    @target.setter
+    def target(self, value):
+        """Set the private target attribute. Ultimately target is
+        determined by lat_lon but _target is set to the bottom left corner
+        of the full domain if None and then used to get the raster_index,
+        which is then used to get the lat_lon"""
+        self._target = np.asarray(value) if value is not None else None
+
+    @property
+    def grid_shape(self):
+        """Return the grid_shape based on the raster_index, since
+        self._grid_shape does not need to be provided as an input if the
+        raster_file is."""
+        return self.lat_lon.shape[:-1]
+
+    @grid_shape.setter
+    def grid_shape(self, value):
+        """Set the private grid_shape attribute. Ultimately grid_shape is
+        determined by lat_lon but _grid_shape is set to the full domain if
+        None and then used to get the raster_index, which is then used to
+        get the lat_lon"""
+        self._grid_shape = value
+
+    @property
+    def lat_lon(self):
+        """Get 2D grid of coordinates with `target` as the lower left
+        coordinate. (lats, lons, 2)"""
+        if self._lat_lon is None:
+            self._lat_lon = self.get_lat_lon()
+        return self._lat_lon
+
+    def rasterize_data(self):
+        """Get rasterized data."""
+        logger.info(
+            'Rasterizing data for target / shape: %s / %s',
+            np.asarray(self._target),
+            np.asarray(self._grid_shape),
+        )
+        kwargs = dict(zip(Dimension.dims_2d(), self.raster_index))
+        if Dimension.TIME in self.loader.dims:
+            kwargs[Dimension.TIME] = self.time_slice
+        return self.loader.isel(**kwargs)
+
+    def check_target_and_shape(self, full_lat_lon):
+        """The data is assumed to use a regular grid so if either target
+        or shape is not given we can easily find the values that give the
+        maximum extent."""
+        if self._target is None:
+            self._target = full_lat_lon[-1, 0, :]
+        if self._grid_shape is None:
+            self._grid_shape = full_lat_lon.shape[:-1]
+
+    def get_raster_index(self):
+        """Get set of slices or indices selecting the requested region
+        from the contained data."""
+        logger.info(
+            'Getting raster index for target / shape: %s / %s',
+            np.asarray(self._target),
+            np.asarray(self._grid_shape),
+        )
+        self.check_target_and_shape(self.full_lat_lon)
+        row, col = self.get_closest_row_col(self.full_lat_lon, self._target)
+        lat_slice = slice(row - self._grid_shape[0] + 1, row + 1)
+        lon_slice = slice(col, col + self._grid_shape[1])
+        return self._check_raster_index(lat_slice, lon_slice)
+
+    def _check_raster_index(self, lat_slice, lon_slice):
+        """Check if raster index has bounds which exceed available region
+        and crop if so."""
+        lat_start, lat_end = lat_slice.start, lat_slice.stop
+        lon_start, lon_end = lon_slice.start, lon_slice.stop
+        lat_start = max(lat_start, 0)
+        lat_end = min(lat_end, self.full_lat_lon.shape[0])
+        lon_start = max(lon_start, 0)
+        lon_end = min(lon_end, self.full_lat_lon.shape[1])
+        new_lat_slice = slice(lat_start, lat_end)
+        new_lon_slice = slice(lon_start, lon_end)
+        msg = (
+            f'Computed lat_slice = {compute_if_dask(lat_slice)} exceeds '
+            f'available region. Using {compute_if_dask(new_lat_slice)}.'
+        )
+        if lat_slice != new_lat_slice:
+            logger.warning(msg)
+            warn(msg)
+        msg = (
+            f'Computed lon_slice = {compute_if_dask(lon_slice)} exceeds '
+            f'available region. Using {compute_if_dask(new_lon_slice)}.'
+        )
+        if lon_slice != new_lon_slice:
+            logger.warning(msg)
+            warn(msg)
+        return new_lat_slice, new_lon_slice
+
+    def get_closest_row_col(self, lat_lon, target):
+        """Get closest indices to target lat lon
+
+        Parameters
+        ----------
+        lat_lon : ndarray
+            Array of lat/lon
+            (spatial_1, spatial_2, 2)
+            Last dimension in order of (lat, lon)
+        target : tuple
+            (lat, lon) for target coordinate
+
+        Returns
+        -------
+        row : int
+            row index for closest lat/lon to target lat/lon
+        col : int
+            col index for closest lat/lon to target lat/lon
+        """
+        dist = np.hypot(
+            lat_lon[..., 0] - target[0], lat_lon[..., 1] - target[1]
+        )
+        row, col = np.unravel_index(np.argmin(dist, axis=None), dist.shape)
+        msg = (
+            'The distance between the closest coordinate: '
+            f'{np.asarray(lat_lon[row, col])} and the requested '
+            f'target: {np.asarray(target)} for files: '
+            f'{self.loader.file_paths} is {np.asarray(dist.min())}.'
+        )
+        if self.threshold is not None and dist.min() > self.threshold:
+            add_msg = f'This exceeds the given threshold: {self.threshold}'
+            logger.error(f'{msg} {add_msg}')
+            raise RuntimeError(f'{msg} {add_msg}')
+        logger.info(msg)
+        return row, col
+
+    def get_lat_lon(self):
+        """Get the 2D array of coordinates corresponding to the requested
+        target and shape."""
+        return self.full_lat_lon[self.raster_index[0], self.raster_index[1]]
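A minimal usage sketch of this class (the file path, feature names, target, and shape are all hypothetical):

    from sup3r.preprocessing.loaders import Loader
    from sup3r.preprocessing.rasterizers.base import BaseRasterizer

    # Hypothetical 3D (un-flattened) netCDF input; target is the (lat, lon)
    # lower-left corner and shape is the (rows, cols) raster size.
    loader = Loader('./era5_2020.nc', features=['u_100m', 'v_100m'])
    rasterizer = BaseRasterizer(
        loader,
        features=['u_100m', 'v_100m'],
        target=(39.0, -105.0),
        shape=(20, 20),
        time_slice=slice(0, 24),
    )
    # rasterizer.data now holds the (20, 20, 24) extent for both features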
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/rasterizers/dual.html b/_modules/sup3r/preprocessing/rasterizers/dual.html
new file mode 100644
index 000000000..9aa9ebac7
--- /dev/null
+++ b/_modules/sup3r/preprocessing/rasterizers/dual.html
@@ -0,0 +1,912 @@
Source code for sup3r.preprocessing.rasterizers.dual

+"""Paired rasterizer class for matching separate low_res and high_res
+datasets"""
+
+import logging
+from typing import Tuple, Union
+from warnings import warn
+
+import numpy as np
+import pandas as pd
+import xarray as xr
+from rex.utilities.regridder import Regridder
+
+from sup3r.preprocessing.base import Container, Sup3rDataset
+from sup3r.preprocessing.cachers import Cacher
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.utilities import log_args
+from sup3r.utilities.utilities import spatial_coarsening
+
+logger = logging.getLogger(__name__)
+
+
+
+class DualRasterizer(Container):
+    """Object containing xr.Dataset instances for low and high-res data
+    (usually ERA5 and WTK, respectively). This essentially just regrids
+    the low-res data to the coarsened high-res grid. This is useful for
+    prepping and caching data which can then go directly to a
+    :class:`~sup3r.preprocessing.samplers.dual.DualSampler` or
+    :class:`~sup3r.preprocessing.batch_queues.dual.DualBatchQueue`.
+
+    Note
+    ----
+    When first extracting the low_res data make sure to extract a region
+    that completely overlaps the high_res region. It is easiest to load
+    the full low_res domain and let :class:`.DualRasterizer` select the
+    appropriate region through regridding.
+    """
+
+    @log_args
+    def __init__(
+        self,
+        data: Union[Sup3rDataset, Tuple[xr.Dataset, xr.Dataset]],
+        regrid_workers=1,
+        regrid_lr=True,
+        s_enhance=1,
+        t_enhance=1,
+        lr_cache_kwargs=None,
+        hr_cache_kwargs=None,
+    ):
+        """Initialize data container with lr and hr :class:`Data`
+        instances. Typically lr = ERA5 data and hr = WTK data.
+
+        Parameters
+        ----------
+        data : Sup3rDataset | Tuple[xr.Dataset, xr.Dataset]
+            A tuple of xr.Dataset instances. The first must be low-res
+            and the second must be high-res data
+        regrid_workers : int | None
+            Number of workers to use for regridding routine.
+        regrid_lr : bool
+            Flag to regrid the low-res data to the high-res grid. This
+            will take care of any minor inconsistencies in different
+            projections. Disable this if the grids are known to be the
+            same.
+        s_enhance : int
+            Spatial enhancement factor
+        t_enhance : int
+            Temporal enhancement factor
+        lr_cache_kwargs : dict
+            Cache kwargs for the call to lr_data.cache_data(cache_kwargs).
+            Must include 'cache_pattern' key if not None, and can also
+            include dictionary of chunk tuples with feature keys
+        hr_cache_kwargs : dict
+            Cache kwargs for the call to hr_data.cache_data(cache_kwargs).
+            Must include 'cache_pattern' key if not None, and can also
+            include dictionary of chunk tuples with feature keys
+        """
+        self.s_enhance = s_enhance
+        self.t_enhance = t_enhance
+        if isinstance(data, tuple):
+            data = Sup3rDataset(low_res=data[0], high_res=data[1])
+        msg = (
+            'The DualRasterizer requires either a data tuple with two '
+            'members, low and high resolution in that order, or a '
+            f'Sup3rDataset instance. Received {type(data)}.'
+        )
+        assert isinstance(data, Sup3rDataset), msg
+        self.lr_data, self.hr_data = data.low_res, data.high_res
+        self.regrid_workers = regrid_workers
+
+        lr_step = self.lr_data.time_step
+        hr_step = self.hr_data.time_step
+        msg = (
+            f'Time steps of high-res data ({hr_step} seconds) and low-res '
+            f'data ({lr_step} seconds) are inconsistent with t_enhance = '
+            f'{self.t_enhance}.'
+        )
+        assert np.allclose(lr_step, hr_step * self.t_enhance), msg
+
+        self.lr_required_shape = (
+            self.hr_data.shape[0] // self.s_enhance,
+            self.hr_data.shape[1] // self.s_enhance,
+            self.hr_data.shape[2] // self.t_enhance,
+        )
+        self.hr_required_shape = (
+            self.s_enhance * self.lr_required_shape[0],
+            self.s_enhance * self.lr_required_shape[1],
+            self.t_enhance * self.lr_required_shape[2],
+        )
+
+        msg = (
+            f'The required low-res shape {self.lr_required_shape} is '
+            'inconsistent with the shape of the raw data '
+            f'{self.lr_data.shape}'
+        )
+        assert all(
+            req_s <= true_s
+            for req_s, true_s in zip(
+                self.lr_required_shape, self.lr_data.shape
+            )
+        ), msg
+
+        self.hr_lat_lon = self.hr_data.lat_lon[
+            slice(self.hr_required_shape[0]), slice(self.hr_required_shape[1])
+        ]
+        self.lr_lat_lon = spatial_coarsening(
+            self.hr_lat_lon, s_enhance=self.s_enhance, obs_axis=False
+        )
+        self._regrid_lr = regrid_lr
+
+        self.update_lr_data()
+        self.update_hr_data()
+        super().__init__(data=(self.lr_data, self.hr_data))
+
+        self.check_regridded_lr_data()
+
+        if lr_cache_kwargs is not None:
+            Cacher(self.lr_data, lr_cache_kwargs)
+
+        if hr_cache_kwargs is not None:
+            Cacher(self.hr_data, hr_cache_kwargs)
+
+    def update_hr_data(self):
+        """Set the high resolution data attribute and check if
+        hr_data.shape is divisible by s_enhance. If not, take the largest
+        shape that can be."""
+        msg = (
+            f'hr_data.shape: {self.hr_data.shape[:3]} is not '
+            f'divisible by s_enhance: {self.s_enhance}. Using shape: '
+            f'{self.hr_required_shape} instead.'
+        )
+        need_new_shape = self.hr_data.shape[:3] != self.hr_required_shape[:3]
+        if need_new_shape:
+            logger.warning(msg)
+            warn(msg)
+
+        hr_data_new = {}
+        for f in self.hr_data.features:
+            hr_slices = [slice(sh) for sh in self.hr_required_shape]
+            hr = self.hr_data.to_dataarray().sel(variable=f).data
+            hr_data_new[f] = hr[tuple(hr_slices)]
+
+        hr_coords_new = {
+            Dimension.LATITUDE: self.hr_lat_lon[..., 0],
+            Dimension.LONGITUDE: self.hr_lat_lon[..., 1],
+            Dimension.TIME: self.hr_data.indexes['time'][
+                : self.hr_required_shape[2]
+            ],
+        }
+        logger.info(
+            'Updating self.hr_data with new shape: '
+            f'{self.hr_required_shape[:3]}'
+        )
+        self.hr_data = self.hr_data.update_ds(
+            {**hr_coords_new, **hr_data_new}
+        )
+
+    def get_regridder(self):
+        """Get regridder object"""
+        target_meta = pd.DataFrame(
+            columns=[Dimension.LATITUDE, Dimension.LONGITUDE],
+            data=self.lr_lat_lon.reshape((-1, 2)),
+        )
+        return Regridder(
+            self.lr_data.meta, target_meta, max_workers=self.regrid_workers
+        )
+
+    def update_lr_data(self):
+        """Regrid low_res data for all requested noncached features. Load
+        cached features if available and overwrite=False"""
+        if self._regrid_lr:
+            logger.info('Regridding low resolution feature data.')
+            regridder = self.get_regridder()
+
+            lr_data_new = {}
+            for f in self.lr_data.features:
+                lr = self.lr_data.to_dataarray().sel(variable=f).data
+                lr = lr[..., : self.lr_required_shape[2]]
+                lr_data_new[f] = regridder(lr).reshape(
+                    self.lr_required_shape
+                )
+
+            lr_coords_new = {
+                Dimension.LATITUDE: self.lr_lat_lon[..., 0],
+                Dimension.LONGITUDE: self.lr_lat_lon[..., 1],
+                Dimension.TIME: self.lr_data.indexes['time'][
+                    : self.lr_required_shape[2]
+                ],
+            }
+            logger.info('Updating self.lr_data with regridded data.')
+            self.lr_data = self.lr_data.update_ds(
+                {**lr_coords_new, **lr_data_new}
+            )
+
+    def check_regridded_lr_data(self):
+        """Check for NaNs after regridding and do NN fill if needed."""
+        fill_feats = []
+        logger.info('Checking for NaNs after regridding')
+        qa_info = self.lr_data.qa()
+        for f in self.lr_data.features:
+            nan_perc = qa_info[f]['nan_perc']
+            if nan_perc > 0:
+                msg = f'{f} data has {nan_perc:.3f}% NaN values!'
+                if nan_perc < 10:
+                    fill_feats.append(f)
+                    logger.warning(msg)
+                    warn(msg)
+                if nan_perc >= 10:
+                    logger.error(msg)
+                    raise ValueError(msg)
+
+        if any(fill_feats):
+            msg = (
+                'Doing nearest neighbor nan fill on low_res data for '
+                f'features = {fill_feats}'
+            )
+            logger.info(msg)
+            self.lr_data = self.lr_data.interpolate_na(
+                features=fill_feats, method='nearest'
+            )
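For context, a hypothetical pairing of a low-res and a high-res dataset. The file paths, enhancement factor, and cache patterns are assumptions for illustration only:

    import xarray as xr

    from sup3r.preprocessing.rasterizers.dual import DualRasterizer

    # Hypothetical pre-rasterized inputs: low-res ERA5 and high-res WTK
    lr = xr.open_dataset('./era5_lowres.nc')
    hr = xr.open_dataset('./wtk_highres.nc')

    pair = DualRasterizer(
        (lr, hr),      # (low_res, high_res), in that order
        s_enhance=20,  # assumed spatial ratio between the two grids
        t_enhance=1,
        lr_cache_kwargs={'cache_pattern': './lr_cache_{feature}.nc'},
        hr_cache_kwargs={'cache_pattern': './hr_cache_{feature}.nc'},
    )
    lr_data, hr_data = pair.lr_data, pair.hr_data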
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/rasterizers/exo.html b/_modules/sup3r/preprocessing/rasterizers/exo.html
new file mode 100644
index 000000000..572563e64
--- /dev/null
+++ b/_modules/sup3r/preprocessing/rasterizers/exo.html
@@ -0,0 +1,1105 @@
Source code for sup3r.preprocessing.rasterizers.exo

+"""Exo data rasterizers for topography and sza
+
+TODO: ExoDataHandler is pretty similar to ExoRasterizer. Maybe a mixin or
+subclass refactor here."""
+
+import logging
+import os
+import shutil
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import ClassVar, Optional, Union
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+import xarray as xr
+from scipy.spatial import KDTree
+
+from sup3r.postprocessing.writers.base import OutputHandler
+from sup3r.preprocessing.accessor import Sup3rX
+from sup3r.preprocessing.base import Sup3rMeta
+from sup3r.preprocessing.cachers import Cacher
+from sup3r.preprocessing.derivers.utilities import SolarZenith
+from sup3r.preprocessing.loaders import Loader
+from sup3r.preprocessing.names import Dimension
+from sup3r.utilities.utilities import nn_fill_array
+
+from ..utilities import (
+    get_class_kwargs,
+    get_input_handler_class,
+    get_source_type,
+    log_args,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+@dataclass
+class BaseExoRasterizer(ABC):
+    """Class to extract high-res (4km+) data rasters for new
+    spatially-enhanced datasets (e.g. GCM files after spatial enhancement)
+    using nearest neighbor mapping and aggregation from NREL datasets
+    (e.g. WTK or NSRDB)
+
+    Parameters
+    ----------
+    file_paths : str | list
+        A single source h5 file to extract raster data from or a list of
+        netcdf files with identical grid. The string can be a unix-style
+        file path which will be passed through glob.glob. This is
+        typically low-res WRF output or GCM netcdf data files that are the
+        source low-resolution data intended to be sup3r resolved.
+    source_file : str
+        Filepath to source data file to get hi-res exogenous data from
+        which will be mapped to the enhanced grid of the file_paths input.
+        Pixels from this source_file will be mapped to their nearest
+        low-res pixel in the file_paths input. Accordingly, source_file
+        should be a significantly higher resolution than file_paths.
+        Warnings will be raised if the low-resolution pixels in file_paths
+        do not have unique nearest pixels from source_file. File format
+        can be .h5 for ExoRasterizerH5 or .nc for ExoRasterizerNC
+    feature : str
+        Name of exogenous feature to rasterize.
+    s_enhance : int
+        Factor by which the Sup3rGan model will enhance the spatial
+        dimensions of low resolution data from file_paths input. For
+        example, if getting topography data, file_paths has 100km data,
+        and s_enhance is 4, this class will output a topography raster
+        corresponding to the file_paths grid enhanced 4x to ~25km
+    t_enhance : int
+        Factor by which the Sup3rGan model will enhance the temporal
+        dimension of low resolution data from file_paths input. For
+        example, if getting "sza" data, file_paths has hourly data, and
+        t_enhance is 4, this class will output an "sza" raster
+        corresponding to ``file_paths``, temporally enhanced 4x to 15 min
+    input_handler_name : str
+        data handler class to use for input data. Provide a string name to
+        match a :class:`~sup3r.preprocessing.rasterizers.Rasterizer`. If
+        None the correct handler will be guessed based on file type and
+        time series properties.
+    input_handler_kwargs : dict | None
+        Any kwargs for initializing the ``input_handler_name`` class.
+    cache_dir : str | './exo_cache'
+        Directory to use for caching rasterized data.
+    chunks : str | dict
+        Dictionary of dimension chunk sizes for returned exo data. e.g.
+        {'time': 100, 'south_north': 100, 'west_east': 100}. This can also
+        just be "auto". This is passed to ``.chunk()`` before returning
+        exo data through the ``.data`` attribute
+    distance_upper_bound : float | None
+        Maximum distance to map high-resolution data from source_file to
+        the low-resolution file_paths input. None (default) will calculate
+        this based on the median distance between points in source_file
+    max_workers : int
+        Number of workers used for writing data to cache files. Gets
+        passed to ``Cacher.write_netcdf.``
+    verbose : bool
+        Whether to log output as each chunk is written to cache file.
+    """
+
+    file_paths: Optional[str] = None
+    source_file: Optional[str] = None
+    feature: Optional[str] = None
+    s_enhance: int = 1
+    t_enhance: int = 1
+    input_handler_name: Optional[str] = None
+    input_handler_kwargs: Optional[dict] = None
+    cache_dir: str = './exo_cache/'
+    chunks: Optional[Union[str, dict]] = 'auto'
+    distance_upper_bound: Optional[float] = None
+    max_workers: int = 1
+    verbose: bool = False
+
+    @log_args
+    def __post_init__(self):
+        self._source_data = None
+        self._tree = None
+        self._hr_lat_lon = None
+        self._source_lat_lon = None
+        self._hr_time_index = None
+        self._source_handler = None
+        self.input_handler_kwargs = self.input_handler_kwargs or {}
+        InputHandler = get_input_handler_class(self.input_handler_name)
+        self.input_handler = InputHandler(
+            self.file_paths,
+            **get_class_kwargs(InputHandler, self.input_handler_kwargs),
+        )
+
+    @property
+    @abstractmethod
+    def source_data(self):
+        """Get the 1D array of source data from the source_file_h5"""
+
+    @property
+    def source_handler(self):
+        """Get the Loader object that handles the exogenous data file."""
+        msg = (
+            f'Getting {self.feature} for full domain from '
+            f'{self.source_file}'
+        )
+        if self._source_handler is None:
+            logger.info(msg)
+            self._source_handler = Loader(
+                file_paths=self.source_file, features=[self.feature]
+            )
+        return self._source_handler
+
+    @property
+    def cache_file(self):
+        """Get cache file name
+
+        Returns
+        -------
+        cache_fp : str
+            Name of cache file. This is a netcdf file which will be saved
+            with :class:`~sup3r.preprocessing.cachers.Cacher` and loaded
+            with :class:`~sup3r.preprocessing.loaders.Loader`
+        """
+        fn = f'exo_{self.feature}_'
+        fn += f'{"_".join(map(str, self.input_handler.target))}_'
+        fn += f'{"x".join(map(str, self.input_handler.grid_shape))}_'
+
+        if len(self.source_data.shape) == 3:
+            start = str(self.hr_time_index[0])
+            start = start.replace(':', '').replace('-', '').replace(' ', '')
+            end = str(self.hr_time_index[-1])
+            end = end.replace(':', '').replace('-', '').replace(' ', '')
+            fn += f'{start}_{end}_'
+
+        fn += f'{self.s_enhance}x_{self.t_enhance}x.nc'
+        cache_fp = os.path.join(self.cache_dir, fn)
+        if self.cache_dir is not None:
+            os.makedirs(self.cache_dir, exist_ok=True)
+        return cache_fp
+
+    @property
+    def coords(self):
+        """Get coords dictionary for initializing xr.Dataset."""
+        coords = {
+            coord: (Dimension.dims_2d(), self.hr_lat_lon[..., i])
+            for i, coord in enumerate(Dimension.coords_2d())
+        }
+        return coords
+
+    @property
+    def source_lat_lon(self):
+        """Get the 2D array (n, 2) of lat, lon data from the
+        source_file_h5"""
+        if self._source_lat_lon is None:
+            with Loader(self.source_file) as res:
+                self._source_lat_lon = res.lat_lon
+        return self._source_lat_lon
+
+    @property
+    def lr_shape(self):
+        """Get the low-resolution spatiotemporal shape"""
+        return (
+            *self.input_handler.lat_lon.shape[:2],
+            len(self.input_handler.time_index),
+        )
+
+    @property
+    def hr_shape(self):
+        """Get the high-resolution spatiotemporal shape"""
+        return (
+            self.s_enhance * self.input_handler.lat_lon.shape[0],
+            self.s_enhance * self.input_handler.lat_lon.shape[1],
+            self.t_enhance * len(self.input_handler.time_index),
+        )
+
+    @property
+    def hr_lat_lon(self):
+        """Lat lon grid for data in format (spatial_1, spatial_2, 2)
+        Lat/Lon array with same ordering in last dimension. This
+        corresponds to the enhanced meta data from the file_paths input
+        * s_enhance.
+
+        Returns
+        -------
+        ndarray
+        """
+        if self._hr_lat_lon is None:
+            self._hr_lat_lon = (
+                OutputHandler.get_lat_lon(
+                    self.input_handler.lat_lon, self.hr_shape[:-1]
+                )
+                if self.s_enhance > 1
+                else self.input_handler.lat_lon
+            )
+        return self._hr_lat_lon
+
+    @property
+    def hr_time_index(self):
+        """Get the full time index for aggregated source data"""
+        if self._hr_time_index is None:
+            self._hr_time_index = (
+                OutputHandler.get_times(
+                    self.input_handler.time_index, self.hr_shape[-1]
+                )
+                if self.t_enhance > 1
+                else self.input_handler.time_index
+            )
+        return self._hr_time_index
+
+    def get_distance_upper_bound(self):
+        """Maximum distance (float) to map high-resolution data from
+        source_file to the low-resolution file_paths input."""
+        if self.distance_upper_bound is None:
+            diff = da.diff(self.source_lat_lon, axis=0)
+            diff = da.median(diff, axis=0).max()
+            self.distance_upper_bound = diff
+            logger.info(
+                'Set distance upper bound to {:.4f}'.format(
+                    np.asarray(self.distance_upper_bound)
+                )
+            )
+        return self.distance_upper_bound
+
+    @property
+    def tree(self):
+        """Get the KDTree built on the target lat lon data from the
+        file_paths input with s_enhance"""
+        if self._tree is None:
+            self._tree = KDTree(self.hr_lat_lon.reshape((-1, 2)))
+        return self._tree
+
+    @property
+    def nn(self):
+        """Get the nearest neighbor indices. This uses a single neighbor
+        by default"""
+        _, nn = self.tree.query(
+            self.source_lat_lon,
+            distance_upper_bound=self.get_distance_upper_bound(),
+        )
+        return nn
+
+    @property
+    def data(self):
+        """Get a raster of source values corresponding to the
+        high-resolution grid (the file_paths input grid * s_enhance *
+        t_enhance). The shape is (lats, lons, temporal, 1)"""
+        cache_fp = self.cache_file
+        if os.path.exists(cache_fp):
+            data = Loader(cache_fp)
+        else:
+            data = self.get_data()
+
+        if not os.path.exists(cache_fp):
+            tmp_fp = cache_fp + '.tmp'
+            Cacher.write_netcdf(
+                tmp_fp,
+                data,
+                max_workers=self.max_workers,
+                chunks=self.chunks,
+                verbose=self.verbose,
+            )
+            shutil.move(tmp_fp, cache_fp)
+            logger.info('Moved %s to %s', tmp_fp, cache_fp)
+
+        return Sup3rX(data.chunk(self.chunks))
+
+    def get_data(self):
+        """Get a raster of source values corresponding to the
+        high-resolution grid (the file_paths input grid * s_enhance *
+        t_enhance). The shape is (lats, lons, 1)
+        """
+        assert (
+            len(self.source_data.shape) == 2
+            and self.source_data.shape[1] == 1
+        )
+
+        df = pd.DataFrame(
+            {self.feature: self.source_data.flatten(), 'gid_target': self.nn}
+        )
+        n_target = np.prod(self.hr_shape[:-1])
+        df = df[df['gid_target'] != n_target]
+        df = df.sort_values('gid_target')
+        df = df.groupby('gid_target').mean()
+
+        missing = set(np.arange(n_target)) - set(df.index)
+        if any(missing):
+            msg = (
+                f'{len(missing)} target pixels did not have unique '
+                f'high-resolution {self.feature} source data to map from. '
+                'If there are a lot of target pixels missing source data '
+                'this probably means the source data is not high enough '
+                'resolution. Filling raster with NN.'
+            )
+            logger.warning(msg)
+            warn(msg)
+            temp_df = pd.DataFrame(
+                {self.feature: np.nan}, index=sorted(missing)
+            )
+            df = pd.concat((df, temp_df)).sort_index()
+
+        hr_data = df[self.feature].values.reshape(self.hr_shape[:-1])
+        if np.isnan(hr_data).any():
+            hr_data = nn_fill_array(hr_data)
+
+        logger.info(
+            'Finished mapping raster from %s for "%s"',
+            self.source_file,
+            self.feature,
+        )
+        data_vars = {
+            self.feature: (
+                Dimension.dims_2d(),
+                da.asarray(hr_data, dtype=np.float32),
+            )
+        }
+        ds = xr.Dataset(coords=self.coords, data_vars=data_vars)
+        return Sup3rX(ds)
+
+
+class ExoRasterizerH5(BaseExoRasterizer):
+    """ExoRasterizer for H5 files"""
+
+    @property
+    def source_data(self):
+        """Get the 1D array of exogenous data from the source_file_h5"""
+        if self._source_data is None:
+            self._source_data = self.source_handler[self.feature]
+            if 'time' not in self.source_handler[self.feature].dims:
+                self._source_data = self._source_data.data[:, None]
+            else:
+                self._source_data = self._source_data.data[..., slice(0, 1)]
+        return self._source_data
+
+
+class ExoRasterizerNC(BaseExoRasterizer):
+    """ExoRasterizer for netCDF files"""
+
+    @property
+    def source_data(self):
+        """Get the 1D array of exogenous data from the source_file_nc"""
+        return self.source_handler[self.feature].data.flatten()[..., None]
+
+    @property
+    def source_lat_lon(self):
+        """Get the 2D array (n, 2) of lat, lon data from the source_file"""
+        source_lat_lon = self.source_handler.lat_lon.reshape((-1, 2))
+        return source_lat_lon
+
+
+class SzaRasterizer(BaseExoRasterizer):
+    """Rasterizer for solar zenith angle ("sza") data, which is computed
+    from the high-res time index and lat/lon rather than read from a
+    source file."""
+
+    @property
+    def source_data(self):
+        """Get the array of sza data computed for the high-res grid"""
+        return SolarZenith.get_zenith(self.hr_time_index, self.hr_lat_lon)
+
+    def get_data(self):
+        """Get a raster of source values corresponding to the high-res
+        grid (the file_paths input grid * s_enhance * t_enhance). The
+        shape is (lats, lons, temporal)
+        """
+        logger.info(f'Finished computing {self.feature} data')
+        data_vars = {self.feature: (Dimension.dims_3d(), self.source_data)}
+        ds = xr.Dataset(coords=self.coords, data_vars=data_vars)
+        return Sup3rX(ds)
+
+
+class ExoRasterizer(BaseExoRasterizer, metaclass=Sup3rMeta):
+    """Type agnostic `ExoRasterizer` class."""
+
+    TypeSpecificClasses: ClassVar = {
+        'nc': ExoRasterizerNC,
+        'h5': ExoRasterizerH5,
+    }
+
+    def __new__(cls, file_paths, source_file, feature, **kwargs):
+        """Override parent class to return type specific class based on
+        `source_file`"""
+        kwargs = {
+            'file_paths': file_paths,
+            'source_file': source_file,
+            'feature': feature,
+            **kwargs,
+        }
+        if feature.lower() == 'sza':
+            ExoClass = SzaRasterizer
+        else:
+            ExoClass = cls.TypeSpecificClasses[get_source_type(source_file)]
+        return ExoClass(**kwargs)
+
+    _signature_objs = (BaseExoRasterizer,)
+    __doc__ = BaseExoRasterizer.__doc__
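For context, a hypothetical call mapping a high-res topography source onto a spatially enhanced low-res grid. Both file paths and the enhancement factor are assumptions for illustration:

    from sup3r.preprocessing.rasterizers.exo import ExoRasterizer

    # Hypothetical inputs: low-res GCM netCDF files and a high-res
    # WTK-style h5 topography source. The type-agnostic factory returns
    # ExoRasterizerH5 here because the source file is .h5.
    topo = ExoRasterizer(
        file_paths='./gcm_data_*.nc',
        source_file='./wtk_topography.h5',
        feature='topography',
        s_enhance=4,
        cache_dir='./exo_cache',
    )
    raster = topo.data  # topography on the 4x spatially enhanced grid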
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/rasterizers/extended.html b/_modules/sup3r/preprocessing/rasterizers/extended.html
new file mode 100644
index 000000000..d5cdfac42
--- /dev/null
+++ b/_modules/sup3r/preprocessing/rasterizers/extended.html
@@ -0,0 +1,874 @@
Source code for sup3r.preprocessing.rasterizers.extended

+"""Extended ``Rasterizer`` that can rasterize flattened data."""
+
+import logging
+import os
+
+import numpy as np
+import xarray as xr
+
+from sup3r.preprocessing.loaders import Loader
+from sup3r.preprocessing.names import Dimension
+
+from .base import BaseRasterizer
+
+logger = logging.getLogger(__name__)
+
+
+
+class Rasterizer(BaseRasterizer):
+    """Extended `Rasterizer` class which also handles the flattened data
+    format used for some H5 files (e.g. Wind Toolkit or NSRDB data), and
+    rasterizes directly from file paths rather than taking a Loader as
+    input"""
+
+    def __init__(
+        self,
+        file_paths,
+        features='all',
+        res_kwargs=None,
+        chunks='auto',
+        target=None,
+        shape=None,
+        time_slice=slice(None),
+        threshold=None,
+        raster_file=None,
+        max_delta=20,
+        BaseLoader=None,
+    ):
+        """
+        Parameters
+        ----------
+        file_paths : str | list | pathlib.Path
+            file_paths input to LoaderClass
+        features : list | str
+            Features to return in loaded dataset. If 'all' then all
+            available features will be returned.
+        res_kwargs : dict
+            Additional keyword arguments passed through to the
+            ``BaseLoader``. BaseLoader is usually xr.open_mfdataset for
+            NETCDF files and MultiFileResourceX for H5 files.
+        chunks : dict | str | None
+            Dictionary of chunk sizes to pass through to
+            ``dask.array.from_array()`` or ``xr.Dataset().chunk()``. Will
+            be converted to a tuple when used in ``from_array()``. These
+            are the methods for H5 and NETCDF data, respectively. This
+            argument can be "auto" in addition to a dictionary. If this is
+            None then the data will not be chunked and instead loaded
+            directly into memory.
+        target : tuple
+            (lat, lon) lower left corner of raster. Either need
+            target+shape or raster_file.
+        shape : tuple
+            (rows, cols) grid size. Either need target+shape or
+            raster_file.
+        time_slice : slice | list
+            Slice specifying extent and step of temporal extraction. e.g.
+            slice(start, stop, step). If equal to slice(None, None, 1) the
+            full time dimension is selected. Can also be a list
+            ``[start, stop, step]``
+        threshold : float
+            Nearest neighbor euclidean distance threshold. If the
+            coordinates are more than this value away from the target
+            lat/lon, an error is raised.
+        raster_file : str | None
+            File for raster_index array for the corresponding target and
+            shape. If specified the raster_index will be loaded from the
+            file if it exists or written to the file if it does not yet
+            exist. If None and raster_index is not provided raster_index
+            will be calculated directly. Either need target+shape,
+            raster_file, or raster_index input.
+        max_delta : int
+            Optional maximum limit on the raster shape that is retrieved
+            at once. If shape is (20, 20) and max_delta=10, the full
+            raster will be retrieved in four chunks of (10, 10). This
+            helps adapt to non-regular grids that curve over large
+            distances.
+        BaseLoader : Callable
+            Optional base loader method update. This is a function which
+            takes `file_paths` and `**kwargs` and returns an initialized
+            base loader with those arguments. The default for h5 is a
+            method which returns MultiFileWindX(file_paths, **kwargs) and
+            for nc the default is
+            xarray.open_mfdataset(file_paths, **kwargs)
+        """
+        self.raster_file = raster_file
+        self.max_delta = max_delta
+        preload = chunks is None
+        self.loader = Loader(
+            file_paths,
+            features=features,
+            res_kwargs=res_kwargs,
+            chunks='auto' if preload else chunks,
+            BaseLoader=BaseLoader,
+        )
+        super().__init__(
+            loader=self.loader,
+            features=features,
+            target=target,
+            shape=shape,
+            time_slice=time_slice,
+            threshold=threshold,
+        )
+        if self.raster_file is not None and not os.path.exists(
+            self.raster_file
+        ):
+            self.save_raster_index()
+
+        if preload:
+            self.data.compute()
+
+    def rasterize_data(self):
+        """Get rasterized data."""
+        if not self.loader.flattened:
+            return super().rasterize_data()
+        return self._rasterize_flat_data()
+
+    def _rasterize_flat_data(self):
+        """Rasterize data from flattened source data, usually coming from
+        WTK or NSRDB data."""
+        dims = (Dimension.SOUTH_NORTH, Dimension.WEST_EAST)
+        coords = {
+            Dimension.LATITUDE: (dims, self.lat_lon[..., 0]),
+            Dimension.LONGITUDE: (dims, self.lat_lon[..., 1]),
+            Dimension.TIME: self.time_index,
+        }
+        data_vars = {}
+        feats = list(self.loader.data_vars)
+        data = self.loader[feats].isel(
+            **{Dimension.FLATTENED_SPATIAL: self.raster_index.flatten()}
+        )
+        for f in feats:
+            if Dimension.TIME in self.loader[f].dims:
+                dat = data[f].isel({Dimension.TIME: self.time_slice})
+                dat = dat.data.reshape(
+                    (*self.grid_shape, len(self.time_index))
+                )
+                data_vars[f] = ((*dims, Dimension.TIME), dat)
+            else:
+                dat = data[f].data.reshape(self.grid_shape)
+                data_vars[f] = (dims, dat)
+        return xr.Dataset(
+            coords=coords, data_vars=data_vars, attrs=self.loader.attrs
+        )
+
+    def save_raster_index(self):
+        """Save raster index to cache file."""
+        os.makedirs(os.path.dirname(self.raster_file), exist_ok=True)
+        np.savetxt(self.raster_file, self.raster_index)
+        logger.info(f'Saved raster_index to {self.raster_file}')
+
+    def get_raster_index(self):
+        """Get set of slices or indices selecting the requested region
+        from the contained data."""
+        if not self.loader.flattened:
+            return super().get_raster_index()
+        return self._get_flat_data_raster_index()
+
+    def _get_flat_data_raster_index(self):
+        """Get raster index for the flattened source data, which usually
+        comes from WTK or NSRDB data."""
+        if self.raster_file is None or not os.path.exists(self.raster_file):
+            logger.info(
+                f'Calculating raster_index for target={self._target}, '
+                f'shape={self._grid_shape}.'
+            )
+            msg = (
+                'Either shape + target or a raster_file must be provided '
+                'for flattened data rasterization.'
+            )
+            assert (
+                self._target is not None and self._grid_shape is not None
+            ), msg
+            raster_index = self.loader.res.get_raster_index(
+                self._target, self._grid_shape, max_delta=self.max_delta
+            )
+        else:
+            raster_index = np.loadtxt(self.raster_file).astype(np.int32)
+            logger.info(f'Loaded raster_index from {self.raster_file}')
+
+        return raster_index
+
+    def get_lat_lon(self):
+        """Get the 2D array of coordinates corresponding to the requested
+        target and shape."""
+        if not self.loader.flattened:
+            return super().get_lat_lon()
+        return self._get_flat_data_lat_lon()
+
+    def _get_flat_data_lat_lon(self):
+        """Get lat lon for flattened source data."""
+        if hasattr(self.full_lat_lon, 'vindex'):
+            return self.full_lat_lon.vindex[self.raster_index]
+        return self.full_lat_lon[self.raster_index.flatten()]
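A usage sketch for the flattened-H5 path (the input file and raster-index paths are hypothetical):

    from sup3r.preprocessing.rasterizers.extended import Rasterizer

    # Hypothetical WTK-style flattened H5 input. Caching the raster index
    # means the target/shape nearest-neighbor lookup only has to run once.
    rasterizer = Rasterizer(
        './wtk_2013.h5',
        features=['windspeed_100m', 'winddirection_100m'],
        target=(39.0, -105.0),
        shape=(20, 20),
        raster_file='./cache/raster_index.txt',
        max_delta=10,
    )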
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/samplers/base.html b/_modules/sup3r/preprocessing/samplers/base.html
new file mode 100644
index 000000000..69ef482fb
--- /dev/null
+++ b/_modules/sup3r/preprocessing/samplers/base.html
@@ -0,0 +1,1027 @@
Source code for sup3r.preprocessing.samplers.base

+"""Basic ``Sampler`` objects. These are containers which also can sample from
+the underlying data. These interface with ``BatchQueues`` so they also have
+additional information about how different features are used by models."""
+
+import logging
+from fnmatch import fnmatch
+from typing import Dict, Optional, Tuple
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+
+from sup3r.preprocessing.base import Container
+from sup3r.preprocessing.samplers.utilities import (
+    uniform_box_sampler,
+    uniform_time_sampler,
+)
+from sup3r.preprocessing.utilities import compute_if_dask, log_args, lowered
+
+logger = logging.getLogger(__name__)
+
+
+
+class Sampler(Container):
+    """Basic Sampler class for iterating through batches of samples from
+    the contained data."""
+
+    @log_args
+    def __init__(
+        self,
+        data,
+        sample_shape: Optional[tuple] = None,
+        batch_size: int = 16,
+        feature_sets: Optional[Dict] = None,
+    ):
+        """
+        Parameters
+        ----------
+        data : Union[Sup3rX, Sup3rDataset]
+            Object with data that will be sampled from. Usually the
+            ``.data`` attribute of various
+            :class:`~sup3r.preprocessing.base.Container` objects. i.e.
+            :class:`~sup3r.preprocessing.loaders.Loader`,
+            :class:`~sup3r.preprocessing.rasterizers.Rasterizer`,
+            :class:`~sup3r.preprocessing.derivers.Deriver`, as long as the
+            spatial dimensions are not flattened.
+        sample_shape : tuple
+            Size of arrays to sample from the contained data.
+        batch_size : int
+            Number of samples to get to build a single batch. A sample of
+            ``(sample_shape[0], sample_shape[1], batch_size *
+            sample_shape[2])`` is first selected from the underlying
+            dataset and then reshaped into ``(batch_size, *sample_shape)``
+            to get a single batch. This is more efficient than getting
+            ``N = batch_size`` samples and then stacking.
+        feature_sets : Optional[dict]
+            Optional dictionary describing how the full set of features is
+            split between ``lr_only_features`` and ``hr_exo_features``.
+
+            features : list | tuple
+                List of full set of features to use for sampling. If no
+                entry is provided then all data_vars from data will be
+                used.
+            lr_only_features : list | tuple
+                List of feature names or patt*erns that should only be
+                included in the low-res training set and not the high-res
+                observations.
+            hr_exo_features : list | tuple
+                List of feature names or patt*erns that should be included
+                in the high-resolution observation but not expected to be
+                output from the generative model. An example is high-res
+                topography that is to be injected mid-network.
+        """
+        super().__init__(data=data)
+        feature_sets = feature_sets or {}
+        self.features = feature_sets.get('features', self.data.features)
+        self._lr_only_features = feature_sets.get('lr_only_features', [])
+        self._hr_exo_features = feature_sets.get('hr_exo_features', [])
+        self.sample_shape = sample_shape or (10, 10, 1)
+        self.batch_size = batch_size
+        self.lr_features = self.features
+        self.preflight()
+
+    def get_sample_index(self, n_obs=None):
+        """Randomly gets spatiotemporal sample index.
+
+        Notes
+        -----
+        If ``n_obs > 1`` this will get a time slice with ``n_obs *
+        self.sample_shape[2]`` time steps, which will then be reshaped
+        into ``n_obs`` samples each with ``self.sample_shape[2]`` time
+        steps. This is a much more efficient way of getting batches of
+        samples but only works if there are enough continuous time steps
+        to sample.
+
+        Returns
+        -------
+        sample_index : tuple
+            Tuple of latitude slice, longitude slice, time slice, and
+            features. Used to get single observation like
+            ``self.data[sample_index]``
+        """
+        n_obs = n_obs or self.batch_size
+        spatial_slice = uniform_box_sampler(self.shape, self.sample_shape[:2])
+        time_slice = uniform_time_sampler(
+            self.shape, self.sample_shape[2] * n_obs
+        )
+        return (*spatial_slice, time_slice, self.features)
+
+    def preflight(self):
+        """Check if the sample_shape is larger than the requested raster
+        size"""
+        good_shape = (
+            self.sample_shape[0] <= self.data.shape[0]
+            and self.sample_shape[1] <= self.data.shape[1]
+        )
+        msg = (
+            f'spatial_sample_shape {self.sample_shape[:2]} is '
+            f'larger than the raster size {self.data.shape[:2]}'
+        )
+        assert good_shape, msg
+
+        msg = (
+            f'sample_shape[2] ({self.sample_shape[2]}) cannot be larger '
+            'than the number of time steps in the raw data '
+            f'({self.data.shape[2]}).'
+        )
+        assert self.data.shape[2] >= self.sample_shape[2], msg
+
+        msg = (
+            f'sample_shape[2] * batch_size ({self.sample_shape[2]} * '
+            f'{self.batch_size}) is larger than the number of time steps '
+            f'in the raw data ({self.data.shape[2]}). This prevents us '
+            'from building batches with a single sample with '
+            'n_time_steps = sample_shape[2] * batch_size, which is far '
+            'more performant than building batches with n_samples = '
+            'batch_size, each with n_time_steps = sample_shape[2].'
+        )
+        if self.data.shape[2] < self.sample_shape[2] * self.batch_size:
+            logger.warning(msg)
+            warn(msg)
+
+    @property
+    def sample_shape(self) -> Tuple:
+        """Shape of the data sample to select when ``__next__()`` is
+        called."""
+        return self._sample_shape
+
+    @sample_shape.setter
+    def sample_shape(self, sample_shape):
+        """Set the shape of the data sample to select when ``__next__()``
+        is called."""
+        self._sample_shape = sample_shape
+        if len(self._sample_shape) == 2:
+            logger.info(
+                'Found 2D sample shape of {}. Adding temporal dim of '
+                '1'.format(self._sample_shape)
+            )
+            self._sample_shape = (*self._sample_shape, 1)
+
+    @property
+    def hr_sample_shape(self) -> Tuple:
+        """Shape of the data sample to select when `__next__()` is called.
+        Same as sample_shape"""
+        return self._sample_shape
+
+    @hr_sample_shape.setter
+    def hr_sample_shape(self, hr_sample_shape):
+        """Set the sample shape to select when `__next__()` is called.
+        Same as sample_shape"""
+        self._sample_shape = hr_sample_shape
+
+    def _reshape_samples(self, samples):
+        """Reshape samples into batch shapes, with shape = (batch_size,
+        *sample_shape, n_features). Samples start out with a time
+        dimension of shape = batch_size * sample_shape[2] so we need to
+        split this and reorder the dimensions.
+
+        Parameters
+        ----------
+        samples : Union[np.ndarray, da.core.Array]
+            Selection from `self.data` with shape:
+            (samp_shape[0], samp_shape[1], batch_size * samp_shape[2],
+            n_feats)
+
+        Returns
+        -------
+        batch : np.ndarray
+            Reshaped sample array, with shape:
+            (batch_size, samp_shape[0], samp_shape[1], samp_shape[2],
+            n_feats)
+        """
+        new_shape = list(samples.shape)
+        new_shape = [
+            *new_shape[:2],
+            self.batch_size,
+            new_shape[2] // self.batch_size,
+            new_shape[-1],
+        ]
+        # (lats, lons, batch_size, times, feats)
+        out = np.reshape(samples, new_shape)
+        # (batch_size, lats, lons, times, feats)
+        return compute_if_dask(np.transpose(out, axes=(2, 0, 1, 3, 4)))
+
+    def _stack_samples(self, samples):
+        """Used to build batch arrays in the case of independent time
+        samples (e.g. slow batching)
+
+        Note
+        ----
+        Tuples are in the case of dual datasets. e.g. This sampler is for
+        a :class:`~sup3r.preprocessing.batch_handlers.DualBatchHandler`
+
+        Parameters
+        ----------
+        samples : Tuple[List[np.ndarray | da.core.Array], ...] |
+                  List[np.ndarray | da.core.Array]
+            Each list has length = batch_size and each array has shape:
+            (samp_shape[0], samp_shape[1], samp_shape[2], n_feats)
+
+        Returns
+        -------
+        batch : Tuple[np.ndarray, np.ndarray] | np.ndarray
+            Stacked sample array(s), each with shape:
+            (batch_size, samp_shape[0], samp_shape[1], samp_shape[2],
+            n_feats)
+        """
+        if isinstance(samples[0], tuple):
+            lr = da.stack([s[0] for s in samples], axis=0)
+            hr = da.stack([s[1] for s in samples], axis=0)
+            return (lr, hr)
+        return da.stack(samples, axis=0)
+
+    def _fast_batch(self):
+        """Get batch of samples with adjacent time slices."""
+        out = self.data.sample(self.get_sample_index(n_obs=self.batch_size))
+        if isinstance(out, tuple):
+            return tuple(self._reshape_samples(o) for o in out)
+        return self._reshape_samples(out)
+
+    def _slow_batch(self):
+        """Get batch of samples with random time slices."""
+        samples = [
+            self.data.sample(self.get_sample_index(n_obs=1))
+            for _ in range(self.batch_size)
+        ]
+        return self._stack_samples(samples)
+
+    def _fast_batch_possible(self):
+        return self.batch_size * self.sample_shape[2] <= self.data.shape[2]
+
+    def __next__(self):
+        """Get next batch of samples. This retrieves n_samples =
+        batch_size with shape = sample_shape from the `.data` (a
+        xr.Dataset or Sup3rDataset) through the Sup3rX accessor.
+
+        Returns
+        -------
+        samples : tuple(np.ndarray | da.core.Array) | np.ndarray |
+                  da.core.Array
+            Either a tuple or single array of samples. This is a tuple
+            when this method is sampling from a ``Sup3rDataset`` with two
+            data members
+        """
+        if self._fast_batch_possible():
+            return self._fast_batch()
+        return self._slow_batch()
+
+    def _parse_features(self, unparsed_feats):
+        """Return a list of parsed feature names without wildcards."""
+        if isinstance(unparsed_feats, str):
+            parsed_feats = [unparsed_feats]
+        elif isinstance(unparsed_feats, tuple):
+            parsed_feats = list(unparsed_feats)
+        elif unparsed_feats is None:
+            parsed_feats = []
+        else:
+            parsed_feats = unparsed_feats
+
+        if any('*' in fn for fn in parsed_feats):
+            out = []
+            for feature in self.features:
+                match = any(
+                    fnmatch(feature.lower(), pattern.lower())
+                    for pattern in parsed_feats
+                )
+                if match:
+                    out.append(feature)
+            parsed_feats = out
+        return lowered(parsed_feats)
+
+    @property
+    def lr_only_features(self):
+        """List of feature names or patt*erns that should only be included
+        in the low-res training set and not the high-res observations."""
+        return self._parse_features(self._lr_only_features)
+
+    @property
+    def hr_exo_features(self):
+        """Get a list of exogenous high-resolution features that are only
+        used for training e.g., mid-network high-res topo injection. These
+        must come at the end of the high-res feature set. These can also
+        be input to the model as low-res features."""
+        self._hr_exo_features = self._parse_features(self._hr_exo_features)
+
+        if len(self._hr_exo_features) > 0:
+            msg = (
+                f'High-res train-only features "{self._hr_exo_features}" '
+                f'do not come at the end of the full high-res feature '
+                f'set: {self.features}'
+            )
+            last_feat = self.features[-len(self._hr_exo_features):]
+            assert list(self._hr_exo_features) == list(last_feat), msg
+
+        return self._hr_exo_features
+
+    @property
+    def hr_out_features(self):
+        """Get a list of high-resolution features that are intended to be
+        output by the GAN. Does not include high-resolution exogenous
+        features"""
+        out = []
+        for feature in self.features:
+            lr_only = any(
+                fnmatch(feature.lower(), pattern.lower())
+                for pattern in self.lr_only_features
+            )
+            ignore = lr_only or feature in self.hr_exo_features
+            if not ignore:
+                out.append(feature)
+
+        if len(out) == 0:
+            msg = (
+                f'It appears that all handler features "{self.features}" '
+                'were specified as `hr_exo_features` or '
+                '`lr_only_features` and therefore there are no output '
+                'features!'
+            )
+            logger.error(msg)
+            raise RuntimeError(msg)
+
+        return lowered(out)
+
+    @property
+    def hr_features_ind(self):
+        """Get the high-resolution feature channel indices that should be
+        included for training. Any high-resolution features that are only
+        included in the data handler to be coarsened for the low-res input
+        are removed"""
+        hr_features = list(self.hr_out_features) + list(self.hr_exo_features)
+        if list(self.features) == hr_features:
+            return np.arange(len(self.features))
+        return [
+            i
+            for i, feature in enumerate(self.features)
+            if feature in hr_features
+        ]
+
+    @property
+    def hr_features(self):
+        """Get the high-resolution features corresponding to
+        `hr_features_ind`"""
+        return [self.features[ind].lower() for ind in self.hr_features_ind]
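A minimal sampling sketch (the input file and feature names are hypothetical):

    from sup3r.preprocessing.rasterizers.extended import Rasterizer
    from sup3r.preprocessing.samplers.base import Sampler

    # Hypothetical container data to sample from
    data = Rasterizer('./era5_2020.nc', features=['u_100m', 'v_100m']).data

    sampler = Sampler(
        data,
        sample_shape=(10, 10, 12),  # (lats, lons, time steps) per sample
        batch_size=16,
    )
    batch = next(sampler)  # shape: (16, 10, 10, 12, 2)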
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/samplers/cc.html b/_modules/sup3r/preprocessing/samplers/cc.html
new file mode 100644
index 000000000..f0ac8d18f
--- /dev/null
+++ b/_modules/sup3r/preprocessing/samplers/cc.html
@@ -0,0 +1,880 @@
Source code for sup3r.preprocessing.samplers.cc

+"""Sampler for climate change applications."""
+
+import logging
+from typing import Dict, Optional
+
+import numpy as np
+
+from sup3r.preprocessing.base import Sup3rDataset
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.samplers.dual import DualSampler
+from sup3r.preprocessing.samplers.utilities import nsrdb_reduce_daily_data
+from sup3r.utilities.utilities import nn_fill_array
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DualSamplerCC(DualSampler): + """Special sampling of WTK or NSRDB data for climate change applications + + Note + ---- + This will always give daily / hourly data if ``t_enhance != 1``. The number + of days / hours in the samples is determined by t_enhance. For example, if + ``t_enhance = 8`` and ``sample_shape = (..., 24)`` there will be 3 days in + the low res sample: `lr_sample_shape = (..., 3)`. If + ``1 < t_enhance != 24`` :meth:`reduce_high_res_sub_daily` will be used to + reduce a high res sample shape from + ``(..., sample_shape[2] * 24 // t_enhance)`` to ``(..., sample_shape[2])`` + """ + + def __init__( + self, + data: Sup3rDataset, + sample_shape: Optional[tuple] = None, + batch_size: int = 16, + s_enhance: int = 1, + t_enhance: int = 24, + feature_sets: Optional[Dict] = None, + ): + """ + Parameters + ---------- + data : Sup3rDataset + A :class:`~sup3r.preprocessing.Sup3rDataset` instance with low-res + and high-res data members + sample_shape : tuple + Size of arrays to sample from the high-res data. The sample shape + for the low-res sampler will be determined from the enhancement + factors. + s_enhance : int + Spatial enhancement factor + t_enhance : int + Temporal enhancement factor + feature_sets : Optional[dict] + Optional dictionary describing how the full set of features is + split between ``lr_only_features`` and ``hr_exo_features``. + + lr_only_features : list | tuple + List of feature names or patt*erns that should only be + included in the low-res training set and not the high-res + observations. + hr_exo_features : list | tuple + List of feature names or patt*erns that should be included + in the high-resolution observation but not expected to be + output from the generative model. An example is high-res + topography that is to be injected mid-network. + + See Also + -------- + :class:`~sup3r.preprocessing.DualSampler` + """ + msg = ( + f'{self.__class__.__name__} requires a Sup3rDataset object ' + 'with .daily and .hourly data members, in that order' + ) + assert hasattr(data, 'daily') and hasattr(data, 'hourly'), msg + lr, hr = data.daily, data.hourly + assert lr == data[0] and hr == data[1], msg + if t_enhance == 1: + hr = data.daily + if s_enhance > 1: + lr = lr.coarsen( + { + Dimension.SOUTH_NORTH: s_enhance, + Dimension.WEST_EAST: s_enhance, + } + ).mean() + data = Sup3rDataset(low_res=lr, high_res=hr) + super().__init__( + data=data, + sample_shape=sample_shape, + batch_size=batch_size, + t_enhance=t_enhance, + s_enhance=s_enhance, + feature_sets=feature_sets, + ) + +
+[docs] + def check_for_consistent_shapes(self): + """Make sure container shapes and sample shapes are compatible with + enhancement factors.""" + enhanced_shape = ( + self.lr_data.shape[0] * self.s_enhance, + self.lr_data.shape[1] * self.s_enhance, + self.lr_data.shape[2] * (1 if self.t_enhance == 1 else 24), + ) + msg = ( + f'hr_data.shape {self.hr_data.shape} and enhanced ' + f'lr_data.shape {enhanced_shape} are not compatible with ' + f'the given enhancement factors t_enhance = {self.t_enhance}, ' + f's_enhance = {self.s_enhance}' + ) + assert self.hr_data.shape[:3] == enhanced_shape, msg
+ + +
+[docs] + def reduce_high_res_sub_daily(self, high_res, csr_ind=0): + """Take an hourly high-res observation and reduce the temporal axis + down to lr_sample_shape[2] * t_enhance time steps, using only daylight + hours on the middle part of the high res data. + + Parameters + ---------- + high_res : Union[np.ndarray, da.core.Array] + 5D array with dimensions (n_obs, spatial_1, spatial_2, temporal, + n_features) where temporal >= 24 (set by the data handler). + csr_ind : int + Feature index of clearsky_ratio. e.g. self.data[..., csr_ind] -> + cs_ratio + + Returns + ------- + high_res : Union[np.ndarray, da.core.Array] + 5D array with dimensions (n_obs, spatial_1, spatial_2, temporal, + n_features) where temporal has been reduced down to the integer + lr_sample_shape[2] * t_enhance. For example if hr_sample_shape[2] + is 9 and t_enhance = 8, 72 hourly time steps will be reduced to 9 + using the center daylight 9 hours from the second day. + + Note + ---- + This only does something when ``1 < t_enhance < 24.`` If + ``t_enhance = 24`` there is no need for reduction since every daily + time step will have 24 hourly time steps in the high_res batch data. + Of course, if ``t_enhance = 1``, we are running for a spatial only + model so this routine is unnecessary. + + *Needs review from @grantbuster + """ + if self.t_enhance not in (24, 1): + high_res = self.get_middle_days(high_res, self.hr_sample_shape) + high_res = nsrdb_reduce_daily_data( + high_res, self.hr_sample_shape[-1], csr_ind=csr_ind + ) + return high_res
+ + +
+[docs] + @staticmethod + def get_middle_days(high_res, sample_shape): + """Get middle chunk of high_res data that will then be reduced to day + time steps. This has n_time_steps = 24 if sample_shape[-1] <= 24 + otherwise n_time_steps = sample_shape[-1].""" + if int(high_res.shape[3] / 24) > 1: + mid = int(np.ceil(high_res.shape[3] / 2)) + start = mid - np.max((sample_shape[-1] // 2, 12)) + t_slice = slice(start, start + np.max((sample_shape[-1], 24))) + high_res = high_res[..., t_slice, :] + return high_res
+ + +
+[docs] + def get_sample_index(self, n_obs=None): + """Get sample index for expanded hourly chunk which will be reduced to + the given sample shape.""" + lr_ind, hr_ind = super().get_sample_index(n_obs=n_obs) + upsamp_factor = 1 if self.t_enhance == 1 else 24 + hr_ind = ( + *hr_ind[:2], + slice( + upsamp_factor * lr_ind[2].start, upsamp_factor * lr_ind[2].stop + ), + hr_ind[-1], + ) + return lr_ind, hr_ind
+ + + def _fast_batch_possible(self): + upsamp_factor = 1 if self.t_enhance == 1 else 24 + return ( + upsamp_factor * self.lr_sample_shape[2] * self.batch_size + <= self.data.shape[2] + ) + + def __next__(self): + """Slight modification of `super().__next__()` to first get a sample of + `shape = (..., hr_sample_shape[2] * 24 // t_enhance)` and then reduce + this to `(..., hr_sample_shape[2])` with + :func:`nsrdb_reduce_daily_data.` If this is for a spatial only model + this subroutine is skipped.""" + low_res, high_res = super().__next__() + if ( + self.hr_out_features is not None + and 'clearsky_ratio' in self.hr_out_features + and self.t_enhance != 1 + ): + i_cs = self.hr_out_features.index('clearsky_ratio') + high_res = self.reduce_high_res_sub_daily(high_res, csr_ind=i_cs) + + if np.isnan(high_res[..., i_cs]).any(): + high_res[..., i_cs] = nn_fill_array(high_res[..., i_cs]) + + return low_res, high_res
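The shape arithmetic in the class Note above can be spot-checked with a few lines of plain Python (a sketch with hypothetical values, not part of the rendered source):

t_enhance, hr_t = 8, 24
lr_t = hr_t // t_enhance        # 3 daily (low-res) time steps
fetched_hourly = lr_t * 24      # 72 hourly steps pulled by get_sample_index
reduced = lr_t * t_enhance      # 24 steps after reduce_high_res_sub_daily
assert reduced == hr_t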
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/samplers/dc.html b/_modules/sup3r/preprocessing/samplers/dc.html
new file mode 100644
index 000000000..1ff94bdcc
--- /dev/null
+++ b/_modules/sup3r/preprocessing/samplers/dc.html
@@ -0,0 +1,783 @@
+ sup3r.preprocessing.samplers.dc — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.samplers.dc

+"""Data centric sampler. This samples container data according to weights
+which are updated during training based on performance of the model."""
+
+import logging
+from typing import Dict, List, Optional, Union
+
+import dask.array as da
+import numpy as np
+
+from sup3r.preprocessing.accessor import Sup3rX
+from sup3r.preprocessing.base import Sup3rDataset
+from sup3r.preprocessing.samplers.base import Sampler
+from sup3r.preprocessing.samplers.utilities import (
+    uniform_box_sampler,
+    uniform_time_sampler,
+    weighted_box_sampler,
+    weighted_time_sampler,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+ +[docs] +class SamplerDC(Sampler): + """DataCentric Sampler class used for sampling based on weights which can + be updated during training.""" + + def __init__( + self, + data: Union[Sup3rX, Sup3rDataset], + sample_shape: Optional[tuple] = None, + batch_size: int = 16, + feature_sets: Optional[Dict] = None, + spatial_weights: Optional[ + Union[np.ndarray, da.core.Array, List] + ] = None, + temporal_weights: Optional[ + Union[np.ndarray, da.core.Array, List] + ] = None, + ): + """ + Parameters + ---------- + data : Union[Sup3rX, Sup3rDataset] + Object with data that will be sampled from. Usually the `.data` + attribute of various :class:`Container` objects. i.e. + :class:`Loader`, :class:`Rasterizer`, :class:`Deriver`, as long as + the spatial dimensions are not flattened. + sample_shape : tuple + Size of arrays to sample from the contained data. + batch_size : int + Number of samples to get to build a single batch. A sample of + (sample_shape[0], sample_shape[1], batch_size * sample_shape[2]) + is first selected from underlying dataset and then reshaped into + (batch_size, *sample_shape) to get a single batch. This is more + efficient than getting N = batch_size samples and then stacking. + feature_sets : Optional[dict] + Optional dictionary describing how the full set of features is + split between `lr_only_features` and `hr_exo_features`. See + :class:`~sup3r.preprocessing.Sampler` + spatial_weights : Union[np.ndarray, da.core.Array] | List | None + Set of weights used to initialize the spatial sampling. e.g. If we + want to start off sampling across 2 spatial bins evenly this should + be [0.5, 0.5]. During training these weights will be updated based + on performance across the bins associated with these weights. + temporal_weights : Union[np.ndarray, da.core.Array] | List | None + Set of weights used to initialize the temporal sampling. e.g. If we + want to start off sampling only the first season of the year this + should be [1, 0, 0, 0]. During training these weights will be + updated based on performance across the bins associated with + these weights. + """ + self.spatial_weights = spatial_weights or [1] + self.temporal_weights = temporal_weights or [1] + super().__init__( + data=data, + sample_shape=sample_shape, + batch_size=batch_size, + feature_sets=feature_sets, + ) +
+[docs] + def update_weights(self, spatial_weights, temporal_weights): + """Update spatial and temporal sampling weights.""" + self.spatial_weights = spatial_weights + self.temporal_weights = temporal_weights
+ + +
+[docs] + def get_sample_index(self, n_obs=None): + """Randomly gets weighted spatial sample and time sample indices + + Returns + ------- + observation_index : tuple + Tuple of sampled spatial grid, time slice, and features indices. + Used to get single observation like self.data[observation_index] + """ + n_obs = n_obs or self.batch_size + if self.spatial_weights is not None: + spatial_slice = weighted_box_sampler( + self.shape, self.sample_shape[:2], weights=self.spatial_weights + ) + else: + spatial_slice = uniform_box_sampler( + self.shape, self.sample_shape[:2] + ) + if self.temporal_weights is not None: + time_slice = weighted_time_sampler( + self.shape, + self.sample_shape[2] * n_obs, + weights=self.temporal_weights, + ) + else: + time_slice = uniform_time_sampler( + self.shape, self.sample_shape[2] * n_obs + ) + return (*spatial_slice, time_slice, self.features)
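A hedged usage sketch of ``SamplerDC`` (``data`` stands in for a loaded ``Sup3rX`` or ``Sup3rDataset`` object, and the bin weights are illustrative values):

sampler = SamplerDC(
    data,
    sample_shape=(10, 10, 24),
    batch_size=16,
    spatial_weights=[0.25, 0.25, 0.25, 0.25],  # 4 spatial bins, uniform
    temporal_weights=[1, 0, 0, 0],             # only the first temporal bin
)
# During training a data-centric batch handler can re-weight the bins
# based on model performance and resample accordingly:
sampler.update_weights([0.1, 0.4, 0.4, 0.1], [0.25, 0.25, 0.25, 0.25])
sample_index = sampler.get_sample_index(n_obs=4)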
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/samplers/dual.html b/_modules/sup3r/preprocessing/samplers/dual.html
new file mode 100644
index 000000000..7365ad182
--- /dev/null
+++ b/_modules/sup3r/preprocessing/samplers/dual.html
@@ -0,0 +1,811 @@
+ sup3r.preprocessing.samplers.dual — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.samplers.dual

+"""Dual Sampler objects. These are used to sample from paired datasets with
+low and high resolution data. These paired datasets are contained in a
+Sup3rDataset object."""
+
+import logging
+from typing import Dict, Optional
+
+from sup3r.preprocessing.base import Sup3rDataset
+from sup3r.preprocessing.utilities import lowered
+
+from .base import Sampler
+from .utilities import uniform_box_sampler, uniform_time_sampler
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class DualSampler(Sampler): + """Sampler for sampling from paired (or dual) datasets. Pairs consist of + low and high resolution data, which are contained by a Sup3rDataset.""" + + def __init__( + self, + data: Sup3rDataset, + sample_shape: Optional[tuple] = None, + batch_size: int = 16, + s_enhance: int = 1, + t_enhance: int = 1, + feature_sets: Optional[Dict] = None, + ): + """ + Parameters + ---------- + data : Sup3rDataset + A :class:`~sup3r.preprocessing.base.Sup3rDataset` instance with + low-res and high-res data members + sample_shape : tuple + Size of arrays to sample from the high-res data. The sample shape + for the low-res sampler will be determined from the enhancement + factors. + s_enhance : int + Spatial enhancement factor + t_enhance : int + Temporal enhancement factor + feature_sets : Optional[dict] + Optional dictionary describing how the full set of features is + split between `lr_only_features` and `hr_exo_features`. + + lr_only_features : list | tuple + List of feature names or patt*erns that should only be + included in the low-res training set and not the high-res + observations. + hr_exo_features : list | tuple + List of feature names or patt*erns that should be included + in the high-resolution observation but not expected to be + output from the generative model. An example is high-res + topography that is to be injected mid-network. + """ + msg = ( + f'{self.__class__.__name__} requires a Sup3rDataset object ' + 'with `.low_res` and `.high_res` data members, in that order' + ) + assert hasattr(data, 'low_res') and hasattr(data, 'high_res'), msg + assert data.low_res == data[0] and data.high_res == data[1], msg + super().__init__( + data=data, sample_shape=sample_shape, batch_size=batch_size + ) + self.lr_data, self.hr_data = self.data.low_res, self.data.high_res + self.hr_sample_shape = self.sample_shape + self.lr_sample_shape = ( + self.sample_shape[0] // s_enhance, + self.sample_shape[1] // s_enhance, + self.sample_shape[2] // t_enhance, + ) + feature_sets = feature_sets or {} + self._lr_only_features = feature_sets.get('lr_only_features', []) + self._hr_exo_features = feature_sets.get('hr_exo_features', []) + self.lr_features = list(self.lr_data.data_vars) + self.features = self.get_features(feature_sets) + self.s_enhance = s_enhance + self.t_enhance = t_enhance + self.check_for_consistent_shapes() + post_init_args = { + 'lr_sample_shape': self.lr_sample_shape, + 'hr_sample_shape': self.hr_sample_shape, + 'lr_features': self.lr_features, + 'hr_features': self.hr_features, + } + self.post_init_log(post_init_args) + +
+[docs] + def get_features(self, feature_sets): + """Return default set of features composed from data vars in low res + and high res data objects or the value provided through the + feature_sets dictionary.""" + features = [] + _ = [ + features.append(f) + for f in [*self.lr_data.features, *self.hr_data.features] + if f not in features and f not in lowered(self._hr_exo_features) + ] + features += lowered(self._hr_exo_features) + return feature_sets.get('features', features)
+ + +
+[docs] + def check_for_consistent_shapes(self): + """Make sure container shapes are compatible with enhancement + factors.""" + enhanced_shape = ( + self.lr_data.shape[0] * self.s_enhance, + self.lr_data.shape[1] * self.s_enhance, + self.lr_data.shape[2] * self.t_enhance, + ) + msg = ( + f'hr_data.shape {self.hr_data.shape} and enhanced ' + f'lr_data.shape {enhanced_shape} are not compatible with ' + 'the given enhancement factors' + ) + assert self.hr_data.shape[:3] == enhanced_shape, msg
+ + +
+[docs] + def get_sample_index(self, n_obs=None): + """Get paired sample index, consisting of index for the low res sample + and the index for the high res sample with the same spatiotemporal + extent.""" + n_obs = n_obs or self.batch_size + spatial_slice = uniform_box_sampler( + self.lr_data.shape, self.lr_sample_shape[:2] + ) + time_slice = uniform_time_sampler( + self.lr_data.shape, self.lr_sample_shape[2] * n_obs + ) + lr_index = (*spatial_slice, time_slice, self.lr_features) + hr_index = [ + slice(s.start * self.s_enhance, s.stop * self.s_enhance) + for s in lr_index[:2] + ] + hr_index += [ + slice(s.start * self.t_enhance, s.stop * self.t_enhance) + for s in lr_index[2:-1] + ] + hr_index = (*hr_index, self.hr_features) + return (lr_index, hr_index)
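The low-res to high-res index scaling above can be illustrated with concrete numbers (a sketch with hypothetical slices and enhancement factors):

s_enhance, t_enhance = 2, 4
lr_space = slice(3, 13)   # 10 low-res rows
lr_time = slice(5, 11)    # 6 low-res time steps
hr_space = slice(lr_space.start * s_enhance, lr_space.stop * s_enhance)
hr_time = slice(lr_time.start * t_enhance, lr_time.stop * t_enhance)
print(hr_space, hr_time)  # slice(6, 26, None) slice(20, 44, None)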
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/samplers/utilities.html b/_modules/sup3r/preprocessing/samplers/utilities.html
new file mode 100644
index 000000000..5ba26822d
--- /dev/null
+++ b/_modules/sup3r/preprocessing/samplers/utilities.html
@@ -0,0 +1,978 @@
+ sup3r.preprocessing.samplers.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.samplers.utilities

+"""Miscellaneous utilities for sampling"""
+
+import logging
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+
+from sup3r.preprocessing.utilities import (
+    _compute_chunks_if_dask,
+)
+from sup3r.utilities.utilities import RANDOM_GENERATOR
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +def uniform_box_sampler(data_shape, sample_shape): + """Returns a 2D spatial slice used to extract a sample from a data array. + + Parameters + ---------- + data_shape : tuple + (rows, cols) Size of full grid available for sampling + sample_shape : tuple + (rows, cols) Size of grid to sample from data + + Returns + ------- + slices : list + List of slices corresponding to row and col extent of arr sample + """ + + shape_1 = ( + data_shape[0] if data_shape[0] < sample_shape[0] else sample_shape[0] + ) + shape_2 = ( + data_shape[1] if data_shape[1] < sample_shape[1] else sample_shape[1] + ) + shape = (shape_1, shape_2) + start_row = RANDOM_GENERATOR.integers( + 0, data_shape[0] - sample_shape[0] + 1 + ) + start_col = RANDOM_GENERATOR.integers( + 0, data_shape[1] - sample_shape[1] + 1 + ) + stop_row = start_row + shape[0] + stop_col = start_col + shape[1] + + return [slice(start_row, stop_row), slice(start_col, stop_col)]
+ + + +
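A quick check of ``uniform_box_sampler`` (assumes sup3r is importable; the shapes are arbitrary):

from sup3r.preprocessing.samplers.utilities import uniform_box_sampler

row_slice, col_slice = uniform_box_sampler(
    data_shape=(100, 100), sample_shape=(20, 20)
)
assert row_slice.stop - row_slice.start == 20  # sample rows
assert col_slice.stop - col_slice.start == 20  # sample cols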
+ +[docs] +def weighted_box_sampler(data_shape, sample_shape, weights): + """Extracts a spatial slice from data with selection weighted based on + provided weights + + Parameters + ---------- + data_shape : tuple + (rows, cols) Size of full spatial grid available for sampling + sample_shape : tuple + (rows, cols) Size of grid to sample from data + weights : ndarray + Array of weights used to specify selection strategy. e.g. If weights is + [0.2, 0.4, 0.1, 0.3] then the upper left quadrant of the spatial + domain will be sampled 20 percent of the time, the upper right quadrant + will be sampled 40 percent of the time, etc. + + Returns + ------- + slices : list + List of spatial slices [spatial_1, spatial_2] + """ + max_cols = ( + data_shape[1] if data_shape[1] < sample_shape[1] else sample_shape[1] + ) + max_rows = ( + data_shape[0] if data_shape[0] < sample_shape[0] else sample_shape[0] + ) + max_cols = data_shape[1] - max_cols + 1 + max_rows = data_shape[0] - max_rows + 1 + indices = np.arange(0, max_rows * max_cols) + chunks = np.array_split(indices, len(weights)) + weight_list = [] + for i, w in enumerate(weights): + weight_list += [w] * len(chunks[i]) + weight_list /= np.sum(weight_list) + msg = ( + 'Must have a sample_shape with a number of elements greater than ' + 'or equal to the number of spatial weights.' + ) + assert len(indices) >= len(weight_list), msg + start = RANDOM_GENERATOR.choice(indices, p=weight_list) + row = start // max_cols + col = start % max_cols + stop_1 = row + np.min([sample_shape[0], data_shape[0]]) + stop_2 = col + np.min([sample_shape[1], data_shape[1]]) + + slice_1 = slice(row, stop_1) + slice_2 = slice(col, stop_2) + + return [slice_1, slice_2]
+ + + +
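To make the weighting concrete (a hedged example): with four weights, the flattened set of valid start positions is split into four chunks, and the first chunk, covering starts near the top of the domain, is favored here:

from sup3r.preprocessing.samplers.utilities import weighted_box_sampler

# Start positions in the first quarter of the flattened index space
# are drawn 70% of the time; the remaining chunks get 10% each.
slices = weighted_box_sampler(
    data_shape=(100, 100),
    sample_shape=(20, 20),
    weights=[0.7, 0.1, 0.1, 0.1],
)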
+ +[docs] +def weighted_time_sampler(data_shape, sample_shape, weights): + """Returns a temporal slice with selection weighted based on + provided weights used to extract temporal chunk from data + + Parameters + ---------- + data_shape : tuple + (rows, cols, n_steps) Size of full spatiotemporal data grid available + for sampling + sample_shape : int + (time_steps) Size of time slice to sample from data + weights : list + List of weights used to specify selection strategy. e.g. If weights + is [0.2, 0.8] then the start of the temporal slice will be selected + from the first half of the temporal extent with 0.2 probability and + 0.8 probability for the second half. + + Returns + ------- + slice : slice + time slice with size shape + """ + + shape = data_shape[2] if data_shape[2] < sample_shape else sample_shape + t_indices = np.arange(0, data_shape[2] - shape + 1) + t_chunks = np.array_split(t_indices, len(weights)) + + weight_list = [] + for i, w in enumerate(weights): + weight_list += [w] * len(t_chunks[i]) + weight_list /= np.sum(weight_list) + + start = RANDOM_GENERATOR.choice(t_indices, p=weight_list) + stop = start + shape + + return slice(start, stop)
+ + + +
+[docs] +def uniform_time_sampler(data_shape, sample_shape, crop_slice=slice(None)): + """Returns temporal slice used to extract temporal chunk from data. + + Parameters + ---------- + data_shape : tuple + (rows, cols, n_steps) Size of full spatiotemporal data grid available + for sampling + sample_shape : int + (time_steps) Size of time slice to sample from data grid + crop_slice : slice + Optional slice used to restrict the sampling window. + + Returns + ------- + slice : slice + time slice with size shape + """ + shape = data_shape[2] if data_shape[2] < sample_shape else sample_shape + indices = np.arange(data_shape[2] + 1)[crop_slice] + start = RANDOM_GENERATOR.integers(indices[0], indices[-1] - shape + 1) + stop = start + shape + return slice(start, stop)
+ + + +
+[docs] +def daily_time_sampler(data, shape, time_index): + """Finds a random temporal slice from data starting at midnight + + Parameters + ---------- + data : Union[np.ndarray, da.core.Array] + Data array with dimensions + (spatial_1, spatial_2, temporal, features) + shape : int + (time_steps) Size of time slice to sample from data, must be an integer + less than or equal to 24. + time_index : pd.DatetimeIndex + Time index that matches the data axis=2 + + Returns + ------- + slice : slice + time slice with size shape of data starting at the beginning of the day + """ + + msg = ( + f'data {data.shape} and time index ({len(time_index)}) ' + 'shapes do not match, cannot sample daily data.' + ) + assert data.shape[2] == len(time_index), msg + + ti_short = time_index[: -(shape - 1)] + midnight_ilocs = np.where( + (ti_short.hour == 0) & (ti_short.minute == 0) & (ti_short.second == 0) + )[0] + + if not any(midnight_ilocs): + msg = ( + 'Cannot sample time index of shape {} with requested daily ' + 'sample shape {}'.format(len(time_index), shape) + ) + logger.error(msg) + raise RuntimeError(msg) + + start = RANDOM_GENERATOR.integers(0, len(midnight_ilocs)) + start = midnight_ilocs[start] + stop = start + shape + + return slice(start, stop)
+ + + +
+ +[docs] +def nsrdb_sub_daily_sampler(data, shape, time_index=None): + """Finds a random sample during daylight hours of a day. Nighttime is + assumed to be marked as NaN in feature axis == csr_ind in the data input. + + Parameters + ---------- + data : Union[Sup3rX, Sup3rDataset] + Dataset object with 'clearsky_ratio' accessible as + data['clearsky_ratio'] (spatial_1, spatial_2, temporal, features) + shape : int + (time_steps) Size of time slice to sample from data, must be an integer + less than or equal to 24. + time_index : pd.DatetimeIndex + Time index corresponding to the time axis of `data`. If None then + data.time_index will be used. + + Returns + ------- + tslice : slice + time slice with size shape of data starting at the beginning of the day + """ + time_index = time_index if time_index is not None else data.time_index + tslice = daily_time_sampler(data, 24, time_index) + night_mask = da.isnan(data['clearsky_ratio'][..., tslice]).any(axis=(0, 1)) + + if shape >= data.shape[2]: + return tslice + + if (night_mask).all(): + msg = ( + f'No daylight data found for tslice {tslice} ' + f'{time_index[tslice]}' + ) + logger.warning(msg) + warn(msg) + return tslice + + day_ilocs = np.where(np.asarray(~night_mask))[0] + padding = shape - len(day_ilocs) + half_pad = int(np.round(padding / 2)) + new_start = tslice.start + day_ilocs[0] - half_pad + new_end = new_start + shape + return slice(new_start, new_end)
+ + + +
+ +[docs] +def nsrdb_reduce_daily_data(data, shape, csr_ind=0): + """Takes a 5D array and reduces the axis=3 temporal dim to daylight hours. + + Parameters + ---------- + data : Union[np.ndarray, da.core.Array] + 5D data array, where [..., csr_ind] is assumed to be clearsky ratio + with NaN at night. + (n_obs, spatial_1, spatial_2, temporal, features) + shape : int + (time_steps) Size of time slice to sample from data. If this is + greater than data.shape[-2] data won't be reduced. + csr_ind : int + Index of the feature axis where clearsky ratio is located and NaNs can + be found at night. + + Returns + ------- + data : Union[np.ndarray, da.core.Array] + Same as input but with axis=3 reduced to daylight hours with + requested shape. + """ + + night_mask = da.isnan(data[:, :, :, :, csr_ind]).any(axis=(0, 1, 2)) + + if shape >= data.shape[3]: + return data + + if night_mask.all(): + msg = f'No daylight data found for data of shape {data.shape}' + logger.warning(msg) + warn(msg) + return data + + day_ilocs = _compute_chunks_if_dask(np.where(~night_mask)[0]) + padding = shape - len(day_ilocs) + half_pad = int(np.ceil(padding / 2)) + start = day_ilocs[0] - half_pad + tslice = slice(start, start + shape) + return data[..., tslice, :]
\ No newline at end of file
diff --git a/_modules/sup3r/preprocessing/utilities.html b/_modules/sup3r/preprocessing/utilities.html
new file mode 100644
index 000000000..d21ae3d4b
--- /dev/null
+++ b/_modules/sup3r/preprocessing/utilities.html
@@ -0,0 +1,1299 @@
+ sup3r.preprocessing.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.preprocessing.utilities

+"""Methods used across container objects."""
+
+import logging
+import os
+import pprint
+from glob import glob
+from importlib import import_module
+from inspect import Parameter, Signature, getfullargspec, signature
+from pathlib import Path
+from typing import ClassVar, Optional, Tuple, Union
+from warnings import warn
+
+import numpy as np
+import pandas as pd
+import psutil
+import xarray as xr
+from gaps.cli.documentation import CommandDocumentation
+
+from .names import Dimension
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +def lower_names(data): + """Set all fields / coords / dims to lower case.""" + rename_map = { + f: f.lower() + for f in [ + *list(data.data_vars), + *list(data.dims), + *list(data.coords), + ] + if f != f.lower() + } + return data.rename(rename_map)
+ + + +
+[docs] +def get_input_handler_class(input_handler_name: Optional[str] = None): + """Get the :class:`~sup3r.preprocessing.data_handlers.DataHandler` or + :class:`~sup3r.preprocessing.rasterizers.Rasterizer` object. + + Parameters + ---------- + input_handler_name : str + Class to use for input data. Provide a string name to match a class in + `sup3r.preprocessing`. If None this will return + :class:`~sup3r.preprocessing.rasterizers.Rasterizer`, which uses + `LoaderNC` or `LoaderH5` depending on file type. This is a simple + handler object which does not derive new features from raw data. + + Returns + ------- + HandlerClass : Rasterizer | DataHandler + DataHandler or Rasterizer class from sup3r.preprocessing. + """ + if input_handler_name is None: + input_handler_name = 'Rasterizer' + + logger.info( + '"input_handler_name" arg was not provided. Using ' + f'"{input_handler_name}". If this is incorrect, please provide ' + 'input_handler_name="DataHandlerName".' + ) + + HandlerClass = ( + getattr(import_module('sup3r.preprocessing'), input_handler_name, None) + if isinstance(input_handler_name, str) + else None + ) + + if HandlerClass is None: + msg = ( + 'Could not find requested data handler class ' + f'"{input_handler_name}" in sup3r.preprocessing.' + ) + logger.error(msg) + raise KeyError(msg) + + return HandlerClass
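A short usage note (default behavior only; any other handler name passed here is an assumption that must match a class exported by ``sup3r.preprocessing``):

from sup3r.preprocessing.utilities import get_input_handler_class

# With no name given this returns the Rasterizer class and logs a note
# about the default; unknown names raise a KeyError.
HandlerClass = get_input_handler_class()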
+ + + +def _mem_check(): + mem = psutil.virtual_memory() + return ( + f'Memory usage is {mem.used / 1e9:.3f} GB out of ' + f'{mem.total / 1e9:.3f} GB' + ) + + +
+[docs] +def log_args(func): + """Decorator to log annotations and args. This can be used to wrap __init__ + methods so we need to pass through the signature and docs""" + + def _get_args_dict(thing, fun, *args, **kwargs): + """Get args dict from given object and object method.""" + + ann_dict = {} + if '__annotations__' in dir(thing): + ann_dict = { + name: getattr(thing, name) + for name, val in thing.__annotations__.items() + if val is not ClassVar + } + arg_spec = getfullargspec(fun) + args = args or [] + names = ( + arg_spec.args if 'self' not in arg_spec.args else arg_spec.args[1:] + ) + names = ['args', *names] if arg_spec.varargs is not None else names + vals = [None] * len(names) + defaults = arg_spec.defaults or [] + vals[-len(defaults) :] = defaults + vals[: len(args)] = args + args_dict = dict(zip(names, vals)) + args_dict.update(kwargs) + args_dict.update(ann_dict) + + return args_dict + + def _log_args(thing, fun, *args, **kwargs): + """Log annotated attributes and args.""" + + args_dict = _get_args_dict(thing, fun, *args, **kwargs) + name = thing.__class__.__name__ + logger.info( + f'Initialized {name} with:\n' + f'{pprint.pformat(args_dict, indent=2)}' + ) + logger.debug(_mem_check()) + + def wrapper(self, *args, **kwargs): + _log_args(self, func, *args, **kwargs) + return func(self, *args, **kwargs) + + wrapper.__signature__, wrapper.__doc__ = ( + signature(func), + getattr(func, '__doc__', ''), + ) + return wrapper
+ + + +
+ +[docs] +def get_date_range_kwargs(time_index): + """Get kwargs for pd.date_range from a DatetimeIndex. This is used to + provide a concise time_index representation which can be passed through + the CLI and avoid logging lengthy time indices. + + Parameters + ---------- + time_index : pd.DatetimeIndex + Output time index. + + Returns + ------- + kwargs : dict + Dictionary to pass to pd.date_range(). Can also include kwarg + ``drop_leap`` + """ + freq = ( + f'{(time_index[-1] - time_index[0]).total_seconds() / 60}min' + if len(time_index) == 2 + else pd.infer_freq(time_index) + ) + + kwargs = { + 'start': time_index[0].strftime('%Y-%m-%d %H:%M:%S'), + 'end': time_index[-1].strftime('%Y-%m-%d %H:%M:%S'), + 'freq': freq, + } + + nominal_ti = pd.date_range(**kwargs) + uneven_freq = len(set(np.diff(time_index))) > 1 + + if uneven_freq and len(nominal_ti) > len(time_index): + kwargs['drop_leap'] = True + + elif uneven_freq: + msg = f'Got uneven frequency for time index: {time_index}' + warn(msg) + logger.warning(msg) + + return kwargs
+ + + +
+[docs] +def make_time_index_from_kws(date_range_kwargs): + """Function to make a pandas DatetimeIndex from the + ``get_date_range_kwargs`` outputs + + Parameters + ---------- + date_range_kwargs : dict + Dictionary to pass to pd.date_range(), typically produced from + ``get_date_range_kwargs()``. Can also include kwarg ``drop_leap`` + + Returns + ------- + time_index : pd.DatetimeIndex + Output time index. + """ + drop_leap = date_range_kwargs.pop('drop_leap', False) + time_index = pd.date_range(**date_range_kwargs) + + if drop_leap: + leap_mask = (time_index.month == 2) & (time_index.day == 29) + time_index = time_index[~leap_mask] + + return time_index
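The two helpers above are designed to round-trip a regular time index; a minimal sketch, assuming sup3r is importable:

import pandas as pd

from sup3r.preprocessing.utilities import (
    get_date_range_kwargs,
    make_time_index_from_kws,
)

ti = pd.date_range('2020-01-01', '2020-01-02', freq='1h')
kwargs = get_date_range_kwargs(ti)  # {'start': ..., 'end': ..., 'freq': ...}
assert make_time_index_from_kws(kwargs).equals(ti)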
+ + + +def _compute_chunks_if_dask(arr): + return ( + arr.compute_chunk_sizes() + if hasattr(arr, 'compute_chunk_sizes') + else arr + ) + + +
+[docs] +def numpy_if_tensor(arr): + """Cast array to numpy array if it is a tensor.""" + return arr.numpy() if hasattr(arr, 'numpy') else arr
+ + + +
+[docs] +def compute_if_dask(arr): + """Apply compute method to input if it consists of a dask array or slice + with dask elements.""" + if isinstance(arr, slice): + return slice( + compute_if_dask(arr.start), + compute_if_dask(arr.stop), + compute_if_dask(arr.step), + ) + return arr.compute() if hasattr(arr, 'compute') else arr
+ + + +def _rechunk_if_dask(arr, chunks='auto'): + if hasattr(arr, 'rechunk'): + return arr.rechunk(chunks) + return arr + + +def _parse_time_slice(value): + """Parses a value and returns a slice. Input can be a list, tuple, None, or + a slice.""" + return ( + value + if isinstance(value, slice) + else slice(*value) + if isinstance(value, (tuple, list)) + else slice(None) + ) + + +
+ +[docs] +def expand_paths(fps): + """Expand path(s) + + Parameters + ---------- + fps : str or pathlib.Path or any Sequence of those + One or multiple paths to file + + Returns + ------- + list[str] + A list of expanded unique and sorted paths as str + + Examples + -------- + >>> expand_paths("myfile.h5") + + >>> expand_paths(["myfile.h5", "*.hdf"]) + """ + if isinstance(fps, (str, Path)): + fps = (fps,) + + out = [] + for f in fps: + files = glob(str(f)) + assert any(files), f'Unable to resolve file path: {f}' + out.extend(files) + + return sorted(set(out))
+ + + +
+[docs] +def get_source_type(file_paths): + """Get data source type + + Parameters + ---------- + file_paths : list | str + One or more paths to data files, can include a unix-style pat*tern + + Returns + ------- + source_type : str + Either "h5" or "nc" + """ + if file_paths is None: + return None + + if isinstance(file_paths, str) and '*' in file_paths: + temp = glob(file_paths) + if any(temp): + file_paths = temp + + if not isinstance(file_paths, list): + file_paths = [file_paths] + + _, source_type = os.path.splitext(file_paths[0]) + + if source_type in ('.h5', '.hdf'): + return 'h5' + if source_type in ('.nc',): + return 'nc' + msg = ( + f'Can only handle HDF or NETCDF files. Received unknown extension ' + f'"{source_type}" for files: {file_paths}. We will try to open this ' + 'with xarray.' + ) + logger.warning(msg) + warn(msg) + return 'nc'
+ + + +
+[docs] +def get_obj_params(obj): + """Get available signature parameters for obj and obj bases""" + objs = (obj, *getattr(obj, '_signature_objs', ())) + return composite_sig(CommandDocumentation(*objs)).parameters.values()
+ + + +
+[docs] +def get_class_kwargs(obj, kwargs): + """Get kwargs which match obj signature.""" + param_names = [p.name for p in get_obj_params(obj)] + return {k: v for k, v in kwargs.items() if k in param_names}
+ + + +
+[docs] +def composite_sig(docs: CommandDocumentation): + """Get composite signature from command documentation instance.""" + param_names = { + p.name for sig in docs.signatures for p in sig.parameters.values() + } + config = { + k: v for k, v in docs.template_config.items() if k in param_names + } + has_kwargs = config.pop('kwargs', False) + kw_only = [] + pos_or_kw = [] + for k, v in config.items(): + if v != docs.REQUIRED_TAG: + kw_only.append(Parameter(k, Parameter.KEYWORD_ONLY, default=v)) + else: + pos_or_kw.append(Parameter(k, Parameter.POSITIONAL_OR_KEYWORD)) + + params = pos_or_kw + kw_only + if has_kwargs: + params += [Parameter('kwargs', Parameter.VAR_KEYWORD)] + return Signature(parameters=params)
+ + + +
+[docs] +def composite_info(objs, skip_params=None): + """Get composite signature and doc string for given set of objects.""" + objs = objs if isinstance(objs, (tuple, list)) else [objs] + docs = CommandDocumentation(*objs, skip_params=skip_params) + return composite_sig(docs), docs.parameter_help
+ + + +
+[docs] +def check_signatures(objs, skip_params=None): + """Make sure signatures of objects can be parsed for required arguments.""" + docs = CommandDocumentation(*objs, skip_params=skip_params) + for i, sig in enumerate(docs.signatures): + msg = ( + f'The signature of {objs[i]!r} cannot be resolved sufficiently. ' + 'We need a detailed signature to determine how to distribute ' + 'arguments.' + ) + + params = sig.parameters.values() + assert {p.name for p in params} - {'args', 'kwargs'}, msg
+ + + +
+[docs] +def parse_features(features: Optional[Union[str, list]] = None, data=None): + """Parse possible inputs for features (list, str, None, 'all'). If 'all' + this returns all data_vars in data. If None this returns an empty list. + + Note + ---- + Returns a string if input is a string and list otherwise. Need to manually + get [features] if a list is required. + + Parameters + ---------- + features : list | str | None + Feature request to parse. + data : T_Dataset + Data containing available features + """ + features = ( + list(data.data_vars) + if features in ('all', ['all']) and data is not None + else features + ) + features = lowered(features) if features is not None else [] + return features
+ + + +
+[docs] +def parse_keys( + keys, default_coords=None, default_dims=None, default_features=None +): + """Return set of features and slices for all dimensions contained in + dataset that can be passed to isel and transposed to standard dimension + order. If keys is empty then we just want to return the coordinate + data, so features will be set to just the coordinate names.""" + + keys = keys if isinstance(keys, tuple) else (keys,) + has_feats = is_type_of(keys[0], str) + + # return just coords if an empty feature set is requested + just_coords = ( + isinstance(keys[0], (tuple, list, np.ndarray)) and len(keys[0]) == 0 + ) + + if just_coords: + features = list(default_coords) + elif has_feats: + features = lowered(keys[0]) if keys[0] != 'all' else default_features + else: + features = [] + + if len(features) > 0: + dim_keys = () if len(keys) == 1 else keys[1:] + else: + dim_keys = keys + + dim_keys = parse_ellipsis(dim_keys, dim_num=len(default_dims)) + ordd = ordered_dims(default_dims) + + if len(dim_keys) > len(default_dims): + msg = ( + 'Received keys = %s which are incompatible with the ' + 'dimensions = %s. If trying to access features by integer ' + 'index instead use feature names.' + ) + logger.error(msg, keys, ordd) + raise ValueError(msg % (str(keys), ordd)) + + if len(features) > 0 and len(dim_keys) > 0: + msg = ( + 'Received keys = %s which includes both features and ' + 'dimension indexing. The correct access pattern is ' + 'ds[features][indices]' + ) + logger.error(msg, keys) + raise ValueError(msg % str(keys)) + + return features, dict(zip(ordd, dim_keys))
+ + + +
+[docs] +def parse_to_list(features=None, data=None): + """Parse features and return as a list, even if features is a string.""" + features = ( + np.array( + list(features) + if isinstance(features, (set, tuple)) + else features + if isinstance(features, list) + else [features] + ) + .flatten() + .tolist() + ) + return parse_features(features=features, data=data)
+ + + +
+[docs] +def parse_ellipsis(vals, dim_num): + """ + Replace ellipsis with N slices where N is dim_num - len(vals) + 1 + + Parameters + ---------- + vals : list | tuple + Entries that will be used to index an array with dim_num dimensions. + dim_num : int + Number of dimensions of array that will be indexed with given vals. + """ + new_vals = [] + for v in vals: + if v is Ellipsis: + needed = dim_num - len(vals) + 1 + new_vals.extend([slice(None)] * needed) + else: + new_vals.append(v) + return new_vals
+ + + +
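For example, a single Ellipsis expands into enough full slices to cover the remaining dimensions:

from sup3r.preprocessing.utilities import parse_ellipsis

# dim_num - len(vals) + 1 = 4 - 2 + 1 = 3 full slices replace the Ellipsis
keys = parse_ellipsis([Ellipsis, 0], dim_num=4)
assert keys == [slice(None), slice(None), slice(None), 0]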
+ +[docs] +def contains_ellipsis(vals): + """Check if vals contain an ellipsis. This is used to correctly parse keys + for ``Sup3rX.__getitem__``""" + return vals is Ellipsis or ( + isinstance(vals, (tuple, list)) and any(v is Ellipsis for v in vals) + )
+ + + +
+[docs] +def is_type_of(vals, vtype): + """Check if vals is an instance of type or group of that type.""" + return isinstance(vals, vtype) or ( + isinstance(vals, (set, tuple, list)) + and all(isinstance(v, vtype) for v in vals) + )
+ + + +def _get_strings(vals): + vals = [vals] if isinstance(vals, str) else vals + return [v for v in vals if is_type_of(v, str)] + + +def _lowered(features): + return ( + features.lower() + if isinstance(features, str) + else [f.lower() if isinstance(f, str) else f for f in features] + ) + + +
+[docs] +def lowered(features): + """Return a lower case version of the given str or list of strings. Used to + standardize storage and lookup of features.""" + + feats = _lowered(features) + if _get_strings(features) != _get_strings(feats): + msg = ( + f'Received some upper case features: {_get_strings(features)}. ' + f'Using {_get_strings(feats)} instead.' + ) + logger.warning(msg) + warn(msg) + return feats
+ + + +
+[docs] +def ordered_dims(dims: Tuple): + """Return the order of dims that follows the ordering of Dimension.order() + for the common dim names. e.g dims = ('time', 'south_north', 'dummy', + 'west_east') will return ('south_north', 'west_east', 'time', + 'dummy').""" + standard = [dim for dim in Dimension.order() if dim in dims] + non_standard = [dim for dim in dims if dim not in standard] + if Dimension.VARIABLE in standard: + return tuple(standard[:-1] + non_standard + standard[-1:]) + return tuple(standard + non_standard)
+ + + +
+[docs] +def ordered_array(data: xr.DataArray): + """Transpose arrays so they have a (space, time, ...) or (space, time, + ..., feature) ordering. + + Parameters + ---------- + data : xr.DataArray + xr.DataArray with `.dims` attribute listing all contained dimensions + """ + return data.transpose(*ordered_dims(data.dims))
+ + + +
+[docs] +def dims_array_tuple(arr): + """Return a tuple of (dims, array) with dims equal to the ordered slice + of Dimension.order() with the same len as arr.shape. This is used to set + xr.Dataset entries. e.g. dset[var] = (dims, array)""" + if len(arr.shape) > 1: + arr = (Dimension.order()[1 : len(arr.shape) + 1], arr) + return arr
\ No newline at end of file
diff --git a/_modules/sup3r/qa/qa.html b/_modules/sup3r/qa/qa.html
new file mode 100644
index 000000000..e65418b57
--- /dev/null
+++ b/_modules/sup3r/qa/qa.html
@@ -0,0 +1,1171 @@
+ sup3r.qa.qa — sup3r 0.2.1.dev62 documentation
Source code for sup3r.qa.qa

+"""sup3r QA module.
+
+TODO: Good initial refactor but can do more cleaning here. Should use Loaders
+and Sup3rX.unflatten() method (for H5) to make things more agnostic to dim
+ordering.
+"""
+
+import logging
+import os
+
+import numpy as np
+from rex import Resource
+from rex.utilities.fun_utils import get_fun_call_str
+
+from sup3r.bias.utilities import bias_correct_feature
+from sup3r.postprocessing import OUTPUT_ATTRS, RexOutputs
+from sup3r.preprocessing.derivers import Deriver
+from sup3r.preprocessing.derivers.utilities import parse_feature
+from sup3r.preprocessing.utilities import (
+    get_input_handler_class,
+    get_source_type,
+    lowered,
+)
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import BaseCLI
+from sup3r.utilities.utilities import (
+    spatial_coarsening,
+    temporal_coarsening,
+    xr_open_mfdataset,
+)
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Sup3rQa: + """Class for doing QA on sup3r forward pass outputs. + + Note + ---- + This only works if the sup3r forward pass output can be reshaped into a 2D + raster dataset (e.g. no sparsifying of the meta data). + """ + + def __init__( + self, + source_file_paths, + out_file_path, + s_enhance, + t_enhance, + temporal_coarsening_method, + features=None, + output_names=None, + input_handler_name=None, + input_handler_kwargs=None, + qa_fp=None, + bias_correct_method=None, + bias_correct_kwargs=None, + save_sources=True, + ): + """ + Parameters + ---------- + source_file_paths : list | str + A list of low-resolution source files to extract raster data from. + Each file must have the same number of timesteps. Can also pass a + string with a unix-style file path which will be passed through + glob.glob + out_file_path : str + A single sup3r-resolved output file (either .nc or .h5) with + high-resolution data corresponding to the + source_file_paths * s_enhance * t_enhance + s_enhance : int + Factor by which the Sup3rGan model will enhance the spatial + dimensions of low resolution data + t_enhance : int + Factor by which the Sup3rGan model will enhance temporal dimension + of low resolution data + temporal_coarsening_method : str | list + [subsample, average, total, min, max] + Subsample will take every t_enhance-th time step, average will + average over t_enhance time steps, total will sum over t_enhance + time steps. This can also be a list of method names corresponding + to the list of features. + features : str | list | None + Explicit list of features to validate. Can be a single feature str, + list of string feature names, or None for all features found in the + out_file_path. + output_names : str | list + Optional output file dataset names corresponding to the features + list input + input_handler_name : str | None + data handler class to use for input data. Provide a string name to + match a class in sup3r.preprocessing.data_handlers. If None the + correct handler will be guessed based on file type. + input_handler_kwargs : dict + Keyword arguments for `input_handler`. See :class:`Rasterizer` + class for argument details. + qa_fp : str | None + Optional filepath to output QA file when you call Sup3rQa.run() + (only .h5 is supported) + bias_correct_method : str | None + Optional bias correction function name that can be imported from + the sup3r.bias.bias_transforms module. This will transform the + source data according to some predefined bias correction + transformation along with the bias_correct_kwargs. As the first + argument, this method must receive a generic numpy array of data to + be bias corrected + bias_correct_kwargs : dict | None + Optional namespace of kwargs to provide to bias_correct_method. + If this is provided, it must be a dictionary where each key is a + feature name and each value is a dictionary of kwargs to correct + that feature. You can bias correct only certain input features by + only including those feature names in this dict. 
+ save_sources : bool + Flag to save re-coarsened synthetic data and true low-res data to + qa_fp in addition to the error dataset + """ + + logger.info('Initializing Sup3rQa and retrieving source data...') + + self.s_enhance = s_enhance + self.t_enhance = t_enhance + self._t_meth = temporal_coarsening_method + self._out_fp = out_file_path + self._features = ( + features if isinstance(features, (list, tuple)) else [features] + ) + self._out_names = ( + output_names + if isinstance(output_names, (list, tuple)) + else [output_names] + ) + self.qa_fp = qa_fp + self.save_sources = save_sources + self.output_handler = ( + xr_open_mfdataset(self._out_fp) + if self.output_type == 'nc' + else Resource(self._out_fp) + ) + + self.bias_correct_method = bias_correct_method + self.bias_correct_kwargs = ( + {} + if bias_correct_kwargs is None + else {k.lower(): v for k, v in bias_correct_kwargs.items()} + ) + self.input_handler_kwargs = input_handler_kwargs or {} + + HandlerClass = get_input_handler_class(input_handler_name) + self.input_handler = self.bias_correct_input_handler( + HandlerClass(source_file_paths, **self.input_handler_kwargs) + ) + self.meta = self.input_handler.data.meta + self.time_index = self.input_handler.time_index + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + if exc_type is not None: + raise + +
+[docs] + def close(self): + """Close any open file handlers""" + self.output_handler.close()
+ + + @property + def features(self): + """Get a list of feature names from the output file, excluding meta and + time index datasets + + Returns + ------- + list + """ + # all lower case + ignore = ('meta', 'time_index', 'gids') + + if self._features is None or self._features == [None]: + if self.output_type == 'nc': + features = list(self.output_handler.data_vars) + elif self.output_type == 'h5': + features = self.output_handler.dsets + features = [f for f in features if f.lower() not in ignore] + + elif isinstance(self._features, (list, tuple)): + features = self._features + + return features + + @property + def output_names(self): + """Get a list of output dataset names corresponding to the features + list + """ + if self._out_names is None or self._out_names == [None]: + return self.features + return self._out_names + + @property + def output_type(self): + """Get output data type + + Returns + ------- + output_type + e.g. 'nc' or 'h5' + """ + ftype = get_source_type(self._out_fp) + if ftype not in ('nc', 'h5'): + msg = 'Did not recognize output file type: {}'.format(self._out_fp) + logger.error(msg) + raise TypeError(msg) + return ftype + +
+[docs] + def bias_correct_input_handler(self, input_handler): + """Apply bias correction to all source features which have bias + correction data and return :class:`Deriver` instance to use for + derivations of features to match output features. + + (1) Check if we need to derive any features included in the + bias_correct_kwargs. + (2) Derive these features using the input_handler.derive method, and + update the stored data. + (3) Apply bias correction to all the features in the + bias_correct_kwargs + (4) Derive the features required for validation from the bias corrected + data and update the stored data + (5) Return the updated input_handler, now a :class:`Deriver` object. + """ + need_derive = list( + set(lowered(self.bias_correct_kwargs)) + - set(input_handler.features) + ) + msg = ( + f'Features {need_derive} need to be derived prior to bias ' + 'correction, but the input_handler has no derive method. ' + 'Request an appropriate input_handler with ' + 'input_handler_name=DataHandlerName.' + ) + assert len(need_derive) == 0 or hasattr(input_handler, 'derive'), msg + for f in need_derive: + input_handler.data[f] = input_handler.derive(f) + bc_feats = list( + set(input_handler.features).intersection( + set(lowered(self.bias_correct_kwargs.keys())) + ) + ) + for f in bc_feats: + input_handler.data[f] = bias_correct_feature( + f, + input_handler, + self.bias_correct_method, + self.bias_correct_kwargs, + ) + + return ( + input_handler + if self.features in input_handler + else Deriver( + input_handler.data, + features=self.features, + FeatureRegistry=getattr( + input_handler, 'FEATURE_REGISTRY', None + ), + ) + )
+ + +
+[docs] + def get_dset_out(self, name): + """Get an output dataset from the forward pass output file. + + TODO: Make this dim order agnostic. If we didn't have the h5 condition + we could just do transpose('south_north', 'west_east', 'time') + + Parameters + ---------- + name : str + Name of the output dataset to retrieve. Must be found in the + features property and the forward pass output file. + + Returns + ------- + out : np.ndarray + A copy of the high-resolution output data as a numpy + array of shape (spatial_1, spatial_2, temporal) + """ + + logger.debug('Getting sup3r output dataset "{}"'.format(name)) + data = self.output_handler[name] + if self.output_type == 'nc': + data = data.values + elif self.output_type == 'h5': + shape = ( + len(self.input_handler.time_index) * self.t_enhance, + int(self.input_handler.shape[0] * self.s_enhance), + int(self.input_handler.shape[1] * self.s_enhance), + ) + data = data.reshape(shape) + # data always needs to be converted from (t, s1, s2) -> (s1, s2, t) + data = np.transpose(data, axes=(1, 2, 0)) + + return np.asarray(data)
+ + +
+[docs] + def coarsen_data(self, idf, feature, data): + """Re-coarsen a high-resolution synthetic output dataset + + Parameters + ---------- + idf : int + Feature index + feature : str + Feature name + data : Union[np.ndarray, da.core.Array] + A copy of the high-resolution output data as a numpy + array of shape (spatial_1, spatial_2, temporal) + + Returns + ------- + data : Union[np.ndarray, da.core.Array] + A spatiotemporally coarsened copy of the input dataset, still with + shape (spatial_1, spatial_2, temporal) + """ + t_meth = ( + self._t_meth + if isinstance(self._t_meth, str) + else self._t_meth[idf] + ) + + logger.info( + f'Coarsening feature "{feature}" with {self.s_enhance}x ' + f'spatial averaging and "{t_meth}" {self.t_enhance}x ' + 'temporal averaging' + ) + + data = spatial_coarsening( + data, s_enhance=self.s_enhance, obs_axis=False + ) + + # t_coarse needs shape to be 5D: (obs, s1, s2, t, f) + data = np.expand_dims(data, axis=0) + data = np.expand_dims(data, axis=4) + data = temporal_coarsening( + data, t_enhance=self.t_enhance, method=t_meth + ) + data = data.squeeze(axis=(0, 4)) + + return data
+ + +
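The coarsening sequence above can be sketched with synthetic data (shapes and enhancement factors are hypothetical; this mirrors the method body rather than calling it):

import numpy as np

from sup3r.utilities.utilities import spatial_coarsening, temporal_coarsening

data = np.random.rand(20, 20, 48)  # (spatial_1, spatial_2, temporal)
out = spatial_coarsening(data, s_enhance=2, obs_axis=False)    # (10, 10, 48)
out = np.expand_dims(np.expand_dims(out, 0), 4)                # 5D for t_coarse
out = temporal_coarsening(out, t_enhance=4, method='average')  # (1,10,10,12,1)
out = out.squeeze(axis=(0, 4))                                 # (10, 10, 12)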
+[docs] + @classmethod + def get_node_cmd(cls, config): + """Get a CLI call to initialize Sup3rQa and execute the Sup3rQa.run() + method based on an input config + + Parameters + ---------- + config : dict + sup3r QA config with all necessary args and kwargs to + initialize Sup3rQa and execute Sup3rQa.run() + """ + import_str = 'import time;\n' + import_str += 'from gaps import Status;\n' + import_str += 'from rex import init_logger;\n' + import_str += 'from sup3r.qa.qa import Sup3rQa' + + qa_init_str = get_fun_call_str(cls, config) + + log_file = config.get('log_file', None) + log_level = config.get('log_level', 'INFO') + + log_arg_str = f'"sup3r", log_level="{log_level}"' + if log_file is not None: + log_arg_str += f', log_file="{log_file}"' + + cmd = ( + f"python -c '{import_str};\n" + 't0 = time.time();\n' + f'logger = init_logger({log_arg_str});\n' + f'qa = {qa_init_str};\n' + 'qa.run();\n' + 't_elap = time.time() - t0;\n' + ) + + pipeline_step = config.get('pipeline_step') or ModuleName.QA + cmd = BaseCLI.add_status_cmd(config, pipeline_step, cmd) + cmd += ";'\n" + + return cmd.replace('\\', '/')
+ + +
+[docs] + def export(self, qa_fp, data, dset_name, dset_suffix=''): + """Export error dictionary to h5 file. + + Parameters + ---------- + qa_fp : str | None + Optional filepath to output QA file (only .h5 is supported) + data : Union[np.ndarray, da.core.Array] + An array with shape (space1, space2, time) that represents the + re-coarsened synthetic data minus the source true low-res data, or + another dataset of the same shape to be written to disk + dset_name : str + Base dataset name to save data to + dset_suffix : str + Optional suffix to append to dset_name with an underscore before + saving. + """ + + if not os.path.exists(qa_fp): + logger.info('Initializing qa output file: "{}"'.format(qa_fp)) + with RexOutputs(qa_fp, mode='w') as f: + f.meta = self.input_handler.meta + f.time_index = self.input_handler.time_index + + shape = ( + len(self.input_handler.time_index), + len(self.input_handler.meta), + ) + attrs = OUTPUT_ATTRS.get(parse_feature(dset_name).basename, {}) + + # dont scale the re-coarsened data or diffs + attrs['scale_factor'] = 1 + attrs['dtype'] = 'float32' + + if dset_suffix: + dset_name = dset_name + '_' + dset_suffix + + logger.info('Adding dataset "{}" to output file.'.format(dset_name)) + + # transpose and flatten to typical h5 (time, space) dimensions + data = np.transpose(np.asarray(data), axes=(2, 0, 1)).reshape(shape) + + RexOutputs.add_dataset( + qa_fp, + dset_name, + data, + dtype=attrs['dtype'], + chunks=attrs.get('chunks', None), + attrs=attrs, + )
+ + +
+[docs] + def run(self): + """Go through all datasets and get the error for the re-coarsened + synthetic minus the true low-res source data. + + Returns + ------- + errors : dict + Dictionary of errors, where keys are the feature names, and each + value is an array with shape (space1, space2, time) that represents + the re-coarsened synthetic data minus the source true low-res data + """ + + errors = {} + ziter = zip(self.features, self.output_names) + for idf, (feature, dset_out) in enumerate(ziter): + logger.info( + 'Running QA on dataset {} of {} for "{}"'.format( + idf + 1, len(self.features), feature + ) + ) + data_syn = self.get_dset_out(feature) + data_syn = self.coarsen_data(idf, feature, data_syn) + data_true = self.input_handler[feature][...] + + if data_syn.shape != data_true.shape: + msg = ( + 'Sup3rQa failed while trying to inspect the "{}" feature. ' + 'The source low-res data had shape {} while the ' + 're-coarsened synthetic data had shape {}.'.format( + feature, data_true.shape, data_syn.shape + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + feature_diff = data_syn - data_true + errors[feature] = feature_diff + + if self.qa_fp is not None: + self.export(self.qa_fp, feature_diff, dset_out, 'error') + if self.save_sources: + self.export(self.qa_fp, data_syn, dset_out, 'synthetic') + self.export(self.qa_fp, data_true, dset_out, 'true') + + logger.info('Finished Sup3rQa run method.') + + return errors
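A hedged end-to-end sketch of the class above (file paths, enhancement factors, and output names are illustrative placeholders, not values from a real run):

from sup3r.qa.qa import Sup3rQa

with Sup3rQa(
    source_file_paths='era5_2015_*.nc',  # hypothetical low-res source files
    out_file_path='sup3r_output.h5',     # hypothetical forward-pass output
    s_enhance=20,
    t_enhance=24,
    temporal_coarsening_method='average',
    qa_fp='qa_results.h5',
) as qa:
    errors = qa.run()  # {feature: (space1, space2, time) error array}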
\ No newline at end of file
diff --git a/_modules/sup3r/qa/utilities.html b/_modules/sup3r/qa/utilities.html
new file mode 100644
index 000000000..c7e9040e6
--- /dev/null
+++ b/_modules/sup3r/qa/utilities.html
@@ -0,0 +1,1071 @@
+ sup3r.qa.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.qa.utilities

+"""Utilities used for QA"""
+import logging
+
+import numpy as np
+from scipy.interpolate import interp1d
+
+logger = logging.getLogger(__name__)
+
+
+
+ +[docs] +def tke_frequency_spectrum(u, v, f_range=None): + """Kinetic Energy Spectrum. Gives the portion of kinetic energy + associated with each frequency. + + Parameters + ---------- + u: ndarray + (lat, lon) + U component of wind + v : ndarray + (lat, lon) + V component of wind + f_range : list | None + List with min and max frequency. When comparing spectra for different + domains this needs to be tailored to the specific domain. e.g. f = + [1/max_time, ..., 1/min_time] If this is not specified f will be + set to [0, ..., len(y)] where y is the fft output. + + Returns + ------- + ndarray + Array of frequencies corresponding to energy amplitudes + ndarray + 1D array of amplitudes corresponding to the portion of total energy + with a given frequency + """ + v_f = np.fft.fftn(v.reshape((-1, v.shape[-1]))) + u_f = np.fft.fftn(u.reshape((-1, u.shape[-1]))) + E_f = np.abs(v_f)**2 + np.abs(u_f)**2 + E_f = np.mean(E_f, axis=0) + if f_range is None: + f = np.arange(len(E_f)) + else: + f = np.linspace(f_range[0], f_range[1], len(E_f)) + E_f = f**2 * E_f + n_steps = E_f.shape[0] // 2 + E_f_a = E_f[:n_steps] + E_f_b = E_f[-n_steps:][::-1] + E_f = E_f_a + E_f_b + return f[:n_steps], E_f
+ + + +
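+
+# The sketch below is illustrative only (not part of the sup3r source): it
+# exercises tke_frequency_spectrum on a synthetic (lat, lon, time) wind field
+# with made-up values. The assumed hourly frequency range follows the
+# docstring note above.
+def _demo_tke_frequency_spectrum():  # pragma: no cover - illustrative only
+    rng = np.random.default_rng(0)
+    u = rng.normal(size=(10, 10, 48))  # synthetic U wind, (lat, lon, time)
+    v = rng.normal(size=(10, 10, 48))  # synthetic V wind, (lat, lon, time)
+    # frequencies spanning 1 / 48hr to 1 / 1hr for 48 hourly steps
+    f, e_f = tke_frequency_spectrum(u, v, f_range=[1 / 48, 1])
+    return f, e_f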
+[docs] +def frequency_spectrum(var, f_range=None): + """Frequency Spectrum. Gives the portion of the variable + associated with each frequency. + + Parameters + ---------- + var: ndarray + (lat, lon, temporal) + f_range : list | None + List with min and max frequency. When comparing spectra for different + domains this needs to be tailored to the specific domain. e.g. f = + [1/max_time, ..., 1/min_time] If this is not specified f will be + set to [0, ..., len(y)] where y is the fft output. + + Returns + ------- + ndarray + Array of frequencies corresponding to energy amplitudes + ndarray + 1D array of amplitudes corresponding to the portion of the variable + with a given frequency + """ + var_f = np.fft.fftn(var.reshape((-1, var.shape[-1]))) + E_f = np.abs(var_f)**2 + E_f = np.mean(E_f, axis=0) + if f_range is None: + f = np.arange(len(E_f)) + else: + f = np.linspace(f_range[0], f_range[1], len(E_f)) + E_f = f**2 * E_f + n_steps = E_f.shape[0] // 2 + E_f_a = E_f[:n_steps] + E_f_b = E_f[-n_steps:][::-1] + E_f = E_f_a + E_f_b + return f[:n_steps], E_f
+ + + +
+[docs] +def tke_wavenumber_spectrum(u, v, x_range=None, axis=0): + """Turbulent Kinetic Energy Spectrum. Gives the portion of kinetic energy + associated with each wavenumber. + + Parameters + ---------- + u: ndarray + (lat, lon) + U component of wind + v : ndarray + (lat, lon) + V component of wind + x_range : list | None + List with min and max wavenumber. When comparing spectra for different + domains this needs to be tailored to the specific domain. e.g. k = + [1/max_length, ..., 1/min_length] If this is not specified k will be + set to [0, ..., len(y)] where y is the fft output. + axis : int + Axis to average over to get a 1D wind field. If axis=0 this returns + the zonal energy spectrum + + Returns + ------- + ndarray + Array of wavenumbers corresponding to energy amplitudes + ndarray + 1D array of amplitudes corresponding to the portion of total energy + with a given wavenumber + """ + u_k = np.fft.fftn(u) + v_k = np.fft.fftn(v) + E_k = np.mean(np.abs(v_k)**2 + np.abs(u_k)**2, axis=axis) + if x_range is None: + k = np.arange(len(E_k)) + else: + k = np.linspace(x_range[0], x_range[1], len(E_k)) + n_steps = len(k) // 2 + E_k = k**2 * E_k + E_k_a = E_k[1:n_steps + 1] + E_k_b = E_k[-n_steps:][::-1] + E_k = E_k_a + E_k_b + return k[:n_steps], E_k
+ + + +
+[docs] +def wavenumber_spectrum(var, x_range=None, axis=0): + """Wavenumber Spectrum. Gives the portion of the given variable + associated with each wavenumber. + + Parameters + ---------- + var: ndarray + (lat, lon) + x_range : list | None + List with min and max wavenumber. When comparing spectra for different + domains this needs to be tailored to the specific domain. e.g. k = + [1/max_length, ..., 1/min_length] If this is not specified k will be + set to [0, ..., len(y)] where y is the fft output. + axis : int + Axis to average over to get a 1D field. If axis=0 this returns + the zonal spectrum + + Returns + ------- + ndarray + Array of wavenumbers corresponding to amplitudes + ndarray + 1D array of amplitudes corresponding to the portion of the given + variable with a given wavenumber + """ + var_k = np.fft.fftn(var) + E_k = np.mean(np.abs(var_k)**2, axis=axis) + if x_range is None: + k = np.arange(len(E_k)) + else: + k = np.linspace(x_range[0], x_range[1], len(E_k)) + n_steps = len(k) // 2 + E_k = k**2 * E_k + E_k_a = E_k[1:n_steps + 1] + E_k_b = E_k[-n_steps:][::-1] + E_k = E_k_a + E_k_b + return k[:n_steps], E_k
+ + + +
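+
+# Illustrative sketch (not part of the sup3r source): a zonal wavenumber
+# spectrum from a synthetic 2D field, with axis=0 averaging over latitude as
+# described in the docstring above.
+def _demo_wavenumber_spectrum():  # pragma: no cover - illustrative only
+    rng = np.random.default_rng(0)
+    var = rng.normal(size=(32, 32))  # synthetic (lat, lon) field
+    k, e_k = wavenumber_spectrum(var, axis=0)
+    return k, e_k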
+[docs] +def direct_dist(var, bins=40, range=None, diff_max=None, scale=1, + percentile=99.9, interpolate=False, period=None): + """Returns the direct distribution for the given variable. + + Parameters + ---------- + var: ndarray + (lat, lon, temporal) + bins : int + Number of bins for the direct pdf. + range : tuple | None + Optional min/max range for the direct pdf. + diff_max : float + Max value to keep for given variable + scale : int + Factor to scale the distribution by. This is used so that distributions + from data with different resolutions can be compared. For instance, if + this is calculating a vorticity distribution from data with a spatial + resolution of 4km then the distribution needs to be scaled by 4km to + compare to another scaled vorticity distribution with a different + resolution. + percentile : float + Percentile to use to determine the maximum allowable value in the + distribution. e.g. percentile=99 eliminates values above the 99th + percentile from the histogram. + interpolate : bool + Whether to interpolate over histogram counts. e.g. if a bin has + count = 0 and surrounding bins have count > 0 the bin with count = 0 + will have an interpolated value. + period : float | None + If variable is periodic this gives that period. e.g. If the variable + is winddirection the period is 360 degrees and we need to account for + 0 and 360 being close. + + Returns + ------- + ndarray + var at bin centers + ndarray + var value counts + float + Normalization factor + """ + + if period is not None: + diffs = (var + period) % period + diffs /= scale + else: + diffs = var / scale + diff_max = diff_max or np.percentile(np.abs(diffs), percentile) + diffs = diffs[(np.abs(diffs) < diff_max)] + norm = np.sqrt(np.mean(diffs**2)) + counts, centers = continuous_dist(diffs, bins=bins, range=range, + interpolate=interpolate) + return centers, counts, norm
+ + + +
+[docs] +def gradient_dist(var, bins=40, range=None, diff_max=None, scale=1, + percentile=99.9, interpolate=False, period=None): + """Returns the gradient distribution for the given variable. + + Parameters + ---------- + var: ndarray + (lat, lon, temporal) + bins : int + Number of bins for the gradient pdf. + range : tuple | None + Optional min/max range for the gradient pdf. + diff_max : float + Max value to keep for gradient + scale : int + Factor to scale the distribution by. This is used so that distributions + from data with different resolutions can be compared. For instance, if + this is calculating a velocity gradient distribution from data with a + spatial resolution of 4km then the distribution needs to be scaled by + 4km to compare to another scaled velocity gradient distribution with a + different resolution. + percentile : float + Percentile to use to determine the maximum allowable value in the + distribution. e.g. percentile=99 eliminates values above the 99th + percentile from the histogram. + interpolate : bool + Whether to interpolate over histogram counts. e.g. if a bin has + count = 0 and surrounding bins have count > 0 the bin with count = 0 + will have an interpolated value. + period : float | None + If variable is periodic this gives that period. e.g. If the variable + is winddirection the period is 360 degrees and we need to account for + 0 and 360 being close. + + Returns + ------- + ndarray + d(var) / dx at bin centers + ndarray + d(var) / dx value counts + float + Normalization factor + """ + diffs = np.diff(var, axis=1).flatten() + if period is not None: + diffs = (diffs + period / 2) % period - period / 2 + diffs /= scale + diff_max = diff_max or np.percentile(np.abs(diffs), percentile) + diffs = diffs[(np.abs(diffs) < diff_max)] + norm = np.sqrt(np.mean(diffs**2)) + counts, centers = continuous_dist(diffs, bins=bins, range=range, + interpolate=interpolate) + return centers, counts, norm
+ + + +
+[docs] +def time_derivative_dist(var, bins=40, range=None, diff_max=None, t_steps=1, + scale=1, percentile=99.9, interpolate=False, + period=None): + """Returns the time derivative distribution for the given variable. + + Parameters + ---------- + var: ndarray + (lat, lon, temporal) + bins : int + Number of bins for the time derivative pdf. + range : tuple | None + Optional min/max range for the time derivative pdf. + diff_max : float + Max value to keep for time derivative + t_steps : int + Number of time steps to use for differences. e.g. If t_steps=1 this + uses var[i + 1] - var[i] to compute time derivatives. + scale : int + Factor to scale the distribution by. This is used so that distributions + from data with different resolutions can be compared. For instance, if + this is calculating a time derivative distribution from data with a + temporal resolution of 15min then the distribution needs to be scaled + by 15min to compare to another scaled time derivative distribution with + a different resolution + percentile : float + Percentile to use to determine the maximum allowable value in the + distribution. e.g. percentile=99 eliminates values above the 99th + percentile from the histogram. + interpolate : bool + Whether to interpolate over histogram counts. e.g. if a bin has + count = 0 and surrounding bins have count > 0 the bin with count = 0 + will have an interpolated value. + period : float | None + If variable is periodic this gives that period. e.g. If the variable + is winddirection the period is 360 degrees and we need to account for + 0 and 360 being close. + + Returns + ------- + ndarray + d(var) / dt values at bin centers + ndarray + d(var) / dt value counts + float + Normalization factor + """ + + msg = (f'Received t_steps={t_steps} for time derivative calculation but ' + f'data only has {var.shape[-1]} time steps') + assert t_steps < var.shape[-1], msg + diffs = (var[..., t_steps:] - var[..., :-t_steps]).flatten() + if period is not None: + diffs = (diffs + period / 2) % period - period / 2 + diffs /= scale + diff_max = diff_max or np.percentile(np.abs(diffs), percentile) + diffs = diffs[(np.abs(diffs) < diff_max)] + norm = np.sqrt(np.mean(diffs**2)) + counts, centers = continuous_dist(diffs, bins=bins, range=range, + interpolate=interpolate) + return centers, counts, norm
+ + + +
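+
+# Illustrative sketch (not part of the sup3r source): time derivative
+# distribution for a periodic variable. period=360 wraps differences so a
+# 359 -> 1 degree step counts as +2 degrees, not -358.
+def _demo_time_derivative_dist():  # pragma: no cover - illustrative only
+    rng = np.random.default_rng(0)
+    wd = rng.uniform(0, 360, size=(8, 8, 24))  # synthetic wind direction
+    centers, counts, norm = time_derivative_dist(wd, bins=20, period=360)
+    return centers, counts, norm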
+[docs] +def continuous_dist(diffs, bins=None, range=None, interpolate=False): + """Get interpolated distribution from histogram + + Parameters + ---------- + diffs : ndarray + Array of values to use to construct distribution + bins : int + Number of bins for the distribution. If None then the number of bins + will be determined from the value range and the mean absolute + difference between consecutive values + range : tuple | None + Optional min/max range for the distribution. + interpolate : bool + Whether to interpolate over histogram counts. e.g. if a bin has + count = 0 and surrounding bins have count > 0 the bin with count = 0 + will have an interpolated value. + + Returns + ------- + ndarray + distribution value counts + ndarray + distribution values at bin centers + """ + + if bins is None: + dx = np.abs(np.diff(diffs)) + dx = dx[dx > 0] + dx = np.mean(dx) + bins = int((np.max(diffs) - np.min(diffs)) / dx) + logger.debug(f'Using n_bins={bins} to compute distribution') + counts, edges = np.histogram(diffs, bins=bins, range=range) + centers = edges[:-1] + (np.diff(edges) / 2) + if interpolate: + indices = np.where(counts > 0) + y = counts[indices] + x = centers[indices] + if len(x) > 1: + interp = interp1d(x, y, bounds_error=False, fill_value=0) + counts = interp(centers) + counts = counts.astype(float) / counts.sum() + return counts, centers
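+
+# Illustrative sketch (not part of the sup3r source): interpolate=True fills
+# empty interior bins from their non-empty neighbors before the counts are
+# normalized to sum to one.
+def _demo_continuous_dist():  # pragma: no cover - illustrative only
+    rng = np.random.default_rng(0)
+    counts, centers = continuous_dist(
+        rng.normal(size=1000), bins=50, interpolate=True
+    )
+    return counts, centers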
\ No newline at end of file
diff --git a/_modules/sup3r/solar/solar.html b/_modules/sup3r/solar/solar.html
new file mode 100644
index 000000000..ca9dee351
--- /dev/null
+++ b/_modules/sup3r/solar/solar.html
@@ -0,0 +1,1411 @@
+sup3r.solar.solar — sup3r 0.2.1.dev62 documentation
Source code for sup3r.solar.solar

+"""Custom sup3r solar module. This primarily converts GAN output clearsky ratio
+to GHI, DNI, and DHI using NSRDB data and utility modules like DISC
+
+Note that clearsky_ratio is assumed to be clearsky ghi ratio and is calculated
+as daily average GHI / daily average clearsky GHI.
+"""
+
+import logging
+import os
+
+import numpy as np
+import pandas as pd
+from farms.disc import disc
+from farms.utilities import calc_dhi, dark_night
+from rex import MultiTimeResource, Resource
+from rex.utilities.fun_utils import get_fun_call_str
+from scipy.spatial import KDTree
+
+from sup3r.postprocessing import OUTPUT_ATTRS, RexOutputs
+from sup3r.preprocessing.utilities import expand_paths
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Solar: + """Custom sup3r solar module. This primarily converts GAN output clearsky + ratio to GHI, DNI, and DHI using NSRDB data and utility modules like + DISC""" + + def __init__( + self, + sup3r_fps, + nsrdb_fp, + t_slice=slice(None), + tz=-6, + agg_factor=1, + nn_threshold=0.5, + cloud_threshold=0.99, + ): + """ + Parameters + ---------- + sup3r_fps : str | list + Full .h5 filepath(s) to one or more sup3r GAN output .h5 chunk + files containing clearsky_ratio, time_index, and meta. These files + must have the same meta data but can be sequential and ordered + temporal chunks. The data in these files has been rolled by tz + to a local time (assumed that the GAN was trained on local time + solar data) and will be converted back into UTC, so it's wise to + include some padding in the sup3r_fps file list. + nsrdb_fp : str + Filepath to NSRDB .h5 file containing clearsky_ghi, clearsky_dni, + clearsky_dhi data. + t_slice : slice + Slicing argument to slice the temporal axis of the sup3r_fps source + data after doing the tz roll to UTC but before returning the + irradiance variables. This can be used to effectively pad the solar + irradiance calculation in UTC time. For example, if sup3r_fps is 3 + files each with 24 hours of data, t_slice can be slice(24, 48) to + only output the middle day of irradiance data, but padded by the + other two days for the UTC output. + tz : int + The timezone offset for the data in sup3r_fps. It is assumed that + the GAN is trained on data in local time and therefore the output + in sup3r_fps should be treated as local time. For example, -6 is + CST which is default for CONUS training data. + agg_factor : int + Spatial aggregation factor for nsrdb-to-GAN-meta e.g. the number of + NSRDB spatial pixels to average for a single sup3r GAN output site. + nn_threshold : float + The KDTree nearest neighbor threshold that determines how far the + sup3r GAN output data has to be from the NSRDB source data to get + irradiance=0. Note that this value is in decimal degrees which is a + very approximate way to determine real distance. + cloud_threshold : float + Clearsky ratio threshold below which the data is considered cloudy + and DNI is calculated using DISC. + """ + + self.t_slice = t_slice + self.agg_factor = agg_factor + self.nn_threshold = nn_threshold + self.cloud_threshold = cloud_threshold + self.tz = tz + self._nsrdb_fp = nsrdb_fp + self._sup3r_fps = sup3r_fps + if isinstance(self._sup3r_fps, str): + self._sup3r_fps = [self._sup3r_fps] + + logger.debug( + 'Initializing solar module with sup3r files: {}'.format( + [os.path.basename(fp) for fp in self._sup3r_fps] + ) + ) + logger.debug( + 'Initializing solar module with temporal slice: {}'.format( + self.t_slice + ) + ) + logger.debug( + 'Initializing solar module with NSRDB source fp: {}'.format( + self._nsrdb_fp + ) + ) + + self.gan_data = MultiTimeResource(self._sup3r_fps) + self.nsrdb = Resource(self._nsrdb_fp) + + # cached variables + self._nsrdb_tslice = None + self._idnn = None + self._dist = None + self._sza = None + self._cs_ratio = None + self._ghi = None + self._dni = None + self._dhi = None + + self.preflight() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + if exc_type is not None: + raise +
+[docs] + def preflight(self): + """Run preflight checks on source data to make sure everything will + work together.""" + assert 'clearsky_ratio' in self.gan_data.dsets + assert 'clearsky_ghi' in self.nsrdb.dsets + assert 'clearsky_dni' in self.nsrdb.dsets + assert 'solar_zenith_angle' in self.nsrdb.dsets + assert 'surface_pressure' in self.nsrdb.dsets + assert isinstance(self.nsrdb_tslice, slice) + + ti_gan = self.gan_data.time_index + delta = pd.Series(ti_gan[1:] - ti_gan[:-1]).mean().total_seconds() + msg = ( + 'It is assumed that the sup3r GAN output solar data will be ' + 'hourly but received time index: {}'.format(ti_gan) + ) + assert delta == 3600, msg
+ + +
+[docs] + def close(self): + """Close all internal file handlers""" + self.gan_data.close() + self.nsrdb.close()
+ + + @property + def idnn(self): + """Get the nearest neighbor meta data indices from the NSRDB data that + correspond to the sup3r GAN data + + Returns + ------- + idnn : Union[np.ndarray, da.core.Array] + 2D array of length (n_sup3r_sites, agg_factor) where the values are + meta data indices from the NSRDB. + """ + if self._idnn is None: + tree = KDTree(self.nsrdb.meta[['latitude', 'longitude']]) + out = tree.query(self.gan_data.meta[['latitude', 'longitude']]) + self._dist, self._idnn = out + if len(self._idnn.shape) == 1: + self._dist = np.expand_dims(self._dist, axis=1) + self._idnn = np.expand_dims(self._idnn, axis=1) + return self._idnn + + @property + def dist(self): + """Get the nearest neighbor distances from the sup3r GAN data sites to + the NSRDB nearest neighbors. + + Returns + ------- + dist : Union[np.ndarray, da.core.Array] + 2D array of length (n_sup3r_sites, agg_factor) where the values are + decimal degree distances from the sup3r sites to the nsrdb nearest + neighbors. + """ + if self._dist is None: + _ = self.idnn + return self._dist + + @property + def time_index(self): + """Time index for the sup3r GAN output data but sliced by t_slice + + Returns + ------- + pd.DatetimeIndex + """ + return self.gan_data.time_index[self.t_slice] + + @property + def out_of_bounds(self): + """Get a boolean mask for the sup3r data that is out of bounds (too far + from the NSRDB data). + + Returns + ------- + out_of_bounds : Union[np.ndarray, da.core.Array] + 1D boolean array with length == number of sup3r GAN sites. True if + the site is too far from the NSRDB. + """ + return (self.dist > self.nn_threshold).any(axis=1) + + @property + def nsrdb_tslice(self): + """Get the time slice of the NSRDB data corresponding to the sup3r GAN + output.""" + + if self._nsrdb_tslice is None: + doy_nsrdb = self.nsrdb.time_index.day_of_year + doy_gan = self.time_index.day_of_year + mask = doy_nsrdb.isin(doy_gan) + + if mask.sum() == 0: + msg = ( + 'Time index intersection of the NSRDB time index and ' + 'sup3r GAN output has only {} common timesteps! ' + 'Something went wrong.\nNSRDB time index: \n{}\nSup3r ' + 'GAN output time index:\n{}'.format( + mask.sum(), self.nsrdb.time_index, self.time_index + ) + ) + logger.error(msg) + raise RuntimeError(msg) + + ilocs = np.where(mask)[0] + t0, t1 = ilocs[0], ilocs[-1] + 1 + + ti_nsrdb = self.nsrdb.time_index + delta = ( + pd.Series(ti_nsrdb[1:] - ti_nsrdb[:-1])[1:] + .mean() + .total_seconds() + ) + + step = int(3600 / delta) + self._nsrdb_tslice = slice(t0, t1, step) + + logger.debug( + 'Found nsrdb_tslice {} with corresponding ' + 'time index:\n\t{}'.format( + self._nsrdb_tslice, + self.nsrdb.time_index[self._nsrdb_tslice], + ) + ) + + return self._nsrdb_tslice + + @property + def clearsky_ratio(self): + """Get the clearsky ghi ratio data from the GAN output, rolled from tz + to UTC. + + Returns + ------- + clearsky_ratio : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. 
+ """ + if self._cs_ratio is None: + self._cs_ratio = self.gan_data['clearsky_ratio'] + self._cs_ratio = np.roll(self._cs_ratio, -self.tz, axis=0) + + # if tz is negative, roll to utc is positive, and the beginning of + # the dataset is rolled over from the end and must be backfilled, + # otherwise you can get seams + self._cs_ratio[: -self.tz, :] = self._cs_ratio[-self.tz, :] + + # apply temporal slicing of source data, see docstring on t_slice + # for more info + self._cs_ratio = self._cs_ratio[self.t_slice, :] + + return self._cs_ratio + + @property + def solar_zenith_angle(self): + """Get the solar zenith angle (degrees) + + Returns + ------- + solar_zenith_angle : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. + """ + if self._sza is None: + self._sza = self.get_nsrdb_data('solar_zenith_angle') + return self._sza + + @property + def ghi(self): + """Get the ghi (W/m2) based on the GAN output clearsky ratio + + clearsky_ghi in UTC. + + Returns + ------- + ghi : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. + """ + if self._ghi is None: + logger.debug('Calculating GHI.') + self._ghi = ( + self.get_nsrdb_data('clearsky_ghi') * self.clearsky_ratio + ) + self._ghi[:, self.out_of_bounds] = 0 + return self._ghi + + @property + def dni(self): + """Get the dni (W/m2) which is clearsky dni (from NSRDB) when the GAN + output is clear (clearsky_ratio is > cloud_threshold), and calculated + from the DISC model when cloudy + + Returns + ------- + dni : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. + """ + if self._dni is None: + logger.debug('Calculating DNI.') + self._dni = self.get_nsrdb_data('clearsky_dni') + pressure = self.get_nsrdb_data('surface_pressure') + doy = self.time_index.day_of_year.values + cloudy_dni = disc( + self.ghi, self.solar_zenith_angle, doy, pressure=pressure + ) + cloudy_dni = np.minimum(self._dni, cloudy_dni) + self._dni[self.cloud_mask] = cloudy_dni[self.cloud_mask] + self._dni = dark_night(self._dni, self.solar_zenith_angle) + self._dni[:, self.out_of_bounds] = 0 + return self._dni + + @property + def dhi(self): + """Get the dhi (W/m2) which is calculated based on the simple + relationship between GHI, DNI and solar zenith angle. + + Returns + ------- + dhi : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. + """ + if self._dhi is None: + logger.debug('Calculating DHI.') + self._dhi, self._dni = calc_dhi( + self.dni, self.ghi, self.solar_zenith_angle + ) + self._dhi = dark_night(self._dhi, self.solar_zenith_angle) + self._dhi[:, self.out_of_bounds] = 0 + return self._dhi + + @property + def cloud_mask(self): + """Get the cloud mask (True if cloudy) based on the GAN output clearsky + ratio in UTC. + + Returns + ------- + cloud_mask : Union[np.ndarray, da.core.Array] + 2D array with shape (time, sites) in UTC. + """ + return self.clearsky_ratio < self.cloud_threshold + +
+[docs] + def get_nsrdb_data(self, dset): + """Get an NSRDB dataset with spatial index corresponding to the sup3r + GAN output data, averaged across the agg_factor. + + Parameters + ---------- + dset : str + Name of dataset to retrieve from NSRDB source file + + Returns + ------- + out : Union[np.ndarray, da.core.Array] + Dataset of shape (time, sites) where time and sites correspond to + the same shape as the sup3r GAN output data and if agg_factor > 1 + the sites is an average across multiple NSRDB sites. + """ + + logger.debug('Retrieving "{}" from NSRDB source data.'.format(dset)) + out = None + + for idx in range(self.idnn.shape[1]): + temp = self.nsrdb[dset, self.nsrdb_tslice, self.idnn[:, idx]] + temp = temp.astype(np.float32) + + if out is None: + out = temp + else: + out += temp + + out /= self.idnn.shape[1] + + return out
+ + +
+[docs] + @staticmethod + def get_sup3r_fps(fp_pattern, ignore=None): + """Get a list of file chunks to run in parallel based on a file pattern + + Note + ---- + It's assumed that all source files have the pattern + `sup3r_file_TTTTTT_SSSSSS.h5` where TTTTTT is the zero-padded temporal + chunk index and SSSSSS is the zero-padded spatial chunk index. + + Parameters + ---------- + fp_pattern : str | list + Unix-style file pattern (glob) that matches a set of + spatiotemporally chunked sup3r forward pass output files. + ignore : str | None + Ignore all files that have this string in their filenames. + + Returns + ------- + fp_sets : list + List of file sets where each file set is up to 3 temporally + sequential files over the same spatial chunk. Each file set + overlaps with its neighbor such that fp_sets[0][-1] == + fp_sets[1][0] (this is so Solar can do temporal padding when + rolling from GAN local time output to UTC). + t_slices : list + List of t_slice arguments corresponding to fp_sets to pass to + Solar class initialization that will slice and reduce the + overlapping time axis when Solar outputs irradiance data. + temporal_ids : list + List of temporal id strings TTTTTT corresponding to the fp_sets + spatial_ids : list + List of spatial id strings SSSSSS corresponding to the fp_sets + target_fps : list + List of actual target files corresponding to fp_sets, so for + example the file set fp_sets[10] sliced by t_slices[10] is designed + to process target_fps[10] + """ + + all_fps = [fp for fp in expand_paths(fp_pattern) if fp.endswith('.h5')] + if ignore is not None: + all_fps = [ + fp for fp in all_fps if ignore not in os.path.basename(fp) + ] + + all_fps = sorted(all_fps) + + source_dir = os.path.dirname(all_fps[0]) + source_fn_base = os.path.basename(all_fps[0]).replace('.h5', '') + source_fn_base = '_'.join(source_fn_base.split('_')[:-2]) + + all_id_spatial = [ + fp.replace('.h5', '').split('_')[-1] for fp in all_fps + ] + all_id_temporal = [ + fp.replace('.h5', '').split('_')[-2] for fp in all_fps + ] + + all_id_spatial = sorted(set(all_id_spatial)) + all_id_temporal = sorted(set(all_id_temporal)) + + fp_sets = [] + t_slices = [] + temporal_ids = [] + spatial_ids = [] + target_fps = [] + for idt, id_temporal in enumerate(all_id_temporal): + start = 0 + single_chunk_id_temps = [id_temporal] + + if idt > 0: + start = 24 + single_chunk_id_temps.insert(0, all_id_temporal[idt - 1]) + + if idt < (len(all_id_temporal) - 1): + single_chunk_id_temps.append(all_id_temporal[idt + 1]) + + for id_spatial in all_id_spatial: + single_fp_set = [] + for t_str in single_chunk_id_temps: + fp = os.path.join(source_dir, source_fn_base) + fp += f'_{t_str}_{id_spatial}.h5' + single_fp_set.append(fp) + + fp_target = os.path.join(source_dir, source_fn_base) + fp_target += f'_{id_temporal}_{id_spatial}.h5' + + fp_sets.append(single_fp_set) + t_slices.append(slice(start, start + 24)) + temporal_ids.append(id_temporal) + spatial_ids.append(id_spatial) + target_fps.append(fp_target) + + return fp_sets, t_slices, temporal_ids, spatial_ids, target_fps
+ + +
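+
+    # Illustrative sketch (assumed filenames, not from the sup3r source): for
+    # three temporal chunks over one spatial chunk,
+    # sup3r_file_000000_000000.h5 ... sup3r_file_000002_000000.h5, the
+    # returned sets overlap like
+    #   fp_sets[0] == [..._000000_..., ..._000001_...]
+    #   fp_sets[1] == [..._000000_..., ..._000001_..., ..._000002_...]
+    #   fp_sets[2] == [..._000001_..., ..._000002_...]
+    # with t_slices == [slice(0, 24), slice(24, 48), slice(24, 48)].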
+[docs] + @classmethod + def get_node_cmd(cls, config): + """Get a CLI call to run Solar.run_temporal_chunk() on a single + node based on an input config. + + Parameters + ---------- + config : dict + sup3r solar config with all necessary args and kwargs to + run Solar.run_temporal_chunk() on a single node. + """ + import_str = 'import time;\n' + import_str += 'from gaps import Status;\n' + import_str += 'from rex import init_logger;\n' + import_str += f'from sup3r.solar import {cls.__name__}' + + fun_str = get_fun_call_str(cls.run_temporal_chunks, config) + + log_file = config.get('log_file', None) + log_level = config.get('log_level', 'INFO') + log_arg_str = f'"sup3r", log_level="{log_level}"' + if log_file is not None: + log_arg_str += f', log_file="{log_file}"' + + cmd = ( + f"python -c '{import_str};\n" + 't0 = time.time();\n' + f'logger = init_logger({log_arg_str});\n' + f'{fun_str};\n' + 't_elap = time.time() - t0;\n' + ) + + pipeline_step = config.get('pipeline_step') or ModuleName.SOLAR + cmd = BaseCLI.add_status_cmd(config, pipeline_step, cmd) + cmd += ";'\n" + + return cmd.replace('\\', '/')
+ + +
+[docs] + def write(self, fp_out, features=('ghi', 'dni', 'dhi')): + """Write irradiance datasets (ghi, dni, dhi) to output h5 file. + + Parameters + ---------- + fp_out : str + Filepath to an output h5 file to write irradiance variables to. + Parent directory will be created if it does not exist. + features : list | tuple + List of features to write to disk. These have to be attributes of + the Solar class (ghi, dni, dhi). + """ + + if not os.path.exists(os.path.dirname(fp_out)): + os.makedirs(os.path.dirname(fp_out), exist_ok=True) + + with RexOutputs(fp_out, 'w') as fh: + fh.meta = self.gan_data.meta + fh.time_index = self.time_index + + for feature in features: + attrs = OUTPUT_ATTRS[feature] + arr = getattr(self, feature, None) + if arr is None: + msg = ( + 'Feature "{}" was not available from Solar ' + 'module class.'.format(feature) + ) + logger.error(msg) + raise AttributeError(msg) + + fh.add_dataset( + fp_out, + feature, + arr, + dtype=attrs['dtype'], + attrs=attrs, + chunks=attrs['chunks'], + ) + logger.info(f'Added "{feature}" to output file.') + run_attrs = self.gan_data.h5[self._sup3r_fps[0]].global_attrs + run_attrs['nsrdb_source'] = self._nsrdb_fp + fh.run_attrs = run_attrs + + logger.info(f'Finished writing file: {fp_out}')
+ + +
+[docs] + @classmethod + def run_temporal_chunks( + cls, + fp_pattern, + nsrdb_fp, + fp_out_suffix='irradiance', + tz=-6, + agg_factor=1, + nn_threshold=0.5, + cloud_threshold=0.99, + features=('ghi', 'dni', 'dhi'), + temporal_ids=None, + ): + """Run the solar module on all spatial chunks for each temporal + chunk corresponding to the fp_pattern and the given list of + temporal_ids. This typically gets run from the CLI. + + Parameters + ---------- + fp_pattern : str + Unix-style file pattern (glob) that matches a set of + spatiotemporally chunked sup3r forward pass output files. + nsrdb_fp : str + Filepath to NSRDB .h5 file containing clearsky_ghi, clearsky_dni, + clearsky_dhi data. + fp_out_suffix : str + Suffix to add to the input sup3r source files when writing the + processed solar irradiance data to new data files. + tz : int + The timezone offset for the data in sup3r_fps. It is assumed that + the GAN is trained on data in local time and therefore the output + in sup3r_fps should be treated as local time. For example, -6 is + CST which is default for CONUS training data. + agg_factor : int + Spatial aggregation factor for nsrdb-to-GAN-meta e.g. the number of + NSRDB spatial pixels to average for a single sup3r GAN output site. + nn_threshold : float + The KDTree nearest neighbor threshold that determines how far the + sup3r GAN output data has to be from the NSRDB source data to get + irradiance=0. Note that this value is in decimal degrees which is a + very approximate way to determine real distance. + cloud_threshold : float + Clearsky ratio threshold below which the data is considered cloudy + and DNI is calculated using DISC. + features : list | tuple + List of features to write to disk. These have to be attributes of + the Solar class (ghi, dni, dhi). + temporal_ids : list | None + List of zero-padded temporal ids from the file chunks that match + fp_pattern. This input typically gets set from the CLI. If None, + this will run all temporal indices. + """ + if temporal_ids is None: + cls._run_temporal_chunk( + fp_pattern=fp_pattern, + nsrdb_fp=nsrdb_fp, + fp_out_suffix=fp_out_suffix, + tz=tz, + agg_factor=agg_factor, + nn_threshold=nn_threshold, + cloud_threshold=cloud_threshold, + features=features, + temporal_id=temporal_ids, + ) + else: + for temporal_id in temporal_ids: + cls._run_temporal_chunk( + fp_pattern=fp_pattern, + nsrdb_fp=nsrdb_fp, + fp_out_suffix=fp_out_suffix, + tz=tz, + agg_factor=agg_factor, + nn_threshold=nn_threshold, + cloud_threshold=cloud_threshold, + features=features, + temporal_id=temporal_id, + )
+ + + @classmethod + def _run_temporal_chunk( + cls, + fp_pattern, + nsrdb_fp, + fp_out_suffix='irradiance', + tz=-6, + agg_factor=1, + nn_threshold=0.5, + cloud_threshold=0.99, + features=('ghi', 'dni', 'dhi'), + temporal_id=None, + ): + """Run the solar module on all spatial chunks for a single temporal + chunk corresponding to the fp_pattern. This typically gets run from the + CLI. + + See Also + -------- + :meth:`run_temporal_chunks` + """ + + temp = cls.get_sup3r_fps(fp_pattern, ignore=f'_{fp_out_suffix}.h5') + fp_sets, t_slices, temporal_ids, _, target_fps = temp + + if temporal_id is not None: + fp_sets = [ + fp_set + for i, fp_set in enumerate(fp_sets) + if temporal_ids[i] == temporal_id + ] + t_slices = [ + t_slice + for i, t_slice in enumerate(t_slices) + if temporal_ids[i] == temporal_id + ] + target_fps = [ + target_fp + for i, target_fp in enumerate(target_fps) + if temporal_ids[i] == temporal_id + ] + + zip_iter = zip(fp_sets, t_slices, target_fps) + for i, (fp_set, t_slice, fp_target) in enumerate(zip_iter): + fp_out = fp_target.replace('.h5', f'_{fp_out_suffix}.h5') + + if os.path.exists(fp_out): + logger.info('%s already exists. Skipping.', fp_out) + + else: + logger.info( + 'Running temporal index {} out of {}.'.format( + i + 1, len(fp_sets) + ) + ) + + kwargs = { + 't_slice': t_slice, + 'tz': tz, + 'agg_factor': agg_factor, + 'nn_threshold': nn_threshold, + 'cloud_threshold': cloud_threshold, + } + tmp_out = fp_out + '.tmp' + with Solar(fp_set, nsrdb_fp, **kwargs) as solar: + solar.write(tmp_out, features=features) + os.replace(tmp_out, fp_out)
+ +
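+
+# Hedged end-to-end sketch (illustrative only; the file paths are
+# placeholders and must point at real sup3r chunk files and NSRDB data):
+def _demo_solar_usage():  # pragma: no cover - illustrative only
+    fps = [
+        './chunks/sup3r_file_000000_000000.h5',
+        './chunks/sup3r_file_000001_000000.h5',
+    ]
+    with Solar(fps, './nsrdb_2020.h5', t_slice=slice(0, 24), tz=-6) as solar:
+        solar.write('./chunks/sup3r_file_000000_000000_irradiance.h5')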
\ No newline at end of file
diff --git a/_modules/sup3r/solar/solar_cli.html b/_modules/sup3r/solar/solar_cli.html
new file mode 100644
index 000000000..dfa50fbe1
--- /dev/null
+++ b/_modules/sup3r/solar/solar_cli.html
@@ -0,0 +1,832 @@
+sup3r.solar.solar_cli — sup3r 0.2.1.dev62 documentation
Source code for sup3r.solar.solar_cli

+"""sup3r solar CLI entry points."""
+
+import copy
+import logging
+import os
+
+import click
+import numpy as np
+
+from sup3r import __version__
+from sup3r.solar import Solar
+from sup3r.utilities import ModuleName
+from sup3r.utilities.cli import AVAILABLE_HARDWARE_OPTIONS, BaseCLI
+
+logger = logging.getLogger(__name__)
+
+
+@click.group()
+@click.version_option(version=__version__)
+@click.option(
+    '-v',
+    '--verbose',
+    is_flag=True,
+    help='Flag to turn on debug logging. Default is not verbose.',
+)
+@click.pass_context
+def main(ctx, verbose):
+    """Sup3r Solar Command Line Interface"""
+    ctx.ensure_object(dict)
+    ctx.obj['VERBOSE'] = verbose
+
+
+@main.command()
+@click.option(
+    '--config_file',
+    '-c',
+    required=True,
+    type=click.Path(exists=True),
+    help='sup3r solar configuration .json file.',
+)
+@click.option(
+    '-v',
+    '--verbose',
+    is_flag=True,
+    help='Flag to turn on debug logging. Default is not verbose.',
+)
+@click.pass_context
+def from_config(ctx, config_file, verbose=False, pipeline_step=None):
+    """Run sup3r solar from a config file."""
+    config = BaseCLI.from_config_preflight(
+        ModuleName.SOLAR, ctx, config_file, verbose
+    )
+    exec_kwargs = config.get('execution_control', {})
+    hardware_option = exec_kwargs.pop('option', 'local')
+    log_pattern = config.get('log_pattern', None)
+    fp_pattern = config['fp_pattern']
+    basename = config['job_name']
+    fp_sets, _, temporal_ids, _, _ = Solar.get_sup3r_fps(fp_pattern)
+    temporal_ids = sorted(set(temporal_ids))
+    max_nodes = config.get('max_nodes', len(temporal_ids))
+    max_nodes = min((max_nodes, len(temporal_ids)))
+    logger.info(
+        'Solar module found {} sets of chunked source files to run '
+        'on. Submitting to {} nodes based on the number of temporal '
+        'chunks {} and the requested number of nodes {}'.format(
+            len(fp_sets),
+            max_nodes,
+            len(temporal_ids),
+            config.get('max_nodes', None),
+        )
+    )
+
+    temporal_id_chunks = np.array_split(temporal_ids, max_nodes)
+    for i_node, temporal_ids in enumerate(temporal_id_chunks):
+        node_config = copy.deepcopy(config)
+        node_config['log_file'] = (
+            log_pattern
+            if log_pattern is None
+            else os.path.normpath(log_pattern.format(node_index=i_node))
+        )
+        name = '{}_{}'.format(basename, str(i_node).zfill(6))
+        ctx.obj['NAME'] = name
+        node_config['job_name'] = name
+        node_config['pipeline_step'] = pipeline_step
+
+        node_config['temporal_ids'] = list(temporal_ids)
+        cmd = Solar.get_node_cmd(node_config)
+
+        if hardware_option.lower() in AVAILABLE_HARDWARE_OPTIONS:
+            kickoff_slurm_job(ctx, cmd, pipeline_step, **exec_kwargs)
+        else:
+            kickoff_local_job(ctx, cmd, pipeline_step)
+
+
+
+[docs] +def kickoff_slurm_job( + ctx, + cmd, + pipeline_step=None, + alloc='sup3r', + memory=None, + walltime=4, + feature=None, + stdout_path='./stdout/', +): + """Run sup3r on HPC via SLURM job submission. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in SLURM shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + alloc : str + HPC project (allocation) handle. Example: 'sup3r'. + memory : int + Node memory request in GB. + walltime : float + Node walltime request in hours. + feature : str + Additional flags for SLURM job. Format is "--qos=high" + or "--depend=[state:job_id]". Default is None. + stdout_path : str + Path to print .stdout and .stderr files. + """ + BaseCLI.kickoff_slurm_job( + ModuleName.SOLAR, + ctx, + cmd, + alloc, + memory, + walltime, + feature, + stdout_path, + pipeline_step, + )
+ + + +
+[docs] +def kickoff_local_job(ctx, cmd, pipeline_step=None): + """Run sup3r solar locally. + + Parameters + ---------- + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in shell script. Example: + 'python -m sup3r.cli forward_pass -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + BaseCLI.kickoff_local_job(ModuleName.SOLAR, ctx, cmd, pipeline_step)
+ + + +if __name__ == '__main__': + try: + main(obj={}) + except Exception: + logger.exception('Error running sup3r solar CLI') + raise +
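+
+# Hedged example config for `from_config` (key names inferred from the code
+# above and from Solar.run_temporal_chunks; values are placeholders):
+#
+# {
+#     "fp_pattern": "./chunks/sup3r_file_*.h5",
+#     "nsrdb_fp": "./nsrdb_2020.h5",
+#     "job_name": "sup3r_solar",
+#     "max_nodes": 4,
+#     "log_pattern": "./logs/solar_node_{node_index}.log",
+#     "execution_control": {"option": "kestrel", "alloc": "sup3r",
+#                           "walltime": 4}
+# }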
\ No newline at end of file
diff --git a/_modules/sup3r/utilities.html b/_modules/sup3r/utilities.html
new file mode 100644
index 000000000..8f48150cd
--- /dev/null
+++ b/_modules/sup3r/utilities.html
@@ -0,0 +1,738 @@
+sup3r.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.utilities

+"""Sup3r utilities"""
+
+import os
+import sys
+from enum import Enum
+
+import cftime
+import dask
+import h5netcdf
+import netCDF4
+import numpy as np
+import pandas as pd
+import phygnn
+import rex
+import sklearn
+import tensorflow as tf
+import xarray
+
+from .._version import __version__
+
+VERSION_RECORD = {
+    'sup3r': __version__,
+    'tensorflow': tf.__version__,
+    'sklearn': sklearn.__version__,
+    'pandas': pd.__version__,
+    'numpy': np.__version__,
+    'nrel-phygnn': phygnn.__version__,
+    'nrel-rex': rex.__version__,
+    'python': sys.version,
+    'xarray': xarray.__version__,
+    'h5netcdf': h5netcdf.__version__,
+    'dask': dask.__version__,
+    'netCDF4': netCDF4.__version__,
+    'cftime': cftime.__version__
+}
+
+
+
+[docs] +class ModuleName(str, Enum): + """A collection of the module names available in sup3r. + Each module name should match the name of the click command + that will be used to invoke its respective cli. As of 5/26/2022, + this means that all commands are lowercase with underscores + replaced by dashes. + """ + + FORWARD_PASS = 'forward-pass' + DATA_EXTRACT = 'data-extract' + DATA_COLLECT = 'data-collect' + QA = 'qa' + SOLAR = 'solar' + STATS = 'stats' + BIAS_CALC = 'bias-calc' + VISUAL_QA = 'visual-qa' + REGRID = 'regrid' + + def __str__(self): + return self.value + + def __format__(self, format_spec): + return str.__format__(self.value, format_spec) + +
+[docs] + @classmethod + def all_names(cls): + """All module names. + + Returns + ------- + set + The set of all module name strings. + """ + # pylint: disable=no-member + return {v.value for v in cls.__members__.values()}
+
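+
+# Minimal usage sketch (illustrative only): the str mixin makes members render
+# as their dashed CLI command names.
+def _demo_module_name():  # pragma: no cover - illustrative only
+    assert str(ModuleName.SOLAR) == 'solar'
+    assert '{}'.format(ModuleName.FORWARD_PASS) == 'forward-pass'
+    assert 'bias-calc' in ModuleName.all_names()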
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/cli.html b/_modules/sup3r/utilities/cli.html
new file mode 100644
index 000000000..3b2cdf400
--- /dev/null
+++ b/_modules/sup3r/utilities/cli.html
@@ -0,0 +1,1058 @@
+sup3r.utilities.cli — sup3r 0.2.1.dev62 documentation
Source code for sup3r.utilities.cli

+"""Sup3r base CLI class."""
+import logging
+import os
+
+import click
+from gaps import Status
+from gaps.config import load_config
+from rex.utilities.execution import SubprocessManager
+from rex.utilities.hpc import SLURM
+from rex.utilities.loggers import init_mult
+
+from . import ModuleName
+from .utilities import safe_serialize
+
+logger = logging.getLogger(__name__)
+AVAILABLE_HARDWARE_OPTIONS = ('kestrel', 'eagle', 'slurm')
+
+
+
+[docs] +class SlurmManager(SLURM): + """GAPs-compliant SLURM manager""" + +
+[docs] + def check_status_using_job_id(self, job_id): + """Check the status of a job using the HPC queue and job ID. + + Parameters + ---------- + job_id : int + Job integer ID number. + + Returns + ------- + status : str | None + Queue job status string or `None` if not found. + """ + return self.check_status(job_id=job_id)
+
+ + + +
+[docs] +class BaseCLI: + """Base CLI class used to create CLI for modules in ModuleName""" + +
+[docs] + @classmethod + def from_config(cls, module_name, module_class, ctx, config_file, verbose, + pipeline_step=None): + """Run sup3r module from a config file. + + Parameters + ---------- + module_name : str + Module name string from :class:`sup3r.utilities.ModuleName`. + module_class : Object + Class object used to call get_node_cmd(config). + e.g. Sup3rQa.get_node_cmd(config) + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + config_file : str + Path to config file providing all needed inputs to module_class + verbose : bool + Whether to run in verbose mode. + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + config = cls.from_config_preflight( + module_name, ctx, config_file, verbose + ) + config['pipeline_step'] = pipeline_step + + exec_kwargs = config.get('execution_control', {}) + hardware_option = exec_kwargs.pop('option', 'local') + + cmd = module_class.get_node_cmd(config) + + if hardware_option.lower() in AVAILABLE_HARDWARE_OPTIONS: + # pass cmd positionally and pipeline_step by keyword to match the + # kickoff_slurm_job signature (module_name, ctx, cmd, ...) + cls.kickoff_slurm_job(module_name, ctx, cmd, + pipeline_step=pipeline_step, **exec_kwargs) + else: + cls.kickoff_local_job(module_name, ctx, cmd, pipeline_step)
+ + +
+[docs] + @classmethod + def from_config_preflight(cls, module_name, ctx, config_file, verbose): + """Parse config file prior to running sup3r module. + + Parameters + ---------- + module_name : str + Module name string from :class:`sup3r.utilities.ModuleName`. + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + config_file : str + Path to config file providing all needed inputs to the module class + verbose : bool + Whether to run in verbose mode. + + Returns + ------- + config : dict + Dictionary corresponding to config_file + """ + cls.check_module_name(module_name) + + ctx.ensure_object(dict) + ctx.obj['VERBOSE'] = verbose + status_dir = os.path.dirname(os.path.abspath(config_file)) + ctx.obj['OUT_DIR'] = status_dir + config = load_config(config_file) + config['status_dir'] = status_dir + log_file = config.get('log_file', None) + log_pattern = config.get('log_pattern', None) + config_verbose = config.get('log_level', 'INFO') + config_verbose = config_verbose == 'DEBUG' + verbose = any([verbose, config_verbose, ctx.obj['VERBOSE']]) + exec_kwargs = config.get('execution_control', {}) + hardware_option = exec_kwargs.get('option', 'local') + + log_dir = log_file or log_pattern + log_dir = log_dir if log_dir is None else os.path.dirname(log_dir) + + init_mult( + f'sup3r_{module_name.replace("-", "_")}', + log_dir, + modules=[__name__, 'sup3r'], + verbose=verbose, + ) + + if log_pattern is not None: + os.makedirs(os.path.dirname(log_pattern), exist_ok=True) + if '.log' not in log_pattern: + log_pattern += '.log' + if '{node_index}' not in log_pattern: + log_pattern = log_pattern.replace('.log', '_{node_index}.log') + + exec_kwargs['stdout_path'] = os.path.join(status_dir, 'stdout/') + logger.debug('Found execution kwargs: {}'.format(exec_kwargs)) + logger.debug('Hardware run option: "{}"'.format(hardware_option)) + + name = f'sup3r_{module_name.replace("-", "_")}' + name += '_{}'.format(os.path.basename(status_dir)) + job_name = config.get('job_name', None) + if job_name is not None: + name = job_name + ctx.obj['NAME'] = name + config['job_name'] = name + + return config
+ + +
+[docs] + @classmethod + def check_module_name(cls, module_name): + """Make sure module_name is a valid member of the ModuleName class""" + msg = ( + 'Module name must be in ModuleName class. Received ' + f'{module_name}.' + ) + assert module_name in ModuleName, msg
+ + +
+[docs] + @classmethod + def kickoff_slurm_job( + cls, + module_name, + ctx, + cmd, + alloc='sup3r', + memory=None, + walltime=4, + feature=None, + stdout_path='./stdout/', + pipeline_step=None, + ): + """Run sup3r module on HPC via SLURM job submission. + + Parameters + ---------- + module_name : str + Module name string from :class:`sup3r.utilities.ModuleName`. + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in SLURM shell script. Example: + 'python -m sup3r.cli <module_name> -c <config_file>' + alloc : str + HPC project (allocation) handle. Example: 'sup3r'. + memory : int + Node memory request in GB. + walltime : float + Node walltime request in hours. + feature : str + Additional flags for SLURM job. Format is "--qos=high" + or "--depend=[state:job_id]". Default is None. + stdout_path : str + Path to print .stdout and .stderr files. + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + cls.check_module_name(module_name) + if pipeline_step is None: + pipeline_step = module_name + + name = ctx.obj['NAME'] + out_dir = ctx.obj['OUT_DIR'] + slurm_manager = ctx.obj.get('SLURM_MANAGER', None) + if slurm_manager is None: + slurm_manager = SlurmManager() + ctx.obj['SLURM_MANAGER'] = slurm_manager + + status = Status.retrieve_job_status( + out_dir, + pipeline_step=pipeline_step, + job_name=name, + subprocess_manager=slurm_manager, + ) + job_failed = 'fail' in str(status).lower() + job_submitted = status != 'not submitted' + + msg = f'sup3r {module_name} CLI failed to submit jobs!' + if status == 'successful': + msg = ( + f'Job "{name}" is successful in status json found in ' + f'"{out_dir}", not re-running.' + ) + elif not job_failed and job_submitted and status is not None: + msg = ( + f'Job "{name}" was found with status "{status}", not ' + 'resubmitting' + ) + else: + job_info = f"{module_name}" + if pipeline_step != module_name: + job_info = f"{job_info} (pipeline step {pipeline_step!r})" + logger.info( + f'Running sup3r {job_info} on SLURM with node ' + f'name "{name}".' + ) + out = slurm_manager.sbatch( + cmd, + alloc=alloc, + memory=memory, + walltime=walltime, + feature=feature, + name=name, + stdout_path=stdout_path, + )[0] + if out: + msg = ( + f'Kicked off sup3r {job_info} job "{name}" ' + f'(SLURM jobid #{out}).' + ) + + # add job to sup3r status file. + Status.mark_job_as_submitted( + out_dir, + pipeline_step=pipeline_step, + job_name=name, + replace=True, + job_attrs={'job_id': out, 'hardware': 'kestrel'}, + ) + + click.echo(msg) + logger.info(msg)
+ + +
+[docs] + @classmethod + def kickoff_local_job(cls, module_name, ctx, cmd, pipeline_step=None): + """Run sup3r module locally. + + Parameters + ---------- + module_name : str + Module name string from :class:`sup3r.utilities.ModuleName`. + ctx : click.pass_context + Click context object where ctx.obj is a dictionary + cmd : str + Command to be submitted in shell script. Example: + 'python -m sup3r.cli <module_name> -c <config_file>' + pipeline_step : str, optional + Name of the pipeline step being run. If ``None``, the + ``pipeline_step`` will be set to the ``module_name``, + mimicking old reV behavior. By default, ``None``. + """ + cls.check_module_name(module_name) + if pipeline_step is None: + pipeline_step = module_name + + name = ctx.obj['NAME'] + out_dir = ctx.obj['OUT_DIR'] + subprocess_manager = SubprocessManager + + status = Status.retrieve_job_status( + out_dir, pipeline_step=pipeline_step, job_name=name + ) + job_failed = 'fail' in str(status).lower() + job_submitted = status != 'not submitted' + + msg = f'sup3r {module_name} CLI failed to submit jobs!' + if status == 'successful': + msg = ( + f'Job "{name}" is successful in status json found in ' + f'"{out_dir}", not re-running.' + ) + elif not job_failed and job_submitted and status is not None: + msg = ( + f'Job "{name}" was found with status "{status}", not ' + 'resubmitting' + ) + else: + job_info = f"{module_name}" + if pipeline_step != module_name: + job_info = f"{job_info} (pipeline step {pipeline_step!r})" + logger.info( + f'Running sup3r {job_info} locally with job ' + f'name "{name}".' + ) + Status.mark_job_as_submitted( + out_dir, + pipeline_step=pipeline_step, + job_name=name, + replace=True + ) + subprocess_manager.submit(cmd) + msg = f'Completed sup3r {job_info} job "{name}".' + + click.echo(msg) + logger.info(msg)
+ + +
+[docs] + @classmethod + def add_status_cmd(cls, config, pipeline_step, cmd): + """Append status file command to command for executing given module + + Parameters + ---------- + config : dict + sup3r config with all necessary args and kwargs to run given + module. + pipeline_step : str + Name of the pipeline step being run. + cmd : str + String including command to execute given module. + + Returns + ------- + cmd : str + Command string with status file command included if job_name is + not None + """ + job_name = config.get('job_name', None) + status_dir = config.get('status_dir', None) + if job_name is not None and status_dir is not None: + status_file_arg_str = f'"{status_dir}", ' + status_file_arg_str += f'pipeline_step="{pipeline_step}", ' + status_file_arg_str += f'job_name="{job_name}", ' + status_file_arg_str += 'attrs=job_attrs' + + cmd += 'job_attrs = {};\n'.format( + safe_serialize(config) + .replace("null", "None") + .replace("false", "False") + .replace("true", "True") + ) + cmd += 'job_attrs.update({"job_status": "successful"});\n' + cmd += 'job_attrs.update({"time": t_elap});\n' + cmd += f"Status.make_single_job_file({status_file_arg_str})" + + cmd_log = '\n\t'.join(cmd.split('\n')) + logger.debug(f'Running command:\n\t{cmd_log}') + + return cmd
+
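+
+# Hedged sketch (illustrative only; the config values are placeholders) of
+# how a node command gets a status-file footer appended:
+def _demo_add_status_cmd():  # pragma: no cover - illustrative only
+    config = {'job_name': 'sup3r_qa_run0', 'status_dir': './run0'}
+    cmd = "python -c 'import time; t0 = time.time(); t_elap = 0;\n"
+    cmd = BaseCLI.add_status_cmd(config, 'qa', cmd)
+    # cmd now ends with a Status.make_single_job_file(...) call recording
+    # job_attrs, job_status, and elapsed time for the 'qa' pipeline step
+    return cmd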
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/era_downloader.html b/_modules/sup3r/utilities/era_downloader.html
new file mode 100644
index 000000000..3bdbefc83
--- /dev/null
+++ b/_modules/sup3r/utilities/era_downloader.html
@@ -0,0 +1,1613 @@
+sup3r.utilities.era_downloader — sup3r 0.2.1.dev62 documentation
Source code for sup3r.utilities.era_downloader

+"""Download ERA5 file for the given year and month
+
+Note
+----
+To use this you need to have the ``cdsapi`` package installed and a ``~/.cdsapirc``
+file with a url and api key. Follow the instructions here:
+https://cds.climate.copernicus.eu/how-to-api
+"""
+
+import logging
+import os
+import pprint
+from calendar import monthrange
+from warnings import warn
+
+import dask
+import dask.array as da
+import numpy as np
+import pandas as pd
+from rex import init_logger
+
+from sup3r.preprocessing import Cacher, Loader
+from sup3r.preprocessing.loaders.utilities import (
+    standardize_names,
+    standardize_values,
+)
+from sup3r.preprocessing.names import (
+    ERA_NAME_MAP,
+    LEVEL_VARS,
+    SFC_VARS,
+    Dimension,
+)
+from sup3r.preprocessing.utilities import log_args
+
+# these are occasionally included in downloaded files, more often with cds-beta
+IGNORE_VARS = ('number', 'expver')
+
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class EraDownloader: + """Class to handle ERA5 downloading, variable renaming, and file + combinations.""" + + @log_args + def __init__( + self, + year, + month, + area, + levels, + file_pattern, + overwrite=False, + variables=None, + product_type='reanalysis', + ): + """Initialize the class. + + Parameters + ---------- + year : int + Year of data to download. + month : int + Month of data to download. + area : list + Domain area of the data to download. + [max_lat, min_lon, min_lat, max_lon] + levels : list + List of pressure levels to download. + file_pattern : str + Pattern for combined monthly output file. Must include year and + month format keys. e.g. 'era5_{year}_{month}_combined.nc' + overwrite : bool + Whether to overwrite existing files. + variables : list | None + Variables to download. If None this defaults to just geopotential + and wind components. + product_type : str + Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', + 'ensemble_members', 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members' + """ + self.year = year + self.month = month + self.area = area + self.levels = levels + self.overwrite = overwrite + self.file_pattern = file_pattern + self._variables = variables + self.sfc_file_variables = [] + self.level_file_variables = [] + self.prep_var_lists(self.variables) + self.product_type = product_type + self.hours = self.get_hours()
+[docs] + def get_hours(self): + """ERA5 is hourly and EDA is 3-hourly. Check and warn for incompatible + requests.""" + if self.product_type in ( + 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members', + ): + hours = ['00:00'] + elif self.product_type in ( + 'reanalysis', + 'monthly_averaged_reanalysis_by_hour_of_day', + ): + hours = [str(n).zfill(2) + ':00' for n in range(0, 24)] + else: + hours = [str(n).zfill(2) + ':00' for n in range(0, 24, 3)] + return hours
+ + + @property + def variables(self): + """Get list of requested variables""" + if self._variables is None: + raise OSError('Received empty variable list.') + return self._variables + + @property + def days(self): + """Get list of days for the requested month""" + return [ + str(n).zfill(2) + for n in np.arange(1, monthrange(self.year, self.month)[1] + 1) + ] + + @property + def monthly_file(self): + """Name of file with all surface and level variables for a given month + and year.""" + monthly_file = self.file_pattern.replace( + '{var}', '_'.join(self.variables) + ).format(year=self.year, month=str(self.month).zfill(2)) + os.makedirs(os.path.dirname(monthly_file), exist_ok=True) + return monthly_file + + @property + def surface_file(self): + """Get name of file with variables from single level download""" + basedir = os.path.dirname(self.monthly_file) + basename = os.path.basename(self.monthly_file) + return os.path.join(basedir, f'sfc_{basename}') + + @property + def level_file(self): + """Get name of file with variables from pressure level download""" + basedir = os.path.dirname(self.monthly_file) + basename = os.path.basename(self.monthly_file) + return os.path.join(basedir, f'level_{basename}') + +
+[docs] + @classmethod + def get_tmp_file(cls, file): + """Get the temp file name for the given file. Data is first written + to the temp file and then moved to the given file once complete. + """ + tmp_file = file.replace('.nc', '_tmp.nc') + return tmp_file
+ + + def _prep_var_lists(self, variables): + """Add all downloadable variables for the generic requested variables. + e.g. if variable = 'u' add all downloadable u variables to list. + """ + d_vars = [] + var_list = variables.copy() + for i, v in enumerate(var_list): + if v in ('u', 'v'): + var_list[i] = f'{v}_' + + all_vars = SFC_VARS + LEVEL_VARS + ['zg', 'orog'] + for var in var_list: + d_vars.extend([d_var for d_var in all_vars if var in d_var]) + return d_vars + +
+[docs] + def prep_var_lists(self, variables): + """Create surface and level variable lists based on requested + variables. + """ + variables = self._prep_var_lists(variables) + for var in variables: + if var in SFC_VARS and var not in self.sfc_file_variables: + self.sfc_file_variables.append(var) + elif var in LEVEL_VARS and var not in self.level_file_variables: + self.level_file_variables.append(var) + elif var not in SFC_VARS + LEVEL_VARS + ['zg', 'orog']: + msg = f'Requested {var} is not available for download.' + logger.warning(msg) + warn(msg) + + sfc_and_level_check = ( + len(self.sfc_file_variables) > 0 + and len(self.level_file_variables) > 0 + and 'orog' not in variables + and 'zg' not in variables + ) + if sfc_and_level_check: + msg = ( + 'Both surface and pressure level variables were requested ' + 'without requesting "orog" and "zg". Adding these to the ' + 'download' + ) + logger.info(msg) + self.sfc_file_variables.append('geopotential') + self.level_file_variables.append('geopotential') + + else: + if 'orog' in variables: + self.sfc_file_variables.append('geopotential') + if 'zg' in variables: + self.level_file_variables.append('geopotential')
+ + +
+[docs] + @staticmethod + def get_cds_client(): + """Get the copernicus climate data store (CDS) API object for ERA + downloads.""" + + try: + import cdsapi # noqa + except ImportError as e: + msg = f'Could not import cdsapi package. {e}' + raise ImportError(msg) from e + + msg = ( + 'To download ERA5 data you need to have a ~/.cdsapirc file ' + 'with a valid url and api key. Follow the instructions here: ' + 'https://cds.climate.copernicus.eu/how-to-api' + ) + req_file = os.path.join(os.path.expanduser('~'), '.cdsapirc') + assert os.path.exists(req_file), msg + + return cdsapi.Client()
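+# Illustrative sketch of the ~/.cdsapirc file that get_cds_client() checks
+# for. The key below is a placeholder, not a real credential; see the
+# how-to link above for the exact format required by your cdsapi version:
+#
+#     url: https://cds.climate.copernicus.eu/api
+#     key: <your-api-key>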
+ + +
+[docs] + def download_process_combine(self): + """Run the download routine.""" + sfc_check = len(self.sfc_file_variables) > 0 + level_check = ( + len(self.level_file_variables) > 0 + and self.levels is not None + and len(self.levels) > 0 + ) + if self.level_file_variables: + msg = ( + f'{self.level_file_variables} requested but no levels' + ' were provided.' + ) + if self.levels is None: + logger.warning(msg) + warn(msg) + + time_dict = { + 'year': self.year, + 'month': self.month, + 'time': self.hours, + } + if 'monthly' not in self.product_type: + time_dict['day'] = self.days + + if sfc_check: + tmp_file = self.get_tmp_file(self.surface_file) + self.download_file( + self.sfc_file_variables, + time_dict=time_dict, + area=self.area, + out_file=tmp_file, + level_type='single', + overwrite=self.overwrite, + product_type=self.product_type, + ) + os.replace(tmp_file, self.surface_file) + logger.info('Moved %s to %s', tmp_file, self.surface_file) + if level_check: + tmp_file = self.get_tmp_file(self.level_file) + self.download_file( + self.level_file_variables, + time_dict=time_dict, + area=self.area, + out_file=tmp_file, + level_type='pressure', + levels=self.levels, + overwrite=self.overwrite, + product_type=self.product_type, + ) + os.replace(tmp_file, self.level_file) + logger.info('Moved %s to %s', tmp_file, self.level_file) + if sfc_check or level_check: + self.process_and_combine()
+ + +
+[docs] + @classmethod + def download_file( + cls, + variables, + time_dict, + area, + out_file, + level_type, + levels=None, + product_type='reanalysis', + overwrite=False, + ): + """Download either single-level or pressure-level file + + Parameters + ---------- + variables : list + List of variables to download + time_dict : dict + Dictionary with year, month, day, time entries. + area : list + List of bounding box coordinates. + e.g. [max_lat, min_lon, min_lat, max_lon] + out_file : str + Name of output file + level_type : str + Either 'single' or 'pressure' + levels : list + List of pressure levels to download, if level_type == 'pressure' + product_type : str + Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', + 'ensemble_members', 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members' + overwrite : bool + Whether to overwrite existing file + """ + if os.path.exists(out_file) and not cls._can_skip_file(out_file): + os.remove(out_file) + + if not cls._can_skip_file(out_file) or overwrite: + msg = ( + f'Downloading {variables} to {out_file} with levels ' + f'= {levels}.' + ) + logger.info(msg) + dataset = f'reanalysis-era5-{level_type}-levels' + if 'monthly' in product_type: + dataset += '-monthly-means' + entry = { + 'product_type': [product_type], + 'data_format': 'netcdf', + 'download_format': 'unarchived', + 'variable': variables, + 'area': area, + } + entry.update(time_dict) + if level_type == 'pressure': + entry['pressure_level'] = levels + logger.info( + 'Calling CDS-API with dataset=%s, entry=%s.', dataset, entry + ) + cds_api_client = cls.get_cds_client() + + cds_api_client.retrieve(dataset, entry, out_file) + else: + logger.info(f'File already exists: {out_file}.')
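+# Hedged sketch of the request assembled above for a pressure-level
+# download. The keys mirror the `entry` dict built in download_file; the
+# values are hypothetical and must be valid CDS variable names and a real
+# bounding box:
+#
+#     dataset = 'reanalysis-era5-pressure-levels'
+#     entry = {
+#         'product_type': ['reanalysis'],
+#         'data_format': 'netcdf',
+#         'download_format': 'unarchived',
+#         'variable': ['u_component_of_wind', 'v_component_of_wind'],
+#         'area': [50, -110, 30, -90],
+#         'year': 2020,
+#         'month': 1,
+#         'day': ['01', '02', '03'],
+#         'time': ['00:00', '01:00', '02:00'],
+#         'pressure_level': [1000, 900, 800],
+#     }
+#     # for 'monthly' product types the dataset name gets a
+#     # '-monthly-means' suffix and the 'day' entry is dropped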
+ + +
+[docs] + def process_surface_file(self): + """Rename variables and convert geopotential to geopotential height.""" + tmp_file = self.get_tmp_file(self.surface_file) + ds = Loader(self.surface_file) + logger.info('Converting "z" var to "orog" for %s', self.surface_file) + ds = self.convert_z(ds, name='orog') + ds = standardize_names(ds, ERA_NAME_MAP) + ds = standardize_values(ds) + + if 'monthly' in self.product_type: + ds['time'] = pd.DatetimeIndex( + [f'{self.year}-{str(self.month).zfill(2)}-01'] + ) + ds.compute().to_netcdf(tmp_file, format='NETCDF4', engine='h5netcdf') + os.replace(tmp_file, self.surface_file) + logger.info( + f'Finished processing {self.surface_file}. Moved {tmp_file} to ' + f'{self.surface_file}.' + )
+ + +
+[docs] + def add_pressure(self, ds): + """Add pressure to dataset + + Parameters + ---------- + ds : Dataset + xr.Dataset() object for which to add pressure + + Returns + ------- + ds : Dataset + """ + if 'pressure' in self.variables and 'pressure' not in ds.data_vars: + logger.info('Adding pressure variable.') + pres = 100 * ds[Dimension.PRESSURE_LEVEL].values.astype(np.float32) + + # if trailing dimensions don't match this is for an ensemble + # download + if len(pres) != ds['zg'].shape[-1]: + pres = np.repeat(pres[..., None], ds['zg'].shape[-1], axis=-1) + ds['pressure'] = ( + ds['zg'].dims, + da.broadcast_to(pres, ds['zg'].shape), + ) + ds['pressure'].attrs['units'] = 'Pa' + return ds
+ + +
+[docs] + def convert_z(self, ds, name): + """Convert z to given height variable + + Parameters + ---------- + ds : Dataset + xr.Dataset() object for new file + name : str + Name of the height variable to convert 'z' to, e.g. 'zg' or + 'orog' + + Returns + ------- + ds : Dataset + xr.Dataset() object for new file with new height variable written. + """ + if name not in ds.data_vars and 'z' in ds.data_vars: + ds['z'] = (ds['z'].dims, ds['z'].values / 9.81) + ds['z'].attrs['units'] = 'm' + ds = ds.rename({'z': name}) + return ds
+ + +
+[docs] + def process_level_file(self): + """Convert geopotential to geopotential height.""" + tmp_file = self.get_tmp_file(self.level_file) + ds = Loader(self.level_file) + logger.info('Converting "z" var to "zg" for %s', self.level_file) + ds = self.convert_z(ds, name='zg') + ds = standardize_names(ds, ERA_NAME_MAP) + ds = standardize_values(ds) + ds = self.add_pressure(ds) + if 'monthly' in self.product_type: + ds['time'] = pd.DatetimeIndex( + [f'{self.year}-{str(self.month).zfill(2)}-01'] + ) + ds.compute().to_netcdf(tmp_file, format='NETCDF4', engine='h5netcdf') + os.replace(tmp_file, self.level_file) + logger.info( + f'Finished processing {self.level_file}. Moved ' + f'{tmp_file} to {self.level_file}.' + )
+ + +
+[docs] + def process_and_combine(self): + """Process variables and combine.""" + + if os.path.exists(self.monthly_file) and not self._can_skip_file( + self.monthly_file + ): + os.remove(self.monthly_file) + + if not self._can_skip_file(self.monthly_file) or self.overwrite: + files = [] + if os.path.exists(self.level_file): + logger.info(f'Processing {self.level_file}.') + self.process_level_file() + files.append(self.level_file) + if os.path.exists(self.surface_file): + logger.info(f'Processing {self.surface_file}.') + self.process_surface_file() + files.append(self.surface_file) + + kwargs = {'compat': 'override'} + self._combine_files(files, self.monthly_file, res_kwargs=kwargs) + + if os.path.exists(self.level_file): + os.remove(self.level_file) + if os.path.exists(self.surface_file): + os.remove(self.surface_file) + else: + logger.info(f'{self.monthly_file} already exists.')
+ + +
+[docs] + def get_monthly_file(self): + """Download level and surface files, process variables, and combine + processed files. Includes checks for shape and variables.""" + if os.path.exists(self.monthly_file) and ( + not self._can_skip_file(self.monthly_file) or self.overwrite + ): + os.remove(self.monthly_file) + + if not os.path.exists(self.monthly_file): + self.download_process_combine()
+ + +
+[docs] + @classmethod + def all_vars_exist(cls, year, file_pattern, variables): + """Check if all yearly variable files for the requested year exist. + + Parameters + ---------- + year : int + Year used for data download. + file_pattern : str + Pattern for variable file. Must include year and + var format keys. e.g. 'era5_{year}_{var}_combined.nc' + variables : list + Variables that should have been downloaded + + Returns + ------- + bool + True if all yearly variable files for the requested year exist. + """ + return all( + os.path.exists(file_pattern.format(year=year, var=var)) + for var in variables + )
+ + +
+[docs] + @classmethod + def run_month( + cls, + year, + month, + area, + levels, + file_pattern, + overwrite=False, + variables=None, + product_type='reanalysis', + ): + """Run routine for the given month and year. + + Parameters + ---------- + year : int + Year of data to download. + month : int + Month of data to download. + area : list + Domain area of the data to download. + [max_lat, min_lon, min_lat, max_lon] + levels : list + List of pressure levels to download. + file_pattern : str + Pattern for combined monthly output file. Must include year and + month format keys. e.g. 'era5_{year}_{month}_combined.nc' + overwrite : bool + Whether to overwrite existing files. + variables : list | None + Variables to download. If None this defaults to just geopotential + and wind components. + product_type : str + Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', + 'ensemble_members', 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members' + """ + variables = variables if isinstance(variables, list) else [variables] + for var in variables: + downloader = cls( + year=year, + month=month, + area=area, + levels=levels, + file_pattern=file_pattern, + overwrite=overwrite, + variables=[var], + product_type=product_type, + ) + downloader.get_monthly_file()
+ + +
+[docs] + @classmethod + def run_for_var( + cls, + year, + area, + levels, + monthly_file_pattern, + yearly_file_pattern=None, + months=None, + overwrite=False, + max_workers=None, + variable=None, + product_type='reanalysis', + chunks='auto', + res_kwargs=None, + ): + """Run routine for all requested months in the requested year for the + given variable. + + Parameters + ---------- + year : int + Year of data to download. + area : list + Domain area of the data to download. + [max_lat, min_lon, min_lat, max_lon] + levels : list + List of pressure levels to download. + monthly_file_pattern : str + Pattern for monthly output files. Must include year, month, and var + format keys. e.g. 'era5_{year}_{month}_{var}.nc' + yearly_file_pattern : str + Pattern for yearly output files. Must include year and var format + keys. e.g. 'era5_{year}_{var}.nc' + months : list | None + List of months to download data for. If None then all months for + the given year will be downloaded. + overwrite : bool + Whether to overwrite existing files. + max_workers : int + Max number of workers to use for downloading and processing monthly + files. + variable : str + Variable to download. + product_type : str + Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', + 'ensemble_members', 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members' + chunks : str | dict + Dictionary of chunksizes used when writing data to netcdf files. + Can also be 'auto'. + res_kwargs : None | dict + Keyword arguments for base resource handler, like + ``xr.open_mfdataset``. + """ + if yearly_file_pattern is not None: + yearly_var_file = yearly_file_pattern.format( + year=year, var=variable + ) + if os.path.exists(yearly_var_file) and not overwrite: + logger.info( + '%s already exists and overwrite=False.', yearly_var_file + ) + msg = ( + 'monthly_file_pattern must have {year}, {month}, and {var} ' + 'format keys' + ) + assert all( + key in monthly_file_pattern + for key in ('{year}', '{month}', '{var}') + ), msg + + tasks = [] + months = list(range(1, 13)) if months is None else months + for month in months: + task = dask.delayed(cls.run_month)( + year=year, + month=month, + area=area, + levels=levels, + file_pattern=monthly_file_pattern, + overwrite=overwrite, + variables=[variable], + product_type=product_type, + ) + tasks.append(task) + + if max_workers == 1: + dask.compute(*tasks, scheduler='single-threaded') + else: + dask.compute(*tasks, scheduler='threads', num_workers=max_workers) + + if yearly_file_pattern is not None and len(months) == 12: + cls.make_yearly_var_file( + year, + monthly_file_pattern, + yearly_file_pattern, + variable, + chunks=chunks, + res_kwargs=res_kwargs, + )
+ + +
+[docs] + @classmethod + def run( + cls, + year, + area, + levels, + monthly_file_pattern, + yearly_file_pattern=None, + months=None, + overwrite=False, + max_workers=None, + variables=None, + product_type='reanalysis', + chunks='auto', + combine_all_files=False, + res_kwargs=None, + ): + """Run routine for all requested months in the requested year. + + Parameters + ---------- + year : int + Year of data to download. + area : list + Domain area of the data to download. + [max_lat, min_lon, min_lat, max_lon] + levels : list + List of pressure levels to download. + monthly_file_pattern : str + Pattern for monthly output file. Must include year, month, and var + format keys. e.g. 'era5_{year}_{month}_{var}_combined.nc' + yearly_file_pattern : str + Pattern for yearly output file. Must include year and var + format keys. e.g. 'era5_{year}_{var}_combined.nc' + months : list | None + List of months to download data for. If None then all months for + the given year will be downloaded. + overwrite : bool + Whether to overwrite existing files. + max_workers : int + Max number of workers to use for downloading and processing monthly + files. + variables : list | None + Variables to download. If None this defaults to just geopotential + and wind components. + product_type : str + Can be 'reanalysis', 'ensemble_mean', 'ensemble_spread', + 'ensemble_members', 'monthly_averaged_reanalysis', + 'monthly_averaged_ensemble_members' + chunks : str | dict + Dictionary of chunksizes used when writing data to netcdf files. + Can also be 'auto' + combine_all_files : bool + Whether to combine separate yearly variable files into a single + yearly file with all variables included + res_kwargs : None | dict + Keyword arguments for base resource handler, like + ``xr.open_mfdataset``. + """ + for var in variables: + cls.run_for_var( + year=year, + area=area, + levels=levels, + months=months, + monthly_file_pattern=monthly_file_pattern, + yearly_file_pattern=yearly_file_pattern, + overwrite=overwrite, + variable=var, + product_type=product_type, + max_workers=max_workers, + chunks=chunks, + res_kwargs=res_kwargs, + ) + + if yearly_file_pattern is not None and ( + cls.all_vars_exist( + year=year, + file_pattern=yearly_file_pattern, + variables=variables, + ) + and combine_all_files + ): + cls.make_yearly_file( + year, + yearly_file_pattern, + variables, + chunks=chunks, + res_kwargs=res_kwargs, + )
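+# Hedged usage sketch of the run routine above. The domain, levels, file
+# patterns, and variable names are illustrative only, and actually running
+# this requires valid CDS credentials:
+#
+#     EraDownloader.run(
+#         year=2020,
+#         area=[50, -110, 30, -90],  # [max_lat, min_lon, min_lat, max_lon]
+#         levels=[1000, 900, 800],
+#         monthly_file_pattern='./era5_{year}_{month}_{var}.nc',
+#         yearly_file_pattern='./era5_{year}_{var}.nc',
+#         variables=['u_100m', 'v_100m'],
+#         max_workers=4,
+#         combine_all_files=True,
+#     )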
+ + +
+[docs] + @classmethod + def make_yearly_var_file( + cls, + year, + monthly_file_pattern, + yearly_file_pattern, + variable, + chunks='auto', + res_kwargs=None, + ): + """Combine monthly variable files into a single yearly variable file. + + Parameters + ---------- + year : int + Year used to download data + monthly_file_pattern : str + File pattern for monthly variable files. Must have year, month, and + var format keys. e.g. './era_{year}_{month}_{var}_combined.nc' + yearly_file_pattern : str + File pattern for yearly variable files. Must have year and var + format keys. e.g. './era_{year}_{var}_combined.nc' + variable : string + Variable name for the files to be combined. + chunks : str | dict + Dictionary of chunksizes used when writing data to netcdf files. + Can also be 'auto'. + res_kwargs : None | dict + Keyword arguments for base resource handler, like + ``xr.open_mfdataset``. This is passed to a ``Loader`` object and + then used in the base loader contained by that object. + """ + files = [ + monthly_file_pattern.format( + year=year, month=str(month).zfill(2), var=variable + ) + for month in range(1, 13) + ] + + outfile = yearly_file_pattern.format(year=year, var=variable) + default_kwargs = { + 'combine': 'nested', + 'concat_dim': 'time', + 'coords': 'minimal', + } + res_kwargs = res_kwargs or {} + default_kwargs.update(res_kwargs) + cls._combine_files( + files, outfile, chunks=chunks, res_kwargs=default_kwargs + )
+ + + @classmethod + def _can_skip_file(cls, file): + """Check whether an existing file downloaded successfully and can be + opened, in which case the download can be skipped.""" + if not os.path.exists(file): + return False + + logger.info( + '%s already exists. Making sure it downloaded successfully.', file + ) + openable = True + try: + _ = Loader(file) + except Exception as e: + msg = 'Could not open %s. %s Will redownload.' + logger.warning(msg, file, e) + warn(msg % (file, e)) + openable = False + + return openable + + @classmethod + def _combine_files(cls, files, outfile, chunks='auto', res_kwargs=None): + if not os.path.exists(outfile): + logger.info(f'Combining {files} into {outfile}.') + try: + res_kwargs = res_kwargs or {} + loader = Loader(files, res_kwargs=res_kwargs) + tmp_file = cls.get_tmp_file(outfile) + for ignore_var in IGNORE_VARS: + if ignore_var in loader.coords: + loader.data = loader.data.drop_vars(ignore_var) + Cacher.write_netcdf( + data=loader.data, + out_file=tmp_file, + max_workers=1, + chunks=chunks, + ) + os.replace(tmp_file, outfile) + logger.info('Moved %s to %s.', tmp_file, outfile) + except Exception as e: + msg = f'Error combining {files}. {e}' + logger.error(msg) + raise RuntimeError(msg) from e + else: + logger.info(f'{outfile} already exists.') + +
+[docs] + @classmethod + def make_yearly_file( + cls, year, file_pattern, variables, chunks='auto', res_kwargs=None + ): + """Combine yearly variable files into a single file. + + Parameters + ---------- + year : int + Year for the data to make into a yearly file. + file_pattern : str + File pattern for output files. Must have year and var + format keys. e.g. './era_{year}_{var}_combined.nc' + variables : list + List of variables corresponding to the yearly variable files to + combine. + chunks : str | dict + Dictionary of chunksizes used when writing data to netcdf files. + Can also be 'auto'. + res_kwargs : None | dict + Keyword arguments for base resource handler, like + ``xr.open_mfdataset``. This is passed to a ``Loader`` object and + then used in the base loader contained by that object. + """ + msg = ( + f'Not all variable files with file_pattern {file_pattern} for ' + f'year {year} exist.' + ) + assert cls.all_vars_exist( + year=year, file_pattern=file_pattern, variables=variables + ), msg + + files = [file_pattern.format(year=year, var=var) for var in variables] + yearly_file = ( + file_pattern.replace('_{var}_', '') + .replace('_{var}', '') + .format(year=year) + ) + cls._combine_files( + files, yearly_file, res_kwargs=res_kwargs, chunks=chunks + )
+ + +
+[docs] + @classmethod + def run_qa(cls, file, res_kwargs=None, log_file=None): + """Check for NaN values and log min / max / mean / stds for all + variables.""" + + logger = init_logger(__name__, log_level='DEBUG', log_file=log_file) + with Loader(file, res_kwargs=res_kwargs) as res: + logger.info('Running qa on file: %s', file) + logger.info('\n%s', pprint.pformat(res.qa(), indent=2))
+
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/interpolation.html b/_modules/sup3r/utilities/interpolation.html
new file mode 100644
index 000000000..077183f1f
--- /dev/null
+++ b/_modules/sup3r/utilities/interpolation.html
@@ -0,0 +1,882 @@
+sup3r.utilities.interpolation — sup3r 0.2.1.dev62 documentation

Source code for sup3r.utilities.interpolation

+"""Interpolator class with methods for pressure and height interpolation"""
+
+import logging
+from typing import Union
+from warnings import warn
+
+import dask.array as da
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Interpolator: + """Class for handling pressure and height interpolation""" + +
+[docs] + @classmethod + def get_level_masks(cls, lev_array, level): + """Get the masks used to select closest surrounding levels in the + lev_array to the requested interpolation level. + + Parameters + ---------- + lev_array : Union[np.ndarray, da.core.Array] + Height or pressure values for the corresponding entries in the + variable array being interpolated, in the same shape as that + array. If this is height and the requested levels are hub heights + above surface, lev_array should be the geopotential height + corresponding to every variable index relative to the surface + elevation (subtract the elevation at the surface from the + geopotential height) + level : float + level to interpolate to (e.g. final desired hub height + above surface elevation) + + Returns + ------- + mask1 : Union[np.ndarray, da.core.Array] + Array of bools selecting the entries with the closest levels to the + one requested. + (lat, lon, time, level) + mask2 : Union[np.ndarray, da.core.Array] + Array of bools selecting the entries with the second closest levels + to the one requested. + (lat, lon, time, level) + """ + argmin1 = da.argmin(da.abs(lev_array - level), axis=-1, keepdims=True) + lev_indices = da.broadcast_to( + da.arange(lev_array.shape[-1]), lev_array.shape + ) + mask1 = lev_indices == argmin1 + + other_levs = da.ma.masked_array(lev_array, mask1) + argmin2 = da.argmin(da.abs(other_levs - level), axis=-1, keepdims=True) + mask2 = lev_indices == argmin2 + return mask1, mask2
+ + + @classmethod + def _lin_interp(cls, lev_samps, var_samps, level): + """Linearly interpolate between levels.""" + diff = da.map_blocks(lambda x, y: x - y, lev_samps[1], lev_samps[0]) + alpha = da.where( + diff == 0, + 0, + da.map_blocks(lambda x, y: x / y, (level - lev_samps[0]), diff), + ) + return (1 - alpha) * var_samps[0] + alpha * var_samps[1] + + @classmethod + def _log_interp(cls, lev_samps, var_samps, level): + """Interpolate between levels with log profile. + + Note + ---- + Here we fit the function a * log(h - h0 + 1) + v0 to the two given + levels and variable values. So a is calculated with `v1 = a * log(h1 - + h0 + 1) + v0` where v1, v0 are var_samps[1], var_samps[0] and h1, h0 + are lev_samps[1], lev_samps[0] + """ + mask = lev_samps[0] < lev_samps[1] + h0 = da.where(mask, lev_samps[0], lev_samps[1]) + h1 = da.where(mask, lev_samps[1], lev_samps[0]) + v0 = da.where(mask, var_samps[0], var_samps[1]) + v1 = da.where(mask, var_samps[1], var_samps[0]) + coeff = da.where(h1 == h0, 0, (v1 - v0) / np.log(h1 - h0 + 1)) + coeff = da.where(level < h0, -coeff, coeff) + return coeff * np.log(da.abs(level - h0) + 1) + v0 + +
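+# A small worked check of the log-profile fit above, using scalars and plain
+# NumPy (the method itself operates on dask arrays):
+#
+#     import numpy as np
+#     h0, h1, v0, v1 = 10.0, 100.0, 5.0, 8.0
+#     coeff = (v1 - v0) / np.log(h1 - h0 + 1)
+#     assert np.isclose(coeff * np.log(h0 - h0 + 1) + v0, v0)  # recovers v0
+#     assert np.isclose(coeff * np.log(h1 - h0 + 1) + v0, v1)  # recovers v1
+#     v40 = coeff * np.log(40.0 - h0 + 1) + v0  # value at level 40, ~7.28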
+[docs] + @classmethod + def interp_to_level( + cls, + lev_array: Union[np.ndarray, da.core.Array], + var_array: Union[np.ndarray, da.core.Array], + level, + interp_kwargs=None, + ): + """Interpolate var_array to the given level. + + Parameters + ---------- + lev_array : Union[np.ndarray, da.core.Array] + Height or pressure values for the corresponding entries in + var_array, in the same shape as var_array. If this is height and + the requested levels are hub heights above surface, lev_array + should be the geopotential height corresponding to every var_array + index relative to the surface elevation (subtract the elevation at + the surface from the geopotential height) + var_array : Union[np.ndarray, da.core.Array] + Array of variable data, for example u-wind in a 4D array of shape + (lat, lon, time, level) + level : float + level to interpolate to (e.g. final desired hub height + above surface elevation) + interp_kwargs : dict | None + Dictionary of kwargs for level interpolation. Can include "method" + and "run_level_check" keys + + Returns + ------- + out : Union[np.ndarray, da.core.Array] + Interpolated var_array (lat, lon, time) + """ + interp_kwargs = interp_kwargs or {} + interp_method = interp_kwargs.get('method', 'linear') + run_level_check = interp_kwargs.get('run_level_check', False) + + if run_level_check: + cls._check_lev_array(lev_array, levels=[level]) + levs = da.ma.masked_array(lev_array, da.isnan(lev_array)) + mask1, mask2 = cls.get_level_masks(levs, level) + lev1 = da.where(mask1, lev_array, np.nan) + lev2 = da.where(mask2, lev_array, np.nan) + var1 = da.where(mask1, var_array, np.nan) + var2 = da.where(mask2, var_array, np.nan) + lev1 = np.nanmean(lev1, axis=-1) + lev2 = np.nanmean(lev2, axis=-1) + var1 = np.nanmean(var1, axis=-1) + var2 = np.nanmean(var2, axis=-1) + + if interp_method == 'log': + out = cls._log_interp( + lev_samps=[lev1, lev2], var_samps=[var1, var2], level=level + ) + else: + out = cls._lin_interp( + lev_samps=[lev1, lev2], var_samps=[var1, var2], level=level + ) + return out
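+# Hedged usage sketch with random dask arrays shaped (lat, lon, time, level);
+# the shapes and level value are made up for illustration:
+#
+#     import dask.array as da
+#     lev = da.random.uniform(0, 200, (4, 4, 6, 5))
+#     var = da.random.uniform(-10, 10, (4, 4, 6, 5))
+#     out = Interpolator.interp_to_level(
+#         lev_array=lev, var_array=var, level=100.0,
+#         interp_kwargs={'method': 'log'},
+#     )
+#     # out has shape (4, 4, 6)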
+ + + @classmethod + def _check_lev_array(cls, lev_array, levels): + """Check if the requested levels are consistent with the given + lev_array and if there are any nans in the lev_array.""" + + if np.isnan(lev_array).all(): + msg = 'All pressure level height data is NaN!' + logger.error(msg) + raise RuntimeError(msg) + + nans = np.isnan(lev_array) + logger.debug('Level array shape: {}'.format(lev_array.shape)) + + lowest_height = np.min(lev_array, axis=-1) + highest_height = np.max(lev_array, axis=-1) + bad_min = min(levels) < lowest_height + bad_max = max(levels) > highest_height + + if nans.any(): + nans = np.asarray(nans) + msg = ( + 'Approximately {:.2f}% of the vertical level ' + 'array is NaN. Data will be interpolated or extrapolated ' + 'past these NaN values.'.format(100 * nans.sum() / nans.size) + ) + logger.warning(msg) + warn(msg) + + # This and the next if statement can return warnings in the case of + # pressure inversions, in which case the "lowest" or "highest" pressure + # does not correspond to the lowest or highest height. Interpolation + # can be performed without issue in this case. + if bad_min.any(): + bad_min = np.asarray(bad_min) + lev_array = np.asarray(lev_array) + msg = ( + 'Approximately {:.2f}% of the lowest vertical levels ' + '(maximum value of {:.3f}, minimum value of {:.3f}) ' + 'were greater than the minimum requested level: {}'.format( + 100 * bad_min.sum() / bad_min.size, + np.nanmax(lev_array[..., 0]), + np.nanmin(lev_array[..., 0]), + min(levels), + ) + ) + logger.warning(msg) + warn(msg) + + if bad_max.any(): + bad_max = np.asarray(bad_max) + lev_array = np.asarray(lev_array) + msg = ( + 'Approximately {:.2f}% of the highest vertical levels ' + '(minimum value of {:.3f}, maximum value of {:.3f}) ' + 'were lower than the maximum requested level: {}'.format( + 100 * bad_max.sum() / bad_max.size, + np.nanmin(lev_array[..., -1]), + np.nanmax(lev_array[..., -1]), + max(levels), + ) + ) + logger.warning(msg) + warn(msg)
+ +
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/loss_metrics.html b/_modules/sup3r/utilities/loss_metrics.html
new file mode 100644
index 000000000..d3f5d5dca
--- /dev/null
+++ b/_modules/sup3r/utilities/loss_metrics.html
@@ -0,0 +1,1567 @@
+sup3r.utilities.loss_metrics — sup3r 0.2.1.dev62 documentation

Source code for sup3r.utilities.loss_metrics

+"""Content loss metrics for Sup3r"""
+
+from typing import ClassVar
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.losses import MeanAbsoluteError, MeanSquaredError
+
+
+
+[docs] +def gaussian_kernel(x1, x2, sigma=1.0): + """Gaussian kernel for mmd content loss + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_obs, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_obs, spatial_1, spatial_2, temporal, features) + sigma : float + Standard deviation for gaussian kernel + + Returns + ------- + tf.tensor + kernel output tensor + + References + ---------- + Following MMD implementation in https://github.com/lmjohns3/theanets + """ + + # The expand dims + subtraction compares every entry for the dimension + # prior to the expanded dimension to every other entry. So expand_dims with + # axis=1 will compare every observation along axis=0 to every other + # observation along axis=0. + result = tf.exp( + -0.5 + * tf.reduce_sum((tf.expand_dims(x1, axis=1) - x2) ** 2, axis=-1) + / sigma**2 + ) + return result
+ + + +
+[docs] +class ExpLoss(tf.keras.losses.Loss): + """Loss class for squared exponential difference""" + +
+[docs] + def __call__(self, x1, x2): + """Exponential difference loss function + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + return tf.reduce_mean(1 - tf.exp(-((x1 - x2) ** 2)))
+
+ + + +
+[docs] +class MseExpLoss(tf.keras.losses.Loss): + """Loss class for mse + squared exponential difference""" + + MSE_LOSS = MeanSquaredError() + +
+[docs] + def __call__(self, x1, x2): + """Mse + Exponential difference loss function + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mse = self.MSE_LOSS(x1, x2) + exp = tf.reduce_mean(1 - tf.exp(-((x1 - x2) ** 2))) + return mse + exp
+
+ + + +
+[docs] +class MmdLoss(tf.keras.losses.Loss): + """Loss class for max mean discrepancy loss""" + +
+[docs] + def __call__(self, x1, x2, sigma=1.0): + """Maximum mean discrepancy (MMD) based on Gaussian kernel function + for keras models + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + sigma : float + standard deviation for gaussian kernel + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mmd = tf.reduce_mean(gaussian_kernel(x1, x1, sigma)) + mmd += tf.reduce_mean(gaussian_kernel(x2, x2, sigma)) + mmd -= tf.reduce_mean(2 * gaussian_kernel(x1, x2, sigma)) + return mmd
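+# Hedged usage sketch with random 5D tensors of shape
+# (n_observations, spatial_1, spatial_2, temporal, features); the shapes
+# are illustrative:
+#
+#     import tensorflow as tf
+#     x1 = tf.random.uniform((4, 8, 8, 6, 2))
+#     x2 = tf.random.uniform((4, 8, 8, 6, 2))
+#     loss = MmdLoss()(x1, x2, sigma=1.0)  # 0D tensor, near zero for
+#                                          # similarly distributed inputs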
+
+ + + +
+[docs] +class MaterialDerivativeLoss(tf.keras.losses.Loss): + """Loss class for the material derivative. This is the left hand side of + the Navier-Stokes equation and is equal to internal + external forces + divided by density. + + References + ---------- + https://en.wikipedia.org/wiki/Material_derivative + """ + + LOSS_METRIC = MeanAbsoluteError() + + def _derivative(self, x, axis=1): + """Custom derivative function for compatibility with tensorflow. + + Note + ---- + Matches np.gradient by using the central difference approximation. + + Parameters + ---------- + x : tf.Tensor + (n_observations, spatial_1, spatial_2, temporal) + axis : int + Axis to take derivative over + """ + if axis == 1: + return tf.concat( + [ + x[:, 1:2] - x[:, 0:1], + (x[:, 2:] - x[:, :-2]) / 2, + x[:, -1:] - x[:, -2:-1], + ], + axis=1, + ) + if axis == 2: + return tf.concat( + [ + x[..., 1:2, :] - x[..., 0:1, :], + (x[..., 2:, :] - x[..., :-2, :]) / 2, + x[..., -1:, :] - x[..., -2:-1, :], + ], + axis=2, + ) + if axis == 3: + return tf.concat( + [ + x[..., 1:2] - x[..., 0:1], + (x[..., 2:] - x[..., :-2]) / 2, + x[..., -1:] - x[..., -2:-1], + ], + axis=3, + ) + + msg = ( + f'{self.__class__.__name__}._derivative received ' + f'axis={axis}. This is meant to compute only temporal ' + '(axis=3) or spatial (axis=1/2) derivatives for tensors ' + 'of shape (n_obs, spatial_1, spatial_2, temporal)' + ) + raise ValueError(msg) + + def _compute_md(self, x, fidx): + """Compute the material derivative for the feature given by the index + fidx. It is assumed that for a given feature index fidx there is a + pair of wind components u/v given by 2 * (fidx // 2) and + 2 * (fidx // 2) + 1 + + Parameters + ---------- + x : tf.tensor + synthetic output or high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + fidx : int + Feature index to compute material derivative for. + """ + uidx = 2 * (fidx // 2) + vidx = 2 * (fidx // 2) + 1 + # df/dt + x_div = self._derivative(x[..., fidx], axis=3) + # u * df/dx + x_div += tf.math.multiply( + x[..., uidx], self._derivative(x[..., fidx], axis=1) + ) + # v * df/dy + x_div += tf.math.multiply( + x[..., vidx], self._derivative(x[..., fidx], axis=2) + ) + + return x_div + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages accuracy of the material + derivative. This assumes that the first 2 * N features are N u/v + wind components at different hub heights and that the total number of + features is either 2 * N or 2 * N + 1 + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + hub_heights = x1.shape[-1] // 2 + + msg = ( + f'The {self.__class__.__name__} is meant to be used on ' + 'spatiotemporal data only. Received tensor(s) that are not 5D' + ) + assert len(x1.shape) == 5 and len(x2.shape) == 5, msg + + x1_div = tf.stack( + [ + self._compute_md(x1, fidx=i) + for i in range(0, 2 * hub_heights, 2) + ] + ) + x2_div = tf.stack( + [ + self._compute_md(x2, fidx=i) + for i in range(0, 2 * hub_heights, 2) + ] + ) + + mae = self.LOSS_METRIC(x1, x2) + div_mae = self.LOSS_METRIC(x1_div, x2_div) + + return (mae + div_mae) / 2
+
+ + + +
+[docs] +class MmdMseLoss(tf.keras.losses.Loss): + """Loss class for MMD + MSE""" + + MMD_LOSS = MmdLoss() + MSE_LOSS = MeanSquaredError() + +
+[docs] + def __call__(self, x1, x2, sigma=1.0): + """Maximum mean discrepancy (MMD) based on Gaussian kernel function + for keras models plus the typical MSE loss. + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + sigma : float + standard deviation for gaussian kernel + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mmd = self.MMD_LOSS(x1, x2, sigma=sigma) + mse = self.MSE_LOSS(x1, x2) + return (mmd + mse) / 2
+
+ + + +
+[docs] +class CoarseMseLoss(tf.keras.losses.Loss): + """Loss class for coarse mse on spatial average of 5D tensor""" + + MSE_LOSS = MeanSquaredError() + +
+[docs] + def __call__(self, x1, x2): + """Coarse MSE loss function computed on the spatial average of the + input tensors + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + + x1_coarse = tf.reduce_mean(x1, axis=(1, 2)) + x2_coarse = tf.reduce_mean(x2, axis=(1, 2)) + return self.MSE_LOSS(x1_coarse, x2_coarse)
+
+ + + +
+[docs] +class SpatialExtremesOnlyLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values in the + spatial domain. This does not include an additional MAE term""" + + MAE_LOSS = MeanAbsoluteError() + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages spatial min/max accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + x1_min = tf.reduce_min(x1, axis=(1, 2)) + x2_min = tf.reduce_min(x2, axis=(1, 2)) + + x1_max = tf.reduce_max(x1, axis=(1, 2)) + x2_max = tf.reduce_max(x2, axis=(1, 2)) + + mae_min = self.MAE_LOSS(x1_min, x2_min) + mae_max = self.MAE_LOSS(x1_max, x2_max) + + return (mae_min + mae_max) / 2
+
+ + + +
+[docs] +class SpatialExtremesLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values in the + spatial domain""" + + MAE_LOSS = MeanAbsoluteError() + EX_LOSS = SpatialExtremesOnlyLoss() + + def __init__(self, weight=1.0): + """Initialize the loss with given weight + + Parameters + ---------- + weight : float + Weight for min/max loss terms. Setting this to zero turns + loss into MAE. + """ + super().__init__() + self._weight = weight + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages spatial min/max accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mae = self.MAE_LOSS(x1, x2) + ex_mae = self.EX_LOSS(x1, x2) + + return (mae + 2 * self._weight * ex_mae) / 3
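+# Hedged usage sketch on 4D spatial tensors of shape
+# (n_observations, spatial_1, spatial_2, features); shapes are illustrative:
+#
+#     import tensorflow as tf
+#     x1 = tf.random.uniform((4, 8, 8, 2))
+#     x2 = tf.random.uniform((4, 8, 8, 2))
+#     loss = SpatialExtremesLoss(weight=2.0)(x1, x2)
+#     # larger weight emphasizes the spatial min/max terms relative to the
+#     # pointwise MAE term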
+
+ + + +
+[docs] +class TemporalExtremesOnlyLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values in the + timeseries. This does not include an additional mae term""" + + MAE_LOSS = MeanAbsoluteError() + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages temporal min/max accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + x1_min = tf.reduce_min(x1, axis=3) + x2_min = tf.reduce_min(x2, axis=3) + + x1_max = tf.reduce_max(x1, axis=3) + x2_max = tf.reduce_max(x2, axis=3) + + mae_min = self.MAE_LOSS(x1_min, x2_min) + mae_max = self.MAE_LOSS(x1_max, x2_max) + + return (mae_min + mae_max) / 2
+
+ + + +
+[docs] +class TemporalExtremesLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values in the + timeseries""" + + MAE_LOSS = MeanAbsoluteError() + EX_LOSS = TemporalExtremesOnlyLoss() + + def __init__(self, weight=1.0): + """Initialize the loss with given weight + + Parameters + ---------- + weight : float + Weight for min/max loss terms. Setting this to zero turns + loss into MAE. + """ + super().__init__() + self._weight = weight + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages temporal min/max accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mae = self.MAE_LOSS(x1, x2) + ex_mae = self.EX_LOSS(x1, x2) + + return (mae + 2 * self._weight * ex_mae) / 3
+
+ + + +
+[docs] +class SpatiotemporalExtremesLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values across both + space and time""" + + MAE_LOSS = MeanAbsoluteError() + S_EX_LOSS = SpatialExtremesOnlyLoss() + T_EX_LOSS = TemporalExtremesOnlyLoss() + + def __init__(self, spatial_weight=1.0, temporal_weight=1.0): + """Initialize the loss with given weight + + Parameters + ---------- + spatial_weight : float + Weight for spatial min/max loss terms. + temporal_weight : float + Weight for temporal min/max loss terms. + """ + super().__init__() + self.s_weight = spatial_weight + self.t_weight = temporal_weight + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages spatiotemporal min/max accuracy. + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + mae = self.MAE_LOSS(x1, x2) + s_ex_mae = self.S_EX_LOSS(x1, x2) + t_ex_mae = self.T_EX_LOSS(x1, x2) + return ( + mae + 2 * self.s_weight * s_ex_mae + 2 * self.t_weight * t_ex_mae + ) / 5
+
+ + + +
+[docs] +class SpatialFftOnlyLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the spatial frequency spectrum""" + + MAE_LOSS = MeanAbsoluteError() + + @staticmethod + def _freq_weights(x): + """Get product of squared frequencies to weight frequency amplitudes""" + k0 = np.array([k**2 for k in range(x.shape[1])]) + k1 = np.array([k**2 for k in range(x.shape[2])]) + freqs = np.multiply.outer(k0, k1) + freqs = tf.convert_to_tensor(freqs[np.newaxis, ..., np.newaxis]) + return tf.cast(freqs, x.dtype) + + def _fft(self, x): + """Apply needed transpositions and fft operation.""" + x_hat = tf.transpose(x, perm=[3, 0, 1, 2]) + x_hat = tf.signal.fft2d(tf.cast(x_hat, tf.complex64)) + x_hat = tf.transpose(x_hat, perm=[1, 2, 3, 0]) + x_hat = tf.cast(tf.abs(x_hat), x.dtype) + x_hat = tf.math.multiply(self._freq_weights(x), x_hat) + return tf.math.log(1 + x_hat) + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages frequency domain accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + x1_hat = self._fft(x1) + x2_hat = self._fft(x2) + return self.MAE_LOSS(x1_hat, x2_hat)
+
+ + + +
+[docs] +class SpatiotemporalFftOnlyLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the spatiotemporal frequency + spectrum""" + + MAE_LOSS = MeanAbsoluteError() + + @staticmethod + def _freq_weights(x): + """Get product of squared frequencies to weight frequency amplitudes""" + k0 = np.array([k**2 for k in range(x.shape[1])]) + k1 = np.array([k**2 for k in range(x.shape[2])]) + f = np.array([f**2 for f in range(x.shape[3])]) + freqs = np.multiply.outer(k0, k1) + freqs = np.multiply.outer(freqs, f) + freqs = tf.convert_to_tensor(freqs[np.newaxis, ..., np.newaxis]) + return tf.cast(freqs, x.dtype) + + def _fft(self, x): + """Apply needed transpositions and fft operation.""" + x_hat = tf.transpose(x, perm=[4, 0, 1, 2, 3]) + x_hat = tf.signal.fft3d(tf.cast(x_hat, tf.complex64)) + x_hat = tf.transpose(x_hat, perm=[1, 2, 3, 4, 0]) + x_hat = tf.cast(tf.abs(x_hat), x.dtype) + x_hat = tf.math.multiply(self._freq_weights(x), x_hat) + return tf.math.log(1 + x_hat) + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages frequency domain accuracy + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + x1_hat = self._fft(x1) + x2_hat = self._fft(x2) + return self.MAE_LOSS(x1_hat, x2_hat)
+
+ + + +
+[docs] +class StExtremesFftLoss(tf.keras.losses.Loss): + """Loss class that encourages accuracy of the min/max values across both + space and time as well as frequency domain accuracy.""" + + def __init__( + self, spatial_weight=1.0, temporal_weight=1.0, fft_weight=1.0 + ): + """Initialize the loss with given weight + + Parameters + ---------- + spatial_weight : float + Weight for spatial min/max loss terms. + temporal_weight : float + Weight for temporal min/max loss terms. + fft_weight : float + Weight for the fft loss term. + """ + super().__init__() + self.st_ex_loss = SpatiotemporalExtremesLoss( + spatial_weight, temporal_weight + ) + self.fft_loss = SpatiotemporalFftOnlyLoss() + self.fft_weight = fft_weight + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss that encourages spatiotemporal min/max accuracy + and fft accuracy. + + Parameters + ---------- + x1 : tf.tensor + synthetic generator output + (n_observations, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + high resolution data + (n_observations, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor with loss value + """ + return ( + 5 * self.st_ex_loss(x1, x2) + + self.fft_weight * self.fft_loss(x1, x2) + ) / 6
+
+ + + +
+[docs] +class LowResLoss(tf.keras.losses.Loss): + """Content loss that is calculated by coarsening the synthetic and true + high-resolution data pairs and then performing the pointwise content loss + on the low-resolution fields""" + + EX_LOSS_METRICS: ClassVar = { + 'SpatialExtremesOnlyLoss': SpatialExtremesOnlyLoss, + 'TemporalExtremesOnlyLoss': TemporalExtremesOnlyLoss, + } + + def __init__( + self, + s_enhance=1, + t_enhance=1, + t_method='average', + tf_loss='MeanSquaredError', + ex_loss=None, + ): + """Initialize the loss with given weight + + Parameters + ---------- + s_enhance : int + factor by which to coarsen spatial dimensions. 1 will keep the + spatial axes as high-res + t_enhance : int + factor by which to coarsen temporal dimension. 1 will keep the + temporal axes as high-res + t_method : str + Accepted options: [subsample, average] + Subsample will take every t_enhance-th time step, average will + average over t_enhance time steps + tf_loss : str + The tensorflow loss function to operate on the low-res fields. Must + be the name of a loss class that can be retrieved from + ``tf.keras.losses`` e.g., "MeanSquaredError" or "MeanAbsoluteError" + ex_loss : None | str + Optional additional loss metric evaluating the spatial or temporal + extremes of the high-res data. Can be "SpatialExtremesOnlyLoss" or + "TemporalExtremesOnlyLoss" (keys in ``EX_LOSS_METRICS``). + """ + + super().__init__() + self._s_enhance = s_enhance + self._t_enhance = t_enhance + self._t_method = str(t_method).casefold() + self._tf_loss = getattr(tf.keras.losses, tf_loss)() + self._ex_loss = ex_loss + if self._ex_loss is not None: + self._ex_loss = self.EX_LOSS_METRICS[self._ex_loss]() + + def _s_coarsen_4d_tensor(self, tensor): + """Perform spatial coarsening on a 4D tensor of shape + (n_obs, spatial_1, spatial_2, features)""" + shape = tensor.shape + tensor = tf.reshape( + tensor, + ( + shape[0], + shape[1] // self._s_enhance, + self._s_enhance, + shape[2] // self._s_enhance, + self._s_enhance, + shape[3], + ), + ) + tensor = tf.math.reduce_sum(tensor, axis=(2, 4)) / self._s_enhance**2 + return tensor + + def _s_coarsen_5d_tensor(self, tensor): + """Perform spatial coarsening on a 5D tensor of shape + (n_obs, spatial_1, spatial_2, time, features)""" + shape = tensor.shape + tensor = tf.reshape( + tensor, + ( + shape[0], + shape[1] // self._s_enhance, + self._s_enhance, + shape[2] // self._s_enhance, + self._s_enhance, + shape[3], + shape[4], + ), + ) + tensor = tf.math.reduce_sum(tensor, axis=(2, 4)) / self._s_enhance**2 + return tensor + + def _t_coarsen_sample(self, tensor): + """Perform temporal subsampling on a 5D tensor of shape + (n_obs, spatial_1, spatial_2, time, features)""" + assert len(tensor.shape) == 5 + tensor = tensor[:, :, :, :: self._t_enhance, :] + return tensor + + def _t_coarsen_avg(self, tensor): + """Perform temporal coarsening on a 5D tensor of shape + (n_obs, spatial_1, spatial_2, time, features)""" + shape = tensor.shape + assert len(shape) == 5 + tensor = tf.reshape( + tensor, + (shape[0], shape[1], shape[2], -1, self._t_enhance, shape[4]), + ) + tensor = tf.math.reduce_sum(tensor, axis=4) / self._t_enhance + return tensor + +
+[docs] + def __call__(self, x1, x2): + """Custom content loss calculated on re-coarsened low-res fields + + Parameters + ---------- + x1 : tf.tensor + Synthetic high-res generator output, shape is either of these: + (n_obs, spatial_1, spatial_2, features) + (n_obs, spatial_1, spatial_2, temporal, features) + x2 : tf.tensor + True high resolution data, shape is either of these: + (n_obs, spatial_1, spatial_2, features) + (n_obs, spatial_1, spatial_2, temporal, features) + + Returns + ------- + tf.tensor + 0D tensor loss value + """ + + assert x1.shape == x2.shape + s_only = len(x1.shape) == 4 + + ex_loss = tf.constant(0, dtype=x1.dtype) + if self._ex_loss is not None: + ex_loss = self._ex_loss(x1, x2) + + if self._s_enhance > 1 and s_only: + x1 = self._s_coarsen_4d_tensor(x1) + x2 = self._s_coarsen_4d_tensor(x2) + + elif self._s_enhance > 1 and not s_only: + x1 = self._s_coarsen_5d_tensor(x1) + x2 = self._s_coarsen_5d_tensor(x2) + + if self._t_enhance > 1 and self._t_method == 'average': + x1 = self._t_coarsen_avg(x1) + x2 = self._t_coarsen_avg(x2) + + if self._t_enhance > 1 and self._t_method == 'subsample': + x1 = self._t_coarsen_sample(x1) + x2 = self._t_coarsen_sample(x2) + + return self._tf_loss(x1, x2) + ex_loss
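+# Hedged usage sketch: coarsen 5D pairs by 2x in space and 4x in time,
+# apply MAE on the coarse fields, and add a temporal-extremes term
+# (shapes are illustrative):
+#
+#     import tensorflow as tf
+#     loss_fn = LowResLoss(
+#         s_enhance=2, t_enhance=4, t_method='average',
+#         tf_loss='MeanAbsoluteError', ex_loss='TemporalExtremesOnlyLoss',
+#     )
+#     x1 = tf.random.uniform((4, 8, 8, 12, 2))
+#     x2 = tf.random.uniform((4, 8, 8, 12, 2))
+#     value = loss_fn(x1, x2)  # 0D tensor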
+
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/pytest/helpers.html b/_modules/sup3r/utilities/pytest/helpers.html
new file mode 100644
index 000000000..deceef5e6
--- /dev/null
+++ b/_modules/sup3r/utilities/pytest/helpers.html
@@ -0,0 +1,1191 @@
+sup3r.utilities.pytest.helpers — sup3r 0.2.1.dev62 documentation

Source code for sup3r.utilities.pytest.helpers

+"""Testing helpers."""
+
+import os
+from itertools import product
+
+import dask.array as da
+import numpy as np
+import pandas as pd
+import xarray as xr
+
+from sup3r.postprocessing import OutputHandlerH5
+from sup3r.preprocessing.base import Container, Sup3rDataset
+from sup3r.preprocessing.batch_handlers import BatchHandlerCC, BatchHandlerDC
+from sup3r.preprocessing.names import Dimension
+from sup3r.preprocessing.samplers import DualSamplerCC, Sampler, SamplerDC
+from sup3r.utilities.utilities import RANDOM_GENERATOR, pd_date_range
+
+
+
+[docs] +def make_fake_tif(shape, outfile): + """Make dummy data for tests.""" + + y = np.linspace(70, -70, shape[0]) + x = np.linspace(-150, 150, shape[1]) + coords = {'band': [1], 'x': x, 'y': y} + data_vars = { + 'band_data': ( + ('band', 'y', 'x'), + RANDOM_GENERATOR.uniform(0, 1, (1, *shape)), + ) + } + nc = xr.Dataset(coords=coords, data_vars=data_vars) + nc.to_netcdf(outfile, format='NETCDF4', engine='h5netcdf')
+ + + +
+[docs] +def make_fake_dset(shape, features, const=None): + """Make dummy data for tests.""" + + lats = np.linspace(70, -70, shape[0]) + lons = np.linspace(-150, 150, shape[1]) + lons, lats = np.meshgrid(lons, lats) + time = pd.date_range('2023-01-01', '2023-12-31', freq='60min')[: shape[2]] + dims = ( + 'time', + Dimension.PRESSURE_LEVEL, + Dimension.SOUTH_NORTH, + Dimension.WEST_EAST, + ) + coords = {} + + if len(shape) == 4: + levels = np.linspace(1000, 0, shape[3]) + coords[Dimension.PRESSURE_LEVEL] = levels + coords['time'] = time + coords[Dimension.LATITUDE] = ( + (Dimension.SOUTH_NORTH, Dimension.WEST_EAST), + lats, + ) + coords[Dimension.LONGITUDE] = ( + (Dimension.SOUTH_NORTH, Dimension.WEST_EAST), + lons, + ) + + dims = ( + 'time', + Dimension.PRESSURE_LEVEL, + Dimension.SOUTH_NORTH, + Dimension.WEST_EAST, + ) + trans_axes = (2, 3, 0, 1) + if len(shape) == 3: + dims = ('time', *dims[2:]) + trans_axes = (2, 0, 1) + data_vars = {} + for f in features: + if 'zg' in f: + data = da.random.uniform(10, 100, shape) + elif 'orog' in f: + data = da.random.uniform(0, 1, shape) + else: + data = da.random.uniform(-1, 1, shape) + data_vars[f] = ( + dims[: len(shape)], + da.transpose( + np.full(shape, const) if const is not None else data, + axes=trans_axes, + ), + ) + nc = xr.Dataset(coords=coords, data_vars=data_vars) + return nc.astype(np.float32)
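+# Hedged usage sketch: build a small 4D dummy dataset with two features
+# (the shape and feature names are made up):
+#
+#     ds = make_fake_dset((10, 10, 24, 3), features=['u', 'v'])
+#     # ds is a float32 xr.Dataset with a time dim, a pressure-level dim,
+#     # 2D lat/lon coords, and random variables 'u' and 'v'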
+ + + +
+[docs] +def make_fake_nc_file(file_name, shape, features): + """Make nc file with dummy data for tests.""" + nc = make_fake_dset(shape, features) + nc.to_netcdf(file_name, format='NETCDF4', engine='h5netcdf')
+ + + +
+[docs] +class DummyData(Container): + """Dummy container with random data.""" + + def __init__(self, data_shape, features): + super().__init__() + self.data = make_fake_dset(data_shape, features)
+ + + +
+[docs] +class DummySampler(Sampler): + """Dummy sampler with random data.""" + + def __init__( + self, sample_shape, data_shape, features, batch_size, feature_sets=None + ): + data = make_fake_dset(data_shape, features=features) + super().__init__( + Sup3rDataset(high_res=data), + sample_shape, + batch_size=batch_size, + feature_sets=feature_sets, + )
+ + + +
+[docs] +def test_sampler_factory(SamplerClass): + """Build test samplers which track indices.""" + + class SamplerTester(SamplerClass): + """Keep a record of sample indices for testing.""" + + def __init__(self, *args, **kwargs): + self.index_record = [] + super().__init__(*args, **kwargs) + + def get_sample_index(self, **kwargs): + """Override get_sample_index to keep record of index accessible by + batch handler. We store the index with the time entry divided by + the batch size, since we have multiplied by the batch size to get + a continuous time sample for multiple observations.""" + idx = super().get_sample_index(**kwargs) + if len(idx) == 2: + lr = list(idx[0]) + hr = list(idx[1]) + lr[2] = slice( + lr[2].start, + (lr[2].stop - lr[2].start) // self.batch_size + + lr[2].start, + ) + hr[2] = slice( + hr[2].start, + (hr[2].stop - hr[2].start) // self.batch_size + + hr[2].start, + ) + new_idx = (tuple(lr), tuple(hr)) + else: + new_idx = list(idx) + new_idx[2] = slice( + new_idx[2].start, + (new_idx[2].stop - new_idx[2].start) // self.batch_size + + new_idx[2].start, + ) + new_idx = tuple(new_idx) + self.index_record.append(new_idx) + return idx + + return SamplerTester
+ + + +DualSamplerTesterCC = test_sampler_factory(DualSamplerCC) +SamplerTesterDC = test_sampler_factory(SamplerDC) +SamplerTester = test_sampler_factory(Sampler) + + +
+[docs] +class BatchHandlerTesterCC(BatchHandlerCC): + """Batch handler with sampler with running index record.""" + + SAMPLER = DualSamplerTesterCC
+ + + +
+[docs] +class BatchHandlerTesterDC(BatchHandlerDC): + """Data-centric batch handler with record for testing""" + + SAMPLER = SamplerTesterDC + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.temporal_weights_record = [] + self.spatial_weights_record = [] + self.s_index_record = [] + self.t_index_record = [] + self.space_bin_record = [] + self.time_bin_record = [] + self.space_bin_count = np.zeros(self.n_space_bins) + self.time_bin_count = np.zeros(self.n_time_bins) + self.max_rows = self.data[0].shape[0] - self.sample_shape[0] + 1 + self.max_cols = self.data[0].shape[1] - self.sample_shape[1] + 1 + self.max_tsteps = self.data[0].shape[2] - self.sample_shape[2] + 1 + self.spatial_bins = np.array_split( + np.arange(0, self.max_rows * self.max_cols), + self.n_space_bins, + ) + self.spatial_bins = [b[-1] + 1 for b in self.spatial_bins] + self.temporal_bins = np.array_split( + np.arange(0, self.max_tsteps), self.n_time_bins + ) + self.temporal_bins = [b[-1] + 1 for b in self.temporal_bins] + + def _update_bin_count(self, slices): + s_idx = slices[0].start * self.max_cols + slices[1].start + t_idx = slices[2].start + self.s_index_record.append(s_idx) + self.t_index_record.append(t_idx) + self.space_bin_count[np.digitize(s_idx, self.spatial_bins)] += 1 + self.time_bin_count[np.digitize(t_idx, self.temporal_bins)] += 1 + +
+[docs]
+    def sample_batch(self):
+        """Override sample_batch to track sample indices."""
+        out = super().sample_batch()
+        if len(self.containers[0].index_record) > 0:
+            self._update_bin_count(self.containers[0].index_record[-1])
+        return out
+ + + def __next__(self): + out = super().__next__() + if self._batch_count == self.n_batches: + self.update_record() + return out + +
+[docs]
+    def update_record(self):
+        """Store bin counts and weights for the finished epoch and reset the
+        counters."""
+        self.space_bin_record.append(self.space_bin_count)
+        self.time_bin_record.append(self.time_bin_count)
+        self.space_bin_count = np.zeros(self.n_space_bins)
+        self.time_bin_count = np.zeros(self.n_time_bins)
+        self.temporal_weights_record.append(self.temporal_weights)
+        self.spatial_weights_record.append(self.spatial_weights)
+ + + @staticmethod + def _mean_record_normed(record): + mean = np.array(record).mean(axis=0) + return mean / mean.sum()
+ + + +
+[docs]
+def BatchHandlerTesterFactory(BatchHandlerClass, SamplerClass):
+    """Batch handler factory with sample counter and deterministic sampling for
+    testing."""
+
+    class BatchHandlerTester(BatchHandlerClass):
+        """Testing version of BatchHandler."""
+
+        SAMPLER = SamplerClass
+
+        def __init__(self, *args, **kwargs):
+            self.sample_count = 0
+            super().__init__(*args, **kwargs)
+
+        def sample_batch(self):
+            """Override sample_batch to track sample count."""
+            self.sample_count += 1
+            return super().sample_batch()
+
+    return BatchHandlerTester
+ + + +
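A minimal sketch of what this factory produces, using only the classes defined above (illustrative; not part of the source):

# Illustrative only: the factory pins the SAMPLER class attribute and
# adds a call counter on top of the given batch handler class.
CountingHandler = BatchHandlerTesterFactory(BatchHandlerDC, SamplerTesterDC)
assert CountingHandler.SAMPLER is SamplerTesterDC
assert issubclass(CountingHandler, BatchHandlerDC)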
+[docs]
+def make_collect_chunks(td):
+    """Make fake h5 chunked output files for collection tests.
+
+    Parameters
+    ----------
+    td : tempfile.TemporaryDirectory
+        Test TemporaryDirectory
+
+    Returns
+    -------
+    out_files : list
+        List of filepaths to chunked files.
+    data : ndarray
+        (spatial_1, spatial_2, temporal, features)
+        High resolution forward pass output
+    ws_true : ndarray
+        Windspeed between 0 and 20 in shape (spatial_1, spatial_2, temporal, 1)
+    wd_true : ndarray
+        Wind direction between 0 and 360 in shape
+        (spatial_1, spatial_2, temporal, 1)
+    features : list
+        List of feature names corresponding to the last dimension of data
+        ['windspeed_100m', 'winddirection_100m']
+    hr_lat_lon : ndarray
+        Array of lat/lon for hr data. (spatial_1, spatial_2, 2)
+        Last dimension has ordering (lat, lon)
+    hr_times : list
+        List of np.datetime64 objects for hr data.
+    """
+
+    features = ['windspeed_100m', 'winddirection_100m']
+    model_meta_data = {'foo': 'bar'}
+    shape = (50, 50, 96, 1)
+    ws_true = RANDOM_GENERATOR.uniform(0, 20, shape)
+    wd_true = RANDOM_GENERATOR.uniform(0, 360, shape)
+    data = np.concatenate((ws_true, wd_true), axis=3)
+    lat = np.linspace(90, 0, 50)
+    lon = np.linspace(-180, 0, 50)
+    lon, lat = np.meshgrid(lon, lat)
+    hr_lat_lon = np.dstack((lat, lon))
+
+    gids = np.arange(np.prod(shape[:2]))
+    gids = gids.reshape(shape[:2])
+
+    hr_times = pd_date_range(
+        '20220101', '20220103', freq='1800s', inclusive='left'
+    )
+
+    t_slices_hr = np.array_split(np.arange(len(hr_times)), 4)
+    t_slices_hr = [slice(s[0], s[-1] + 1) for s in t_slices_hr]
+    s_slices_hr = np.array_split(np.arange(shape[0]), 4)
+    s_slices_hr = [slice(s[0], s[-1] + 1) for s in s_slices_hr]
+
+    out_pattern = os.path.join(td, 'fp_out_{t}_{s}.h5')
+    out_files = []
+    for t, slice_hr in enumerate(t_slices_hr):
+        for s, (s1_hr, s2_hr) in enumerate(product(s_slices_hr, s_slices_hr)):
+            out_file = out_pattern.format(
+                t=str(t).zfill(6),
+                s=str(s).zfill(6)
+            )
+            out_files.append(out_file)
+            OutputHandlerH5._write_output(
+                data[s1_hr, s2_hr, slice_hr, :],
+                features,
+                hr_lat_lon[s1_hr, s2_hr],
+                hr_times[slice_hr],
+                out_file,
+                meta_data=model_meta_data,
+                max_workers=1,
+                gids=gids[s1_hr, s2_hr],
+            )
+
+    return (
+        out_files,
+        data,
+        ws_true,
+        wd_true,
+        features,
+        hr_lat_lon,
+        hr_times
+    )
+ + + +
+[docs]
+def make_fake_h5_chunks(td):
+    """Make fake h5 chunked output files for a 5x spatial 2x temporal
+    multi-node forward pass output.
+
+    Parameters
+    ----------
+    td : tempfile.TemporaryDirectory
+        Test TemporaryDirectory
+
+    Returns
+    -------
+    out_files : list
+        List of filepaths to chunked files.
+    data : ndarray
+        (spatial_1, spatial_2, temporal, features)
+        High resolution forward pass output
+    ws_true : ndarray
+        Windspeed between 0 and 20 in shape (spatial_1, spatial_2, temporal, 1)
+    wd_true : ndarray
+        Wind direction between 0 and 360 in shape
+        (spatial_1, spatial_2, temporal, 1)
+    features : list
+        List of feature names corresponding to the last dimension of data
+        ['windspeed_100m', 'winddirection_100m']
+    t_slices_lr : list
+        List of low res temporal slices
+    t_slices_hr : list
+        List of high res temporal slices
+    s_slices_lr : list
+        List of low res spatial slices
+    s_slices_hr : list
+        List of high res spatial slices
+    low_res_lat_lon : ndarray
+        Array of lat/lon for input data. (spatial_1, spatial_2, 2)
+        Last dimension has ordering (lat, lon)
+    low_res_times : list
+        List of np.datetime64 objects for coarse data.
+    """
+
+    features = ['windspeed_100m', 'winddirection_100m']
+    model_meta_data = {'foo': 'bar'}
+    shape = (50, 50, 96, 1)
+    ws_true = RANDOM_GENERATOR.uniform(0, 20, shape)
+    wd_true = RANDOM_GENERATOR.uniform(0, 360, shape)
+    data = np.concatenate((ws_true, wd_true), axis=3)
+    lat = np.linspace(90, 0, 10)
+    lon = np.linspace(-180, 0, 10)
+    lon, lat = np.meshgrid(lon, lat)
+    low_res_lat_lon = np.dstack((lat, lon))
+
+    gids = np.arange(np.prod(shape[:2]))
+    gids = gids.reshape(shape[:2])
+
+    low_res_times = pd_date_range(
+        '20220101', '20220103', freq='3600s', inclusive='left'
+    )
+
+    t_slices_lr = [slice(0, 24), slice(24, None)]
+    t_slices_hr = [slice(0, 48), slice(48, None)]
+
+    s_slices_lr = [slice(0, 5), slice(5, 10)]
+    s_slices_hr = [slice(0, 25), slice(25, 50)]
+
+    out_pattern = os.path.join(td, 'fp_out_{t}_{i}{j}.h5')
+    out_files = []
+    for t, (slice_lr, slice_hr) in enumerate(zip(t_slices_lr, t_slices_hr)):
+        for i, (s1_lr, s1_hr) in enumerate(zip(s_slices_lr, s_slices_hr)):
+            for j, (s2_lr, s2_hr) in enumerate(zip(s_slices_lr, s_slices_hr)):
+                out_file = out_pattern.format(
+                    t=str(t).zfill(6),
+                    i=str(i).zfill(3),
+                    j=str(j).zfill(3),
+                )
+                out_files.append(out_file)
+                OutputHandlerH5.write_output(
+                    data[s1_hr, s2_hr, slice_hr, :],
+                    features,
+                    low_res_lat_lon[s1_lr, s2_lr],
+                    low_res_times[slice_lr],
+                    out_file,
+                    meta_data=model_meta_data,
+                    max_workers=1,
+                    gids=gids[s1_hr, s2_hr],
+                )
+
+    return (
+        out_files,
+        data,
+        ws_true,
+        wd_true,
+        features,
+        t_slices_lr,
+        t_slices_hr,
+        s_slices_lr,
+        s_slices_hr,
+        low_res_lat_lon,
+        low_res_times
+    )
+ + + +
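For orientation, a hedged usage sketch of the helper above; the collector step is indicative only (consult sup3r.postprocessing for the exact collection API):

# Illustrative only: write the fake chunks into a temporary directory
# so they can be handed to an h5 collector under test.
import tempfile

with tempfile.TemporaryDirectory() as td:
    out_files, data, ws_true, wd_true, features, *slices_and_meta = (
        make_fake_h5_chunks(td)
    )
    # out_files can now be passed to the h5 collector being tested.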
+[docs]
+def make_fake_cs_ratio_files(td, low_res_times, low_res_lat_lon, model_meta):
+    """Make a set of dummy clearsky ratio files that match the GAN fwp outputs
+
+    Parameters
+    ----------
+    td : tempfile.TemporaryDirectory
+        Test TemporaryDirectory
+    low_res_times : list
+        List of times for low res input data. If there is only a single low
+        res timestep, it is assumed the data is daily.
+    low_res_lat_lon : ndarray
+        Array of lat/lon for input data.
+        (spatial_1, spatial_2, 2)
+        Last dimension has ordering (lat, lon)
+    model_meta : dict
+        Meta data for model to write to file.
+
+    Returns
+    -------
+    fps : list
+        List of clearsky ratio .h5 chunked files.
+    fp_pattern : str
+        Glob pattern string to find fps
+    """
+    fps = []
+    chunk_dir = os.path.join(td, 'chunks/')
+    fp_pattern = os.path.join(chunk_dir, 'sup3r_chunk_*.h5')
+    os.makedirs(chunk_dir)
+
+    for idt, timestamp in enumerate(low_res_times):
+        fn = 'sup3r_chunk_{}_{}.h5'.format(str(idt).zfill(6), str(0).zfill(6))
+        out_file = os.path.join(chunk_dir, fn)
+        fps.append(out_file)
+
+        cs_ratio = RANDOM_GENERATOR.uniform(0, 1, (20, 20, 1, 1))
+        cs_ratio = np.repeat(cs_ratio, 24, axis=2)
+
+        OutputHandlerH5.write_output(
+            cs_ratio,
+            ['clearsky_ratio'],
+            low_res_lat_lon,
+            [timestamp],
+            out_file,
+            max_workers=1,
+            meta_data=model_meta,
+        )
+    return fps, fp_pattern
\ No newline at end of file
diff --git a/_modules/sup3r/utilities/utilities.html b/_modules/sup3r/utilities/utilities.html
new file mode 100644
index 000000000..73254d199
--- /dev/null
+++ b/_modules/sup3r/utilities/utilities.html
@@ -0,0 +1,1071 @@
+sup3r.utilities.utilities — sup3r 0.2.1.dev62 documentation
Source code for sup3r.utilities.utilities

+"""Miscellaneous utilities shared across multiple modules"""
+
+import json
+import logging
+import random
+import string
+import time
+
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+import xarray as xr
+from packaging import version
+from scipy import ndimage as nd
+
+logger = logging.getLogger(__name__)
+
+RANDOM_GENERATOR = np.random.default_rng(seed=42)
+
+
+
+[docs] +def preprocess_datasets(dset): + """Standardization preprocessing applied before datasets are concatenated + by ``xr.open_mfdataset``""" + if 'time' in dset and dset.time.size > 1: + if 'time' in dset.indexes and hasattr( + dset.indexes['time'], 'to_datetimeindex' + ): + dset['time'] = dset.indexes['time'].to_datetimeindex() + ti = dset['time'].astype(int) + dset['time'] = ti + if 'latitude' in dset.dims: + dset = dset.swap_dims({'latitude': 'south_north'}) + if 'longitude' in dset.dims: + dset = dset.swap_dims({'longitude': 'west_east'}) + # temporary to handle downloaded era files + if 'expver' in dset: + dset = dset.drop_vars('expver') + if 'number' in dset: + dset = dset.drop_vars('number') + return dset
+ + + +
+[docs] +def xr_open_mfdataset(files, **kwargs): + """Wrapper for xr.open_mfdataset with default opening options.""" + default_kwargs = {'engine': 'netcdf4'} + default_kwargs.update(kwargs) + if isinstance(files, str): + files = [files] + return xr.open_mfdataset( + files, preprocess=preprocess_datasets, **default_kwargs + )
+ + + +
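A hedged usage example of the wrapper above (the file names are hypothetical):

# Illustrative only: each file passes through the standardizing
# preprocess hook before concatenation along time.
ds = xr_open_mfdataset(['era5_2020.nc', 'era5_2021.nc'])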
+[docs] +def safe_cast(o): + """Cast to type safe for serialization.""" + if isinstance(o, tf.Tensor): + o = o.numpy() + if isinstance(o, (float, np.float64, np.float32)): + return float(o) + if isinstance(o, (int, np.int64, np.int32)): + return int(o) + if isinstance(o, (tuple, np.ndarray)): + return list(o) + if isinstance(o, (str, list)): + return o + return str(o)
+ + + +
+[docs] +def safe_serialize(obj, **kwargs): + """json.dumps with non-serializable object handling.""" + return json.dumps(obj, default=safe_cast, **kwargs)
+ + + +
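An illustrative example of the fallback casting (values chosen here are hypothetical):

# Illustrative only: objects json.dumps cannot handle are routed
# through safe_cast, so numpy scalars and arrays serialize cleanly.
payload = {'loss': np.float32(0.125), 'shape': np.array([4, 5])}
print(safe_serialize(payload))  # {"loss": 0.125, "shape": [4, 5]}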
+[docs] +class Timer: + """Timer class for timing and storing function call times.""" + + def __init__(self): + self.log = {} + self._elapsed = 0 + self._start = None + self._stop = None + +
+[docs] + def start(self): + """Set start of timing period.""" + self._start = time.time()
+ + +
+[docs] + def stop(self): + """Set stop time of timing period.""" + self._stop = time.time()
+ + + @property + def elapsed(self): + """Elapsed time between start and stop.""" + return self._stop - self._start + + @property + def elapsed_str(self): + """Elapsed time in string format.""" + return f'{round(self.elapsed, 5)} seconds' + +
+[docs]
+    def __call__(self, func, call_id=None, log=False):
+        """Time function call and store elapsed time in self.log.
+
+        Parameters
+        ----------
+        func : function
+            Function to time
+        call_id: int | None
+            ID to distinguish calls with the same function name. For example,
+            when running forward passes on multiple chunks.
+        log : bool
+            Whether to write to active logger
+
+        Returns
+        -------
+        wrapper : function
+            Wrapped version of func that records the elapsed time when called
+        """
+
+        def wrapper(*args, **kwargs):
+            """Wrapper with decorator pattern.
+
+            Parameters
+            ----------
+            args : list
+                positional arguments for func
+            kwargs : dict
+                keyword arguments for func
+            """
+            self.start()
+            out = func(*args, **kwargs)
+            self.stop()
+            if call_id is not None:
+                entry = self.log.get(call_id, {})
+                entry[func.__name__] = self.elapsed
+                self.log[call_id] = entry
+            else:
+                self.log[func.__name__] = self.elapsed
+            if log:
+                logger.debug(
+                    'Call to %s finished in %s',
+                    func.__name__,
+                    self.elapsed_str,
+                )
+            return out
+
+        return wrapper
+
+ + + +
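A short usage sketch of the decorator pattern above (illustrative; not part of the source):

# Illustrative only: wrapping a callable records its elapsed time in
# Timer.log keyed by the function name (or by call_id if given).
timer = Timer()
timed_sleep = timer(time.sleep, log=True)
timed_sleep(0.1)
print(timer.log['sleep'])  # ~0.1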
+[docs] +def generate_random_string(length): + """Generate random string with given length. Used for naming temporary + files to avoid collisions.""" + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(length))
+ + + +
+[docs]
+def temporal_coarsening(data, t_enhance=4, method='subsample'):
+    """Coarsen data according to t_enhance resolution
+
+    Parameters
+    ----------
+    data : Union[np.ndarray, da.core.Array]
+        5D array with dimensions
+        (observations, spatial_1, spatial_2, temporal, features)
+    t_enhance : int
+        factor by which to coarsen temporal dimension
+    method : str
+        accepted options: [subsample, average, total, min, max]
+        Subsample will take every t_enhance-th time step, average will average
+        over t_enhance time steps, total will sum over t_enhance time steps,
+        and min / max will take the min / max over t_enhance time steps
+
+    Returns
+    -------
+    coarse_data : Union[np.ndarray, da.core.Array]
+        5D array with same dimensions as data with new coarse resolution
+    """
+
+    if t_enhance is not None and len(data.shape) == 5:
+        new_shape = (
+            data.shape[0],
+            data.shape[1],
+            data.shape[2],
+            -1,
+            t_enhance,
+            data.shape[4],
+        )
+
+        if method == 'subsample':
+            coarse_data = data[:, :, :, ::t_enhance, :]
+
+        elif method == 'average':
+            coarse_data = np.nansum(np.reshape(data, new_shape), axis=4)
+            coarse_data /= t_enhance
+
+        elif method == 'max':
+            coarse_data = np.max(np.reshape(data, new_shape), axis=4)
+
+        elif method == 'min':
+            coarse_data = np.min(np.reshape(data, new_shape), axis=4)
+
+        elif method == 'total':
+            coarse_data = np.nansum(np.reshape(data, new_shape), axis=4)
+
+        else:
+            msg = (
+                f'Did not recognize temporal_coarsening method "{method}", '
+                'can only accept one of: [subsample, average, total, max, min]'
+            )
+            logger.error(msg)
+            raise KeyError(msg)
+
+    else:
+        coarse_data = data
+
+    return coarse_data
+ + + +
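A worked shape example (illustrative; not part of the source):

# Illustrative only: average 48 hourly steps down to 12 four-hourly
# steps for one observation with two features.
hourly = RANDOM_GENERATOR.uniform(0, 1, (1, 4, 4, 48, 2))
coarse = temporal_coarsening(hourly, t_enhance=4, method='average')
assert coarse.shape == (1, 4, 4, 12, 2)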
+[docs]
+def spatial_coarsening(data, s_enhance=2, obs_axis=True):
+    """Coarsen data according to s_enhance resolution
+
+    Parameters
+    ----------
+    data : Union[np.ndarray, da.core.Array]
+        5D | 4D | 3D | 2D array with dimensions:
+        (n_obs, spatial_1, spatial_2, temporal, features) (obs_axis=True)
+        (n_obs, spatial_1, spatial_2, features) (obs_axis=True)
+        (spatial_1, spatial_2, temporal, features) (obs_axis=False)
+        (spatial_1, spatial_2, temporal_or_features) (obs_axis=False)
+        (spatial_1, spatial_2) (obs_axis=False)
+    s_enhance : int
+        factor by which to coarsen spatial dimensions
+    obs_axis : bool
+        Flag indicating whether axis 0 is the observation axis. If True
+        (default) the spatial axes are (1, 2) (zero-indexed); if False they
+        are (0, 1)
+
+    Returns
+    -------
+    data : Union[np.ndarray, da.core.Array]
+        2D | 3D | 4D | 5D array with same dimensions as data with new coarse
+        resolution
+    """
+
+    if len(data.shape) < 2:
+        msg = (
+            'Data must be 2D, 3D, 4D, or 5D to do spatial coarsening, but '
+            f'received: {data.shape}'
+        )
+        logger.error(msg)
+        raise ValueError(msg)
+
+    if obs_axis and len(data.shape) < 3:
+        msg = (
+            'Data must be 3D, 4D, or 5D to do spatial coarsening with '
+            f'obs_axis=True, but received: {data.shape}'
+        )
+        logger.error(msg)
+        raise ValueError(msg)
+
+    if s_enhance is not None and s_enhance > 1:
+        bad1 = obs_axis and (
+            data.shape[1] % s_enhance != 0 or data.shape[2] % s_enhance != 0
+        )
+        bad2 = not obs_axis and (
+            data.shape[0] % s_enhance != 0 or data.shape[1] % s_enhance != 0
+        )
+        if bad1 or bad2:
+            msg = (
+                's_enhance must evenly divide grid size. '
+                f'Received s_enhance: {s_enhance} with data shape: '
+                f'{data.shape}'
+            )
+            logger.error(msg)
+            raise ValueError(msg)
+
+        if obs_axis and len(data.shape) == 3:
+            data = np.reshape(
+                data,
+                (
+                    data.shape[0],
+                    data.shape[1] // s_enhance,
+                    s_enhance,
+                    data.shape[2] // s_enhance,
+                    s_enhance,
+                ),
+            )
+            data = data.sum(axis=(2, 4)) / s_enhance**2
+
+        elif obs_axis:
+            data = np.reshape(
+                data,
+                (
+                    data.shape[0],
+                    data.shape[1] // s_enhance,
+                    s_enhance,
+                    data.shape[2] // s_enhance,
+                    s_enhance,
+                    *data.shape[3:],
+                ),
+            )
+            data = data.sum(axis=(2, 4)) / s_enhance**2
+
+        elif not obs_axis and len(data.shape) == 2:
+            data = np.reshape(
+                data,
+                (
+                    data.shape[0] // s_enhance,
+                    s_enhance,
+                    data.shape[1] // s_enhance,
+                    s_enhance,
+                ),
+            )
+            data = data.sum(axis=(1, 3)) / s_enhance**2
+
+        elif not obs_axis:
+            data = np.reshape(
+                data,
+                (
+                    data.shape[0] // s_enhance,
+                    s_enhance,
+                    data.shape[1] // s_enhance,
+                    s_enhance,
+                    *data.shape[2:],
+                ),
+            )
+            data = data.sum(axis=(1, 3)) / s_enhance**2
+
+        else:
+            msg = (
+                'Data must be 2D, 3D, 4D, or 5D to do spatial coarsening, but '
+                f'received: {data.shape}'
+            )
+            logger.error(msg)
+            raise ValueError(msg)
+
+    return data
+ + + +
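A worked shape example (illustrative; not part of the source):

# Illustrative only: 2x block-average coarsening of the two spatial
# dimensions; grid sizes must divide evenly by s_enhance.
fine = RANDOM_GENERATOR.uniform(0, 1, (3, 10, 10, 6, 2))
coarse = spatial_coarsening(fine, s_enhance=2, obs_axis=True)
assert coarse.shape == (3, 5, 5, 6, 2)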
+[docs] +def nn_fill_array(array): + """Fill any NaN values in an np.ndarray from the nearest non-nan values. + + Parameters + ---------- + array : Union[np.ndarray, da.core.Array] + Input array with NaN values + + Returns + ------- + array : Union[np.ndarray, da.core.Array] + Output array with NaN values filled + """ + + nan_mask = np.isnan(array) + indices = nd.distance_transform_edt( + nan_mask, return_distances=False, return_indices=True + ) + if hasattr(array, 'vindex'): + return array.vindex[tuple(indices)] + return array[tuple(indices)]
+ + + +
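A small illustrative example (not part of the source):

# Illustrative only: each NaN is replaced by the value at the nearest
# non-NaN index found via scipy's Euclidean distance transform.
arr = np.array([[1.0, np.nan], [np.nan, 4.0]])
filled = nn_fill_array(arr)
assert not np.isnan(filled).any()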
+[docs]
+def pd_date_range(*args, **kwargs):
+    """A simple wrapper around pd.date_range() that handles the closed
+    vs. inclusive kwarg change in pd 1.4.0"""
+    incl = version.parse(pd.__version__) >= version.parse('1.4.0')
+
+    if incl and 'closed' in kwargs:
+        kwargs['inclusive'] = kwargs.pop('closed')
+    elif not incl and 'inclusive' in kwargs:
+        kwargs['closed'] = kwargs.pop('inclusive')
+        if kwargs['closed'] == 'both':
+            kwargs['closed'] = None
+
+    return pd.date_range(*args, **kwargs)
+ +
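An illustrative call showing the keyword translation (not part of the source):

# Illustrative only: on pandas >= 1.4 the deprecated ``closed`` kwarg
# is mapped to ``inclusive`` before calling pd.date_range.
ti = pd_date_range('20220101', '20220102', freq='6h', closed='left')
assert len(ti) == 4  # 00:00, 06:00, 12:00, 18:00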
+ + + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.batch.batch_cli.rst b/_sources/_autosummary/sup3r.batch.batch_cli.rst new file mode 100644 index 000000000..0546525ed --- /dev/null +++ b/_sources/_autosummary/sup3r.batch.batch_cli.rst @@ -0,0 +1,23 @@ +sup3r.batch.batch\_cli +====================== + +.. automodule:: sup3r.batch.batch_cli + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.batch.rst b/_sources/_autosummary/sup3r.batch.rst new file mode 100644 index 000000000..e4333ebde --- /dev/null +++ b/_sources/_autosummary/sup3r.batch.rst @@ -0,0 +1,30 @@ +sup3r.batch +=========== + +.. automodule:: sup3r.batch + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + batch_cli + diff --git a/_sources/_autosummary/sup3r.bias.base.DataRetrievalBase.rst b/_sources/_autosummary/sup3r.bias.base.DataRetrievalBase.rst new file mode 100644 index 000000000..d090ef0dd --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.base.DataRetrievalBase.rst @@ -0,0 +1,38 @@ +sup3r.bias.base.DataRetrievalBase +================================= + +.. currentmodule:: sup3r.bias.base + +.. autoclass:: DataRetrievalBase + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataRetrievalBase.compare_dists + ~DataRetrievalBase.get_base_data + ~DataRetrievalBase.get_base_gid + ~DataRetrievalBase.get_bias_data + ~DataRetrievalBase.get_bias_gid + ~DataRetrievalBase.get_data_pair + ~DataRetrievalBase.get_node_cmd + ~DataRetrievalBase.pre_load + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DataRetrievalBase.distance_upper_bound + ~DataRetrievalBase.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.base.rst b/_sources/_autosummary/sup3r.bias.base.rst new file mode 100644 index 000000000..603cd533f --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.base.rst @@ -0,0 +1,31 @@ +sup3r.bias.base +=============== + +.. automodule:: sup3r.bias.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DataRetrievalBase + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.LinearCorrection.rst b/_sources/_autosummary/sup3r.bias.bias_calc.LinearCorrection.rst new file mode 100644 index 000000000..3399d2cd8 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.LinearCorrection.rst @@ -0,0 +1,43 @@ +sup3r.bias.bias\_calc.LinearCorrection +====================================== + +.. currentmodule:: sup3r.bias.bias_calc + +.. autoclass:: LinearCorrection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~LinearCorrection.compare_dists + ~LinearCorrection.fill_and_smooth + ~LinearCorrection.get_base_data + ~LinearCorrection.get_base_gid + ~LinearCorrection.get_bias_data + ~LinearCorrection.get_bias_gid + ~LinearCorrection.get_data_pair + ~LinearCorrection.get_linear_correction + ~LinearCorrection.get_node_cmd + ~LinearCorrection.pre_load + ~LinearCorrection.run + ~LinearCorrection.write_outputs + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~LinearCorrection.NT + ~LinearCorrection.distance_upper_bound + ~LinearCorrection.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.rst b/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.rst new file mode 100644 index 000000000..134f740a4 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyLinearCorrection.rst @@ -0,0 +1,43 @@ +sup3r.bias.bias\_calc.MonthlyLinearCorrection +============================================= + +.. currentmodule:: sup3r.bias.bias_calc + +.. autoclass:: MonthlyLinearCorrection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MonthlyLinearCorrection.compare_dists + ~MonthlyLinearCorrection.fill_and_smooth + ~MonthlyLinearCorrection.get_base_data + ~MonthlyLinearCorrection.get_base_gid + ~MonthlyLinearCorrection.get_bias_data + ~MonthlyLinearCorrection.get_bias_gid + ~MonthlyLinearCorrection.get_data_pair + ~MonthlyLinearCorrection.get_linear_correction + ~MonthlyLinearCorrection.get_node_cmd + ~MonthlyLinearCorrection.pre_load + ~MonthlyLinearCorrection.run + ~MonthlyLinearCorrection.write_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MonthlyLinearCorrection.NT + ~MonthlyLinearCorrection.distance_upper_bound + ~MonthlyLinearCorrection.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.rst b/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.rst new file mode 100644 index 000000000..dfbee49ec --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.MonthlyScalarCorrection.rst @@ -0,0 +1,43 @@ +sup3r.bias.bias\_calc.MonthlyScalarCorrection +============================================= + +.. currentmodule:: sup3r.bias.bias_calc + +.. autoclass:: MonthlyScalarCorrection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MonthlyScalarCorrection.compare_dists + ~MonthlyScalarCorrection.fill_and_smooth + ~MonthlyScalarCorrection.get_base_data + ~MonthlyScalarCorrection.get_base_gid + ~MonthlyScalarCorrection.get_bias_data + ~MonthlyScalarCorrection.get_bias_gid + ~MonthlyScalarCorrection.get_data_pair + ~MonthlyScalarCorrection.get_linear_correction + ~MonthlyScalarCorrection.get_node_cmd + ~MonthlyScalarCorrection.pre_load + ~MonthlyScalarCorrection.run + ~MonthlyScalarCorrection.write_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MonthlyScalarCorrection.NT + ~MonthlyScalarCorrection.distance_upper_bound + ~MonthlyScalarCorrection.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.ScalarCorrection.rst b/_sources/_autosummary/sup3r.bias.bias_calc.ScalarCorrection.rst new file mode 100644 index 000000000..56211d6f4 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.ScalarCorrection.rst @@ -0,0 +1,43 @@ +sup3r.bias.bias\_calc.ScalarCorrection +====================================== + +.. currentmodule:: sup3r.bias.bias_calc + +.. autoclass:: ScalarCorrection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~ScalarCorrection.compare_dists + ~ScalarCorrection.fill_and_smooth + ~ScalarCorrection.get_base_data + ~ScalarCorrection.get_base_gid + ~ScalarCorrection.get_bias_data + ~ScalarCorrection.get_bias_gid + ~ScalarCorrection.get_data_pair + ~ScalarCorrection.get_linear_correction + ~ScalarCorrection.get_node_cmd + ~ScalarCorrection.pre_load + ~ScalarCorrection.run + ~ScalarCorrection.write_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ScalarCorrection.NT + ~ScalarCorrection.distance_upper_bound + ~ScalarCorrection.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.SkillAssessment.rst b/_sources/_autosummary/sup3r.bias.bias_calc.SkillAssessment.rst new file mode 100644 index 000000000..3bd2c0517 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.SkillAssessment.rst @@ -0,0 +1,44 @@ +sup3r.bias.bias\_calc.SkillAssessment +===================================== + +.. currentmodule:: sup3r.bias.bias_calc + +.. autoclass:: SkillAssessment + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SkillAssessment.compare_dists + ~SkillAssessment.fill_and_smooth + ~SkillAssessment.get_base_data + ~SkillAssessment.get_base_gid + ~SkillAssessment.get_bias_data + ~SkillAssessment.get_bias_gid + ~SkillAssessment.get_data_pair + ~SkillAssessment.get_linear_correction + ~SkillAssessment.get_node_cmd + ~SkillAssessment.pre_load + ~SkillAssessment.run + ~SkillAssessment.write_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SkillAssessment.NT + ~SkillAssessment.PERCENTILES + ~SkillAssessment.distance_upper_bound + ~SkillAssessment.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc.rst b/_sources/_autosummary/sup3r.bias.bias_calc.rst new file mode 100644 index 000000000..5647ba7aa --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc.rst @@ -0,0 +1,35 @@ +sup3r.bias.bias\_calc +===================== + +.. automodule:: sup3r.bias.bias_calc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + LinearCorrection + MonthlyLinearCorrection + MonthlyScalarCorrection + ScalarCorrection + SkillAssessment + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_local_job.rst b/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_local_job.rst new file mode 100644 index 000000000..e13ed8bab --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_local_job.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_calc\_cli.kickoff\_local\_job +============================================== + +.. currentmodule:: sup3r.bias.bias_calc_cli + +.. autofunction:: kickoff_local_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_slurm_job.rst b/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_slurm_job.rst new file mode 100644 index 000000000..661e825ee --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_cli.kickoff_slurm_job.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_calc\_cli.kickoff\_slurm\_job +============================================== + +.. currentmodule:: sup3r.bias.bias_calc_cli + +.. 
autofunction:: kickoff_slurm_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_cli.rst b/_sources/_autosummary/sup3r.bias.bias_calc_cli.rst new file mode 100644 index 000000000..330e61a4a --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_cli.rst @@ -0,0 +1,31 @@ +sup3r.bias.bias\_calc\_cli +========================== + +.. automodule:: sup3r.bias.bias_calc_cli + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + kickoff_local_job + kickoff_slurm_job + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_vortex.BiasCorrectUpdate.rst b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.BiasCorrectUpdate.rst new file mode 100644 index 000000000..19d0ef6b3 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.BiasCorrectUpdate.rst @@ -0,0 +1,26 @@ +sup3r.bias.bias\_calc\_vortex.BiasCorrectUpdate +=============================================== + +.. currentmodule:: sup3r.bias.bias_calc_vortex + +.. autoclass:: BiasCorrectUpdate + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BiasCorrectUpdate.get_bc_factors + ~BiasCorrectUpdate.run + ~BiasCorrectUpdate.update_file + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_vortex.VortexMeanPrepper.rst b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.VortexMeanPrepper.rst new file mode 100644 index 000000000..c9483cec9 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.VortexMeanPrepper.rst @@ -0,0 +1,48 @@ +sup3r.bias.bias\_calc\_vortex.VortexMeanPrepper +=============================================== + +.. currentmodule:: sup3r.bias.bias_calc_vortex + +.. autoclass:: VortexMeanPrepper + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~VortexMeanPrepper.convert_all_tifs + ~VortexMeanPrepper.convert_month_height_tif + ~VortexMeanPrepper.convert_month_tif + ~VortexMeanPrepper.get_all_data + ~VortexMeanPrepper.get_height_files + ~VortexMeanPrepper.get_input_file + ~VortexMeanPrepper.get_lat_lon + ~VortexMeanPrepper.get_month + ~VortexMeanPrepper.get_output_file + ~VortexMeanPrepper.interp + ~VortexMeanPrepper.run + ~VortexMeanPrepper.write_data + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~VortexMeanPrepper.global_attrs + ~VortexMeanPrepper.in_features + ~VortexMeanPrepper.input_files + ~VortexMeanPrepper.mask + ~VortexMeanPrepper.meta + ~VortexMeanPrepper.out_features + ~VortexMeanPrepper.output_files + ~VortexMeanPrepper.time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_calc_vortex.rst b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.rst new file mode 100644 index 000000000..0e75f8b80 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_calc_vortex.rst @@ -0,0 +1,32 @@ +sup3r.bias.bias\_calc\_vortex +============================= + +.. automodule:: sup3r.bias.bias_calc_vortex + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + BiasCorrectUpdate + VortexMeanPrepper + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.global_linear_bc.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.global_linear_bc.rst new file mode 100644 index 000000000..e23132432 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.global_linear_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_transforms.global\_linear\_bc +============================================== + +.. currentmodule:: sup3r.bias.bias_transforms + +.. autofunction:: global_linear_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.local_linear_bc.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.local_linear_bc.rst new file mode 100644 index 000000000..80194c550 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.local_linear_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_transforms.local\_linear\_bc +============================================= + +.. currentmodule:: sup3r.bias.bias_transforms + +.. autofunction:: local_linear_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.local_presrat_bc.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.local_presrat_bc.rst new file mode 100644 index 000000000..0724a4797 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.local_presrat_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_transforms.local\_presrat\_bc +============================================== + +.. currentmodule:: sup3r.bias.bias_transforms + +.. autofunction:: local_presrat_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.local_qdm_bc.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.local_qdm_bc.rst new file mode 100644 index 000000000..9beab6bcc --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.local_qdm_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_transforms.local\_qdm\_bc +========================================== + +.. currentmodule:: sup3r.bias.bias_transforms + +.. autofunction:: local_qdm_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.monthly_local_linear_bc.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.monthly_local_linear_bc.rst new file mode 100644 index 000000000..0225d9335 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.monthly_local_linear_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.bias\_transforms.monthly\_local\_linear\_bc +====================================================== + +.. currentmodule:: sup3r.bias.bias_transforms + +.. autofunction:: monthly_local_linear_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.bias_transforms.rst b/_sources/_autosummary/sup3r.bias.bias_transforms.rst new file mode 100644 index 000000000..6664ff2e9 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.bias_transforms.rst @@ -0,0 +1,34 @@ +sup3r.bias.bias\_transforms +=========================== + +.. automodule:: sup3r.bias.bias_transforms + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + global_linear_bc + local_linear_bc + local_presrat_bc + local_qdm_bc + monthly_local_linear_bc + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.mixins.FillAndSmoothMixin.rst b/_sources/_autosummary/sup3r.bias.mixins.FillAndSmoothMixin.rst new file mode 100644 index 000000000..f5b46d0d7 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.mixins.FillAndSmoothMixin.rst @@ -0,0 +1,24 @@ +sup3r.bias.mixins.FillAndSmoothMixin +==================================== + +.. currentmodule:: sup3r.bias.mixins + +.. autoclass:: FillAndSmoothMixin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~FillAndSmoothMixin.fill_and_smooth + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.mixins.ZeroRateMixin.rst b/_sources/_autosummary/sup3r.bias.mixins.ZeroRateMixin.rst new file mode 100644 index 000000000..e6178d2be --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.mixins.ZeroRateMixin.rst @@ -0,0 +1,24 @@ +sup3r.bias.mixins.ZeroRateMixin +=============================== + +.. currentmodule:: sup3r.bias.mixins + +.. autoclass:: ZeroRateMixin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ZeroRateMixin.zero_precipitation_rate + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.mixins.rst b/_sources/_autosummary/sup3r.bias.mixins.rst new file mode 100644 index 000000000..fa51cf505 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.mixins.rst @@ -0,0 +1,32 @@ +sup3r.bias.mixins +================= + +.. automodule:: sup3r.bias.mixins + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + FillAndSmoothMixin + ZeroRateMixin + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.presrat.PresRat.rst b/_sources/_autosummary/sup3r.bias.presrat.PresRat.rst new file mode 100644 index 000000000..767e164d8 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.presrat.PresRat.rst @@ -0,0 +1,46 @@ +sup3r.bias.presrat.PresRat +========================== + +.. currentmodule:: sup3r.bias.presrat + +.. autoclass:: PresRat + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PresRat.calc_k_factor + ~PresRat.calc_tau_fut + ~PresRat.compare_dists + ~PresRat.fill_and_smooth + ~PresRat.get_base_data + ~PresRat.get_base_gid + ~PresRat.get_bias_data + ~PresRat.get_bias_gid + ~PresRat.get_data_pair + ~PresRat.get_node_cmd + ~PresRat.get_qdm_params + ~PresRat.pre_load + ~PresRat.run + ~PresRat.window_mask + ~PresRat.write_outputs + ~PresRat.zero_precipitation_rate + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PresRat.distance_upper_bound + ~PresRat.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.presrat.rst b/_sources/_autosummary/sup3r.bias.presrat.rst new file mode 100644 index 000000000..4573197fe --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.presrat.rst @@ -0,0 +1,31 @@ +sup3r.bias.presrat +================== + +.. automodule:: sup3r.bias.presrat + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + PresRat + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.qdm.QuantileDeltaMappingCorrection.rst b/_sources/_autosummary/sup3r.bias.qdm.QuantileDeltaMappingCorrection.rst new file mode 100644 index 000000000..3b0a2dd3b --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.qdm.QuantileDeltaMappingCorrection.rst @@ -0,0 +1,43 @@ +sup3r.bias.qdm.QuantileDeltaMappingCorrection +============================================= + +.. currentmodule:: sup3r.bias.qdm + +.. autoclass:: QuantileDeltaMappingCorrection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QuantileDeltaMappingCorrection.compare_dists + ~QuantileDeltaMappingCorrection.fill_and_smooth + ~QuantileDeltaMappingCorrection.get_base_data + ~QuantileDeltaMappingCorrection.get_base_gid + ~QuantileDeltaMappingCorrection.get_bias_data + ~QuantileDeltaMappingCorrection.get_bias_gid + ~QuantileDeltaMappingCorrection.get_data_pair + ~QuantileDeltaMappingCorrection.get_node_cmd + ~QuantileDeltaMappingCorrection.get_qdm_params + ~QuantileDeltaMappingCorrection.pre_load + ~QuantileDeltaMappingCorrection.run + ~QuantileDeltaMappingCorrection.window_mask + ~QuantileDeltaMappingCorrection.write_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~QuantileDeltaMappingCorrection.distance_upper_bound + ~QuantileDeltaMappingCorrection.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.qdm.rst b/_sources/_autosummary/sup3r.bias.qdm.rst new file mode 100644 index 000000000..df73caae3 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.qdm.rst @@ -0,0 +1,31 @@ +sup3r.bias.qdm +============== + +.. automodule:: sup3r.bias.qdm + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + QuantileDeltaMappingCorrection + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.bias.rst b/_sources/_autosummary/sup3r.bias.rst new file mode 100644 index 000000000..74105c6ac --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.rst @@ -0,0 +1,38 @@ +sup3r.bias +========== + +.. automodule:: sup3r.bias + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + bias_calc + bias_calc_cli + bias_calc_vortex + bias_transforms + mixins + presrat + qdm + utilities + diff --git a/_sources/_autosummary/sup3r.bias.utilities.bias_correct_feature.rst b/_sources/_autosummary/sup3r.bias.utilities.bias_correct_feature.rst new file mode 100644 index 000000000..3d5a207e2 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.utilities.bias_correct_feature.rst @@ -0,0 +1,6 @@ +sup3r.bias.utilities.bias\_correct\_feature +=========================================== + +.. currentmodule:: sup3r.bias.utilities + +.. autofunction:: bias_correct_feature \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.utilities.bias_correct_features.rst b/_sources/_autosummary/sup3r.bias.utilities.bias_correct_features.rst new file mode 100644 index 000000000..670e16836 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.utilities.bias_correct_features.rst @@ -0,0 +1,6 @@ +sup3r.bias.utilities.bias\_correct\_features +============================================ + +.. currentmodule:: sup3r.bias.utilities + +.. 
autofunction:: bias_correct_features \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.utilities.lin_bc.rst b/_sources/_autosummary/sup3r.bias.utilities.lin_bc.rst new file mode 100644 index 000000000..12a26cc8f --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.utilities.lin_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.utilities.lin\_bc +============================ + +.. currentmodule:: sup3r.bias.utilities + +.. autofunction:: lin_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.utilities.qdm_bc.rst b/_sources/_autosummary/sup3r.bias.utilities.qdm_bc.rst new file mode 100644 index 000000000..50e736a55 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.utilities.qdm_bc.rst @@ -0,0 +1,6 @@ +sup3r.bias.utilities.qdm\_bc +============================ + +.. currentmodule:: sup3r.bias.utilities + +.. autofunction:: qdm_bc \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.bias.utilities.rst b/_sources/_autosummary/sup3r.bias.utilities.rst new file mode 100644 index 000000000..96e470987 --- /dev/null +++ b/_sources/_autosummary/sup3r.bias.utilities.rst @@ -0,0 +1,33 @@ +sup3r.bias.utilities +==================== + +.. automodule:: sup3r.bias.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + bias_correct_feature + bias_correct_features + lin_bc + qdm_bc + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.cli.rst b/_sources/_autosummary/sup3r.cli.rst new file mode 100644 index 000000000..98699854e --- /dev/null +++ b/_sources/_autosummary/sup3r.cli.rst @@ -0,0 +1,23 @@ +sup3r.cli +========= + +.. automodule:: sup3r.cli + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.abstract.AbstractInterface.rst b/_sources/_autosummary/sup3r.models.abstract.AbstractInterface.rst new file mode 100644 index 000000000..d4181851e --- /dev/null +++ b/_sources/_autosummary/sup3r.models.abstract.AbstractInterface.rst @@ -0,0 +1,52 @@ +sup3r.models.abstract.AbstractInterface +======================================= + +.. currentmodule:: sup3r.models.abstract + +.. autoclass:: AbstractInterface + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractInterface.generate + ~AbstractInterface.get_s_enhance_from_layers + ~AbstractInterface.get_t_enhance_from_layers + ~AbstractInterface.load + ~AbstractInterface.save_params + ~AbstractInterface.seed + ~AbstractInterface.set_model_params + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~AbstractInterface.hr_exo_features + ~AbstractInterface.hr_out_features + ~AbstractInterface.input_dims + ~AbstractInterface.input_resolution + ~AbstractInterface.is_4d + ~AbstractInterface.is_5d + ~AbstractInterface.lr_features + ~AbstractInterface.meta + ~AbstractInterface.model_params + ~AbstractInterface.output_resolution + ~AbstractInterface.s_enhance + ~AbstractInterface.s_enhancements + ~AbstractInterface.smoothed_features + ~AbstractInterface.smoothing + ~AbstractInterface.t_enhance + ~AbstractInterface.t_enhancements + ~AbstractInterface.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.abstract.AbstractSingleModel.rst b/_sources/_autosummary/sup3r.models.abstract.AbstractSingleModel.rst new file mode 100644 index 000000000..bad6166b8 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.abstract.AbstractSingleModel.rst @@ -0,0 +1,55 @@ +sup3r.models.abstract.AbstractSingleModel +========================================= + +.. currentmodule:: sup3r.models.abstract + +.. autoclass:: AbstractSingleModel + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractSingleModel.dict_to_tensorboard + ~AbstractSingleModel.early_stop + ~AbstractSingleModel.finish_epoch + ~AbstractSingleModel.generate + ~AbstractSingleModel.get_high_res_exo_input + ~AbstractSingleModel.get_loss_fun + ~AbstractSingleModel.get_optimizer_config + ~AbstractSingleModel.get_optimizer_state + ~AbstractSingleModel.get_single_grad + ~AbstractSingleModel.init_optimizer + ~AbstractSingleModel.load_network + ~AbstractSingleModel.load_saved_params + ~AbstractSingleModel.log_loss_details + ~AbstractSingleModel.norm_input + ~AbstractSingleModel.profile_to_tensorboard + ~AbstractSingleModel.run_gradient_descent + ~AbstractSingleModel.save + ~AbstractSingleModel.set_norm_stats + ~AbstractSingleModel.un_norm_output + ~AbstractSingleModel.update_loss_details + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractSingleModel.generator + ~AbstractSingleModel.generator_weights + ~AbstractSingleModel.history + ~AbstractSingleModel.means + ~AbstractSingleModel.optimizer + ~AbstractSingleModel.stdevs + ~AbstractSingleModel.total_batches + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.abstract.TensorboardMixIn.rst b/_sources/_autosummary/sup3r.models.abstract.TensorboardMixIn.rst new file mode 100644 index 000000000..40ace2647 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.abstract.TensorboardMixIn.rst @@ -0,0 +1,31 @@ +sup3r.models.abstract.TensorboardMixIn +====================================== + +.. currentmodule:: sup3r.models.abstract + +.. autoclass:: TensorboardMixIn + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TensorboardMixIn.dict_to_tensorboard + ~TensorboardMixIn.profile_to_tensorboard + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TensorboardMixIn.total_batches + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.abstract.rst b/_sources/_autosummary/sup3r.models.abstract.rst new file mode 100644 index 000000000..3b1d4a168 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.abstract.rst @@ -0,0 +1,33 @@ +sup3r.models.abstract +===================== + +.. automodule:: sup3r.models.abstract + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractInterface + AbstractSingleModel + TensorboardMixIn + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.base.Sup3rGan.rst b/_sources/_autosummary/sup3r.models.base.Sup3rGan.rst new file mode 100644 index 000000000..713f70790 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.base.Sup3rGan.rst @@ -0,0 +1,95 @@ +sup3r.models.base.Sup3rGan +========================== + +.. currentmodule:: sup3r.models.base + +.. autoclass:: Sup3rGan + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rGan.calc_loss + ~Sup3rGan.calc_loss_disc + ~Sup3rGan.calc_loss_gen_advers + ~Sup3rGan.calc_loss_gen_content + ~Sup3rGan.calc_val_loss + ~Sup3rGan.check_batch_handler_attrs + ~Sup3rGan.dict_to_tensorboard + ~Sup3rGan.discriminate + ~Sup3rGan.early_stop + ~Sup3rGan.finish_epoch + ~Sup3rGan.generate + ~Sup3rGan.get_high_res_exo_input + ~Sup3rGan.get_loss_fun + ~Sup3rGan.get_optimizer_config + ~Sup3rGan.get_optimizer_state + ~Sup3rGan.get_s_enhance_from_layers + ~Sup3rGan.get_single_grad + ~Sup3rGan.get_t_enhance_from_layers + ~Sup3rGan.get_weight_update_fraction + ~Sup3rGan.init_optimizer + ~Sup3rGan.init_weights + ~Sup3rGan.load + ~Sup3rGan.load_network + ~Sup3rGan.load_saved_params + ~Sup3rGan.log_loss_details + ~Sup3rGan.norm_input + ~Sup3rGan.profile_to_tensorboard + ~Sup3rGan.run_gradient_descent + ~Sup3rGan.save + ~Sup3rGan.save_params + ~Sup3rGan.seed + ~Sup3rGan.set_model_params + ~Sup3rGan.set_norm_stats + ~Sup3rGan.train + ~Sup3rGan.train_epoch + ~Sup3rGan.un_norm_output + ~Sup3rGan.update_adversarial_weights + ~Sup3rGan.update_loss_details + ~Sup3rGan.update_optimizer + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sup3rGan.discriminator + ~Sup3rGan.discriminator_weights + ~Sup3rGan.generator + ~Sup3rGan.generator_weights + ~Sup3rGan.history + ~Sup3rGan.hr_exo_features + ~Sup3rGan.hr_out_features + ~Sup3rGan.input_dims + ~Sup3rGan.input_resolution + ~Sup3rGan.is_4d + ~Sup3rGan.is_5d + ~Sup3rGan.lr_features + ~Sup3rGan.means + ~Sup3rGan.meta + ~Sup3rGan.model_params + ~Sup3rGan.optimizer + ~Sup3rGan.optimizer_disc + ~Sup3rGan.output_resolution + ~Sup3rGan.s_enhance + ~Sup3rGan.s_enhancements + ~Sup3rGan.smoothed_features + ~Sup3rGan.smoothing + ~Sup3rGan.stdevs + ~Sup3rGan.t_enhance + ~Sup3rGan.t_enhancements + ~Sup3rGan.total_batches + ~Sup3rGan.version_record + ~Sup3rGan.weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.base.rst b/_sources/_autosummary/sup3r.models.base.rst new file mode 100644 index 000000000..6679976ae --- /dev/null +++ b/_sources/_autosummary/sup3r.models.base.rst @@ -0,0 +1,31 @@ +sup3r.models.base +================= + +.. automodule:: sup3r.models.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sup3rGan + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.conditional.Sup3rCondMom.rst b/_sources/_autosummary/sup3r.models.conditional.Sup3rCondMom.rst new file mode 100644 index 000000000..ea5697211 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.conditional.Sup3rCondMom.rst @@ -0,0 +1,85 @@ +sup3r.models.conditional.Sup3rCondMom +===================================== + +.. currentmodule:: sup3r.models.conditional + +.. 
autoclass:: Sup3rCondMom + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rCondMom.calc_loss + ~Sup3rCondMom.calc_loss_cond_mom + ~Sup3rCondMom.calc_val_loss + ~Sup3rCondMom.dict_to_tensorboard + ~Sup3rCondMom.early_stop + ~Sup3rCondMom.finish_epoch + ~Sup3rCondMom.generate + ~Sup3rCondMom.get_high_res_exo_input + ~Sup3rCondMom.get_loss_fun + ~Sup3rCondMom.get_optimizer_config + ~Sup3rCondMom.get_optimizer_state + ~Sup3rCondMom.get_s_enhance_from_layers + ~Sup3rCondMom.get_single_grad + ~Sup3rCondMom.get_t_enhance_from_layers + ~Sup3rCondMom.init_optimizer + ~Sup3rCondMom.load + ~Sup3rCondMom.load_network + ~Sup3rCondMom.load_saved_params + ~Sup3rCondMom.log_loss_details + ~Sup3rCondMom.norm_input + ~Sup3rCondMom.profile_to_tensorboard + ~Sup3rCondMom.run_gradient_descent + ~Sup3rCondMom.save + ~Sup3rCondMom.save_params + ~Sup3rCondMom.seed + ~Sup3rCondMom.set_model_params + ~Sup3rCondMom.set_norm_stats + ~Sup3rCondMom.train + ~Sup3rCondMom.train_epoch + ~Sup3rCondMom.un_norm_output + ~Sup3rCondMom.update_loss_details + ~Sup3rCondMom.update_optimizer + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sup3rCondMom.generator + ~Sup3rCondMom.generator_weights + ~Sup3rCondMom.history + ~Sup3rCondMom.hr_exo_features + ~Sup3rCondMom.hr_out_features + ~Sup3rCondMom.input_dims + ~Sup3rCondMom.input_resolution + ~Sup3rCondMom.is_4d + ~Sup3rCondMom.is_5d + ~Sup3rCondMom.lr_features + ~Sup3rCondMom.means + ~Sup3rCondMom.meta + ~Sup3rCondMom.model_params + ~Sup3rCondMom.optimizer + ~Sup3rCondMom.output_resolution + ~Sup3rCondMom.s_enhance + ~Sup3rCondMom.s_enhancements + ~Sup3rCondMom.smoothed_features + ~Sup3rCondMom.smoothing + ~Sup3rCondMom.stdevs + ~Sup3rCondMom.t_enhance + ~Sup3rCondMom.t_enhancements + ~Sup3rCondMom.total_batches + ~Sup3rCondMom.version_record + ~Sup3rCondMom.weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.conditional.rst b/_sources/_autosummary/sup3r.models.conditional.rst new file mode 100644 index 000000000..793e790bf --- /dev/null +++ b/_sources/_autosummary/sup3r.models.conditional.rst @@ -0,0 +1,31 @@ +sup3r.models.conditional +======================== + +.. automodule:: sup3r.models.conditional + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sup3rCondMom + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.dc.Sup3rGanDC.rst b/_sources/_autosummary/sup3r.models.dc.Sup3rGanDC.rst new file mode 100644 index 000000000..f1d2087d8 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.dc.Sup3rGanDC.rst @@ -0,0 +1,97 @@ +sup3r.models.dc.Sup3rGanDC +========================== + +.. currentmodule:: sup3r.models.dc + +.. autoclass:: Sup3rGanDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~Sup3rGanDC.calc_loss + ~Sup3rGanDC.calc_loss_disc + ~Sup3rGanDC.calc_loss_gen_advers + ~Sup3rGanDC.calc_loss_gen_content + ~Sup3rGanDC.calc_val_loss + ~Sup3rGanDC.calc_val_loss_gen + ~Sup3rGanDC.calc_val_loss_gen_content + ~Sup3rGanDC.check_batch_handler_attrs + ~Sup3rGanDC.dict_to_tensorboard + ~Sup3rGanDC.discriminate + ~Sup3rGanDC.early_stop + ~Sup3rGanDC.finish_epoch + ~Sup3rGanDC.generate + ~Sup3rGanDC.get_high_res_exo_input + ~Sup3rGanDC.get_loss_fun + ~Sup3rGanDC.get_optimizer_config + ~Sup3rGanDC.get_optimizer_state + ~Sup3rGanDC.get_s_enhance_from_layers + ~Sup3rGanDC.get_single_grad + ~Sup3rGanDC.get_t_enhance_from_layers + ~Sup3rGanDC.get_weight_update_fraction + ~Sup3rGanDC.init_optimizer + ~Sup3rGanDC.init_weights + ~Sup3rGanDC.load + ~Sup3rGanDC.load_network + ~Sup3rGanDC.load_saved_params + ~Sup3rGanDC.log_loss_details + ~Sup3rGanDC.norm_input + ~Sup3rGanDC.profile_to_tensorboard + ~Sup3rGanDC.run_gradient_descent + ~Sup3rGanDC.save + ~Sup3rGanDC.save_params + ~Sup3rGanDC.seed + ~Sup3rGanDC.set_model_params + ~Sup3rGanDC.set_norm_stats + ~Sup3rGanDC.train + ~Sup3rGanDC.train_epoch + ~Sup3rGanDC.un_norm_output + ~Sup3rGanDC.update_adversarial_weights + ~Sup3rGanDC.update_loss_details + ~Sup3rGanDC.update_optimizer + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sup3rGanDC.discriminator + ~Sup3rGanDC.discriminator_weights + ~Sup3rGanDC.generator + ~Sup3rGanDC.generator_weights + ~Sup3rGanDC.history + ~Sup3rGanDC.hr_exo_features + ~Sup3rGanDC.hr_out_features + ~Sup3rGanDC.input_dims + ~Sup3rGanDC.input_resolution + ~Sup3rGanDC.is_4d + ~Sup3rGanDC.is_5d + ~Sup3rGanDC.lr_features + ~Sup3rGanDC.means + ~Sup3rGanDC.meta + ~Sup3rGanDC.model_params + ~Sup3rGanDC.optimizer + ~Sup3rGanDC.optimizer_disc + ~Sup3rGanDC.output_resolution + ~Sup3rGanDC.s_enhance + ~Sup3rGanDC.s_enhancements + ~Sup3rGanDC.smoothed_features + ~Sup3rGanDC.smoothing + ~Sup3rGanDC.stdevs + ~Sup3rGanDC.t_enhance + ~Sup3rGanDC.t_enhancements + ~Sup3rGanDC.total_batches + ~Sup3rGanDC.version_record + ~Sup3rGanDC.weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.dc.rst b/_sources/_autosummary/sup3r.models.dc.rst new file mode 100644 index 000000000..ebbcb7114 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.dc.rst @@ -0,0 +1,31 @@ +sup3r.models.dc +=============== + +.. automodule:: sup3r.models.dc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sup3rGanDC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.linear.LinearInterp.rst b/_sources/_autosummary/sup3r.models.linear.LinearInterp.rst new file mode 100644 index 000000000..da57c2027 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.linear.LinearInterp.rst @@ -0,0 +1,53 @@ +sup3r.models.linear.LinearInterp +================================ + +.. currentmodule:: sup3r.models.linear + +.. autoclass:: LinearInterp + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~LinearInterp.generate + ~LinearInterp.get_s_enhance_from_layers + ~LinearInterp.get_t_enhance_from_layers + ~LinearInterp.load + ~LinearInterp.save + ~LinearInterp.save_params + ~LinearInterp.seed + ~LinearInterp.set_model_params + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~LinearInterp.hr_exo_features + ~LinearInterp.hr_out_features + ~LinearInterp.input_dims + ~LinearInterp.input_resolution + ~LinearInterp.is_4d + ~LinearInterp.is_5d + ~LinearInterp.lr_features + ~LinearInterp.meta + ~LinearInterp.model_params + ~LinearInterp.output_resolution + ~LinearInterp.s_enhance + ~LinearInterp.s_enhancements + ~LinearInterp.smoothed_features + ~LinearInterp.smoothing + ~LinearInterp.t_enhance + ~LinearInterp.t_enhancements + ~LinearInterp.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.linear.rst b/_sources/_autosummary/sup3r.models.linear.rst new file mode 100644 index 000000000..b352ecd45 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.linear.rst @@ -0,0 +1,31 @@ +sup3r.models.linear +=================== + +.. automodule:: sup3r.models.linear + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + LinearInterp + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.multi_step.MultiStepGan.rst b/_sources/_autosummary/sup3r.models.multi_step.MultiStepGan.rst new file mode 100644 index 000000000..253c3385f --- /dev/null +++ b/_sources/_autosummary/sup3r.models.multi_step.MultiStepGan.rst @@ -0,0 +1,55 @@ +sup3r.models.multi\_step.MultiStepGan +===================================== + +.. currentmodule:: sup3r.models.multi_step + +.. autoclass:: MultiStepGan + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MultiStepGan.generate + ~MultiStepGan.get_s_enhance_from_layers + ~MultiStepGan.get_t_enhance_from_layers + ~MultiStepGan.load + ~MultiStepGan.save_params + ~MultiStepGan.seed + ~MultiStepGan.set_model_params + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MultiStepGan.hr_exo_features + ~MultiStepGan.hr_out_features + ~MultiStepGan.input_dims + ~MultiStepGan.input_resolution + ~MultiStepGan.is_4d + ~MultiStepGan.is_5d + ~MultiStepGan.lr_features + ~MultiStepGan.means + ~MultiStepGan.meta + ~MultiStepGan.model_params + ~MultiStepGan.models + ~MultiStepGan.output_resolution + ~MultiStepGan.s_enhance + ~MultiStepGan.s_enhancements + ~MultiStepGan.smoothed_features + ~MultiStepGan.smoothing + ~MultiStepGan.stdevs + ~MultiStepGan.t_enhance + ~MultiStepGan.t_enhancements + ~MultiStepGan.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.rst b/_sources/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.rst new file mode 100644 index 000000000..fc7fa6feb --- /dev/null +++ b/_sources/_autosummary/sup3r.models.multi_step.MultiStepSurfaceMetGan.rst @@ -0,0 +1,55 @@ +sup3r.models.multi\_step.MultiStepSurfaceMetGan +=============================================== + +.. currentmodule:: sup3r.models.multi_step + +.. autoclass:: MultiStepSurfaceMetGan + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MultiStepSurfaceMetGan.generate + ~MultiStepSurfaceMetGan.get_s_enhance_from_layers + ~MultiStepSurfaceMetGan.get_t_enhance_from_layers + ~MultiStepSurfaceMetGan.load + ~MultiStepSurfaceMetGan.save_params + ~MultiStepSurfaceMetGan.seed + ~MultiStepSurfaceMetGan.set_model_params + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~MultiStepSurfaceMetGan.hr_exo_features + ~MultiStepSurfaceMetGan.hr_out_features + ~MultiStepSurfaceMetGan.input_dims + ~MultiStepSurfaceMetGan.input_resolution + ~MultiStepSurfaceMetGan.is_4d + ~MultiStepSurfaceMetGan.is_5d + ~MultiStepSurfaceMetGan.lr_features + ~MultiStepSurfaceMetGan.means + ~MultiStepSurfaceMetGan.meta + ~MultiStepSurfaceMetGan.model_params + ~MultiStepSurfaceMetGan.models + ~MultiStepSurfaceMetGan.output_resolution + ~MultiStepSurfaceMetGan.s_enhance + ~MultiStepSurfaceMetGan.s_enhancements + ~MultiStepSurfaceMetGan.smoothed_features + ~MultiStepSurfaceMetGan.smoothing + ~MultiStepSurfaceMetGan.stdevs + ~MultiStepSurfaceMetGan.t_enhance + ~MultiStepSurfaceMetGan.t_enhancements + ~MultiStepSurfaceMetGan.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.rst b/_sources/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.rst new file mode 100644 index 000000000..be7623647 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.multi_step.SolarMultiStepGan.rst @@ -0,0 +1,63 @@ +sup3r.models.multi\_step.SolarMultiStepGan +========================================== + +.. currentmodule:: sup3r.models.multi_step + +.. autoclass:: SolarMultiStepGan + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SolarMultiStepGan.generate + ~SolarMultiStepGan.get_s_enhance_from_layers + ~SolarMultiStepGan.get_t_enhance_from_layers + ~SolarMultiStepGan.load + ~SolarMultiStepGan.preflight + ~SolarMultiStepGan.save_params + ~SolarMultiStepGan.seed + ~SolarMultiStepGan.set_model_params + ~SolarMultiStepGan.temporal_pad + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SolarMultiStepGan.hr_exo_features + ~SolarMultiStepGan.hr_out_features + ~SolarMultiStepGan.idf_solar + ~SolarMultiStepGan.idf_wind + ~SolarMultiStepGan.idf_wind_out + ~SolarMultiStepGan.input_dims + ~SolarMultiStepGan.input_resolution + ~SolarMultiStepGan.is_4d + ~SolarMultiStepGan.is_5d + ~SolarMultiStepGan.lr_features + ~SolarMultiStepGan.means + ~SolarMultiStepGan.meta + ~SolarMultiStepGan.model_params + ~SolarMultiStepGan.models + ~SolarMultiStepGan.output_resolution + ~SolarMultiStepGan.s_enhance + ~SolarMultiStepGan.s_enhancements + ~SolarMultiStepGan.smoothed_features + ~SolarMultiStepGan.smoothing + ~SolarMultiStepGan.spatial_solar_models + ~SolarMultiStepGan.spatial_wind_models + ~SolarMultiStepGan.stdevs + ~SolarMultiStepGan.t_enhance + ~SolarMultiStepGan.t_enhancements + ~SolarMultiStepGan.temporal_solar_models + ~SolarMultiStepGan.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.multi_step.rst b/_sources/_autosummary/sup3r.models.multi_step.rst new file mode 100644 index 000000000..daea49ebd --- /dev/null +++ b/_sources/_autosummary/sup3r.models.multi_step.rst @@ -0,0 +1,33 @@ +sup3r.models.multi\_step +======================== + +.. automodule:: sup3r.models.multi_step + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + MultiStepGan + MultiStepSurfaceMetGan + SolarMultiStepGan + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.rst b/_sources/_autosummary/sup3r.models.rst new file mode 100644 index 000000000..2e5db312c --- /dev/null +++ b/_sources/_autosummary/sup3r.models.rst @@ -0,0 +1,38 @@ +sup3r.models +============ + +.. 
automodule:: sup3r.models + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + abstract + base + conditional + dc + linear + multi_step + solar_cc + surface + utilities + diff --git a/_sources/_autosummary/sup3r.models.solar_cc.SolarCC.rst b/_sources/_autosummary/sup3r.models.solar_cc.SolarCC.rst new file mode 100644 index 000000000..f79b4d198 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.solar_cc.SolarCC.rst @@ -0,0 +1,99 @@ +sup3r.models.solar\_cc.SolarCC +============================== + +.. currentmodule:: sup3r.models.solar_cc + +.. autoclass:: SolarCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SolarCC.calc_loss + ~SolarCC.calc_loss_disc + ~SolarCC.calc_loss_gen_advers + ~SolarCC.calc_loss_gen_content + ~SolarCC.calc_val_loss + ~SolarCC.check_batch_handler_attrs + ~SolarCC.dict_to_tensorboard + ~SolarCC.discriminate + ~SolarCC.early_stop + ~SolarCC.finish_epoch + ~SolarCC.generate + ~SolarCC.get_high_res_exo_input + ~SolarCC.get_loss_fun + ~SolarCC.get_optimizer_config + ~SolarCC.get_optimizer_state + ~SolarCC.get_s_enhance_from_layers + ~SolarCC.get_single_grad + ~SolarCC.get_t_enhance_from_layers + ~SolarCC.get_weight_update_fraction + ~SolarCC.init_optimizer + ~SolarCC.init_weights + ~SolarCC.load + ~SolarCC.load_network + ~SolarCC.load_saved_params + ~SolarCC.log_loss_details + ~SolarCC.norm_input + ~SolarCC.profile_to_tensorboard + ~SolarCC.run_gradient_descent + ~SolarCC.save + ~SolarCC.save_params + ~SolarCC.seed + ~SolarCC.set_model_params + ~SolarCC.set_norm_stats + ~SolarCC.temporal_pad + ~SolarCC.train + ~SolarCC.train_epoch + ~SolarCC.un_norm_output + ~SolarCC.update_adversarial_weights + ~SolarCC.update_loss_details + ~SolarCC.update_optimizer + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SolarCC.DAYLIGHT_HOURS + ~SolarCC.STARTING_HOUR + ~SolarCC.STRIDE_LEN + ~SolarCC.discriminator + ~SolarCC.discriminator_weights + ~SolarCC.generator + ~SolarCC.generator_weights + ~SolarCC.history + ~SolarCC.hr_exo_features + ~SolarCC.hr_out_features + ~SolarCC.input_dims + ~SolarCC.input_resolution + ~SolarCC.is_4d + ~SolarCC.is_5d + ~SolarCC.lr_features + ~SolarCC.means + ~SolarCC.meta + ~SolarCC.model_params + ~SolarCC.optimizer + ~SolarCC.optimizer_disc + ~SolarCC.output_resolution + ~SolarCC.s_enhance + ~SolarCC.s_enhancements + ~SolarCC.smoothed_features + ~SolarCC.smoothing + ~SolarCC.stdevs + ~SolarCC.t_enhance + ~SolarCC.t_enhancements + ~SolarCC.total_batches + ~SolarCC.version_record + ~SolarCC.weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.solar_cc.rst b/_sources/_autosummary/sup3r.models.solar_cc.rst new file mode 100644 index 000000000..445aa390a --- /dev/null +++ b/_sources/_autosummary/sup3r.models.solar_cc.rst @@ -0,0 +1,31 @@ +sup3r.models.solar\_cc +====================== + +.. automodule:: sup3r.models.solar_cc + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + SolarCC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.rst b/_sources/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.rst new file mode 100644 index 000000000..bf6ce371b --- /dev/null +++ b/_sources/_autosummary/sup3r.models.surface.SurfaceSpatialMetModel.rst @@ -0,0 +1,68 @@ +sup3r.models.surface.SurfaceSpatialMetModel +=========================================== + +.. currentmodule:: sup3r.models.surface + +.. autoclass:: SurfaceSpatialMetModel + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SurfaceSpatialMetModel.downscale_arr + ~SurfaceSpatialMetModel.downscale_pres + ~SurfaceSpatialMetModel.downscale_rh + ~SurfaceSpatialMetModel.downscale_temp + ~SurfaceSpatialMetModel.fix_downscaled_bias + ~SurfaceSpatialMetModel.generate + ~SurfaceSpatialMetModel.get_s_enhance_from_layers + ~SurfaceSpatialMetModel.get_t_enhance_from_layers + ~SurfaceSpatialMetModel.load + ~SurfaceSpatialMetModel.save + ~SurfaceSpatialMetModel.save_params + ~SurfaceSpatialMetModel.seed + ~SurfaceSpatialMetModel.set_model_params + ~SurfaceSpatialMetModel.train + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SurfaceSpatialMetModel.PRES_DIV + ~SurfaceSpatialMetModel.PRES_EXP + ~SurfaceSpatialMetModel.TEMP_LAPSE + ~SurfaceSpatialMetModel.W_DELTA_TEMP + ~SurfaceSpatialMetModel.W_DELTA_TOPO + ~SurfaceSpatialMetModel.feature_inds_other + ~SurfaceSpatialMetModel.feature_inds_pres + ~SurfaceSpatialMetModel.feature_inds_rh + ~SurfaceSpatialMetModel.feature_inds_temp + ~SurfaceSpatialMetModel.hr_exo_features + ~SurfaceSpatialMetModel.hr_out_features + ~SurfaceSpatialMetModel.input_dims + ~SurfaceSpatialMetModel.input_resolution + ~SurfaceSpatialMetModel.is_4d + ~SurfaceSpatialMetModel.is_5d + ~SurfaceSpatialMetModel.lr_features + ~SurfaceSpatialMetModel.meta + ~SurfaceSpatialMetModel.model_params + ~SurfaceSpatialMetModel.output_resolution + ~SurfaceSpatialMetModel.s_enhance + ~SurfaceSpatialMetModel.s_enhancements + ~SurfaceSpatialMetModel.smoothed_features + ~SurfaceSpatialMetModel.smoothing + ~SurfaceSpatialMetModel.t_enhance + ~SurfaceSpatialMetModel.t_enhancements + ~SurfaceSpatialMetModel.version_record + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.surface.rst b/_sources/_autosummary/sup3r.models.surface.rst new file mode 100644 index 000000000..d578f3579 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.surface.rst @@ -0,0 +1,31 @@ +sup3r.models.surface +==================== + +.. automodule:: sup3r.models.surface + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SurfaceSpatialMetModel + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.utilities.TrainingSession.rst b/_sources/_autosummary/sup3r.models.utilities.TrainingSession.rst new file mode 100644 index 000000000..abe8dd705 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.utilities.TrainingSession.rst @@ -0,0 +1,24 @@ +sup3r.models.utilities.TrainingSession +====================================== + +.. currentmodule:: sup3r.models.utilities + +.. autoclass:: TrainingSession + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~TrainingSession.run + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.utilities.get_optimizer_class.rst b/_sources/_autosummary/sup3r.models.utilities.get_optimizer_class.rst new file mode 100644 index 000000000..67f56ec3d --- /dev/null +++ b/_sources/_autosummary/sup3r.models.utilities.get_optimizer_class.rst @@ -0,0 +1,6 @@ +sup3r.models.utilities.get\_optimizer\_class +============================================ + +.. currentmodule:: sup3r.models.utilities + +.. autofunction:: get_optimizer_class \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.models.utilities.rst b/_sources/_autosummary/sup3r.models.utilities.rst new file mode 100644 index 000000000..1ce328fa7 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.utilities.rst @@ -0,0 +1,39 @@ +sup3r.models.utilities +====================== + +.. automodule:: sup3r.models.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + get_optimizer_class + st_interp + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + TrainingSession + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.models.utilities.st_interp.rst b/_sources/_autosummary/sup3r.models.utilities.st_interp.rst new file mode 100644 index 000000000..518306f06 --- /dev/null +++ b/_sources/_autosummary/sup3r.models.utilities.st_interp.rst @@ -0,0 +1,6 @@ +sup3r.models.utilities.st\_interp +================================= + +.. currentmodule:: sup3r.models.utilities + +.. autofunction:: st_interp \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.forward_pass.ForwardPass.rst b/_sources/_autosummary/sup3r.pipeline.forward_pass.ForwardPass.rst new file mode 100644 index 000000000..2c607597b --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.forward_pass.ForwardPass.rst @@ -0,0 +1,36 @@ +sup3r.pipeline.forward\_pass.ForwardPass +======================================== + +.. currentmodule:: sup3r.pipeline.forward_pass + +.. autoclass:: ForwardPass + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ForwardPass.get_input_chunk + ~ForwardPass.get_node_cmd + ~ForwardPass.pad_source_data + ~ForwardPass.run + ~ForwardPass.run_chunk + ~ForwardPass.run_generator + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ForwardPass.OUTPUT_HANDLER_CLASS + ~ForwardPass.meta + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.forward_pass.rst b/_sources/_autosummary/sup3r.pipeline.forward_pass.rst new file mode 100644 index 000000000..d433da0e7 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.forward_pass.rst @@ -0,0 +1,31 @@ +sup3r.pipeline.forward\_pass +============================ + +.. automodule:: sup3r.pipeline.forward_pass + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ForwardPass + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_local_job.rst b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_local_job.rst new file mode 100644 index 000000000..82588fc5a --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_local_job.rst @@ -0,0 +1,6 @@ +sup3r.pipeline.forward\_pass\_cli.kickoff\_local\_job +===================================================== + +.. 
currentmodule:: sup3r.pipeline.forward_pass_cli + +.. autofunction:: kickoff_local_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_slurm_job.rst b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_slurm_job.rst new file mode 100644 index 000000000..477be7d63 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.kickoff_slurm_job.rst @@ -0,0 +1,6 @@ +sup3r.pipeline.forward\_pass\_cli.kickoff\_slurm\_job +===================================================== + +.. currentmodule:: sup3r.pipeline.forward_pass_cli + +.. autofunction:: kickoff_slurm_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.rst b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.rst new file mode 100644 index 000000000..ebb07df37 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.forward_pass_cli.rst @@ -0,0 +1,31 @@ +sup3r.pipeline.forward\_pass\_cli +================================= + +.. automodule:: sup3r.pipeline.forward_pass_cli + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + kickoff_local_job + kickoff_slurm_job + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.pipeline.pipeline_cli.rst b/_sources/_autosummary/sup3r.pipeline.pipeline_cli.rst new file mode 100644 index 000000000..c751f1b1d --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.pipeline_cli.rst @@ -0,0 +1,23 @@ +sup3r.pipeline.pipeline\_cli +============================ + +.. automodule:: sup3r.pipeline.pipeline_cli + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.pipeline.rst b/_sources/_autosummary/sup3r.pipeline.rst new file mode 100644 index 000000000..dd3ce1e18 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.rst @@ -0,0 +1,35 @@ +sup3r.pipeline +============== + +.. automodule:: sup3r.pipeline + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + forward_pass + forward_pass_cli + pipeline_cli + slicer + strategy + utilities + diff --git a/_sources/_autosummary/sup3r.pipeline.slicer.ForwardPassSlicer.rst b/_sources/_autosummary/sup3r.pipeline.slicer.ForwardPassSlicer.rst new file mode 100644 index 000000000..5c772e66a --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.slicer.ForwardPassSlicer.rst @@ -0,0 +1,62 @@ +sup3r.pipeline.slicer.ForwardPassSlicer +======================================= + +.. currentmodule:: sup3r.pipeline.slicer + +.. autoclass:: ForwardPassSlicer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ForwardPassSlicer.get_cropped_slices + ~ForwardPassSlicer.get_hr_slices + ~ForwardPassSlicer.get_padded_slices + ~ForwardPassSlicer.get_spatial_slices + ~ForwardPassSlicer.get_time_slices + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ForwardPassSlicer.chunk_lookup + ~ForwardPassSlicer.hr_crop_slices + ~ForwardPassSlicer.n_chunks + ~ForwardPassSlicer.n_spatial_chunks + ~ForwardPassSlicer.n_time_chunks + ~ForwardPassSlicer.s1_hr_slices + ~ForwardPassSlicer.s1_lr_pad_slices + ~ForwardPassSlicer.s1_lr_slices + ~ForwardPassSlicer.s2_hr_slices + ~ForwardPassSlicer.s2_lr_pad_slices + ~ForwardPassSlicer.s2_lr_slices + ~ForwardPassSlicer.s_hr_crop_slices + ~ForwardPassSlicer.s_hr_slices + ~ForwardPassSlicer.s_lr_crop_slices + ~ForwardPassSlicer.s_lr_pad_slices + ~ForwardPassSlicer.s_lr_slices + ~ForwardPassSlicer.spatial_chunk_lookup + ~ForwardPassSlicer.t_hr_crop_slices + ~ForwardPassSlicer.t_lr_crop_slices + ~ForwardPassSlicer.t_lr_pad_slices + ~ForwardPassSlicer.t_lr_slices + ~ForwardPassSlicer.coarse_shape + ~ForwardPassSlicer.time_steps + ~ForwardPassSlicer.s_enhance + ~ForwardPassSlicer.t_enhance + ~ForwardPassSlicer.time_slice + ~ForwardPassSlicer.temporal_pad + ~ForwardPassSlicer.spatial_pad + ~ForwardPassSlicer.chunk_shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.slicer.rst b/_sources/_autosummary/sup3r.pipeline.slicer.rst new file mode 100644 index 000000000..1d5ed3752 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.slicer.rst @@ -0,0 +1,31 @@ +sup3r.pipeline.slicer +===================== + +.. automodule:: sup3r.pipeline.slicer + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ForwardPassSlicer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassChunk.rst b/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassChunk.rst new file mode 100644 index 000000000..f856a718b --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassChunk.rst @@ -0,0 +1,38 @@ +sup3r.pipeline.strategy.ForwardPassChunk +======================================== + +.. currentmodule:: sup3r.pipeline.strategy + +.. autoclass:: ForwardPassChunk + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ForwardPassChunk.input_data + ~ForwardPassChunk.exo_data + ~ForwardPassChunk.hr_crop_slice + ~ForwardPassChunk.lr_pad_slice + ~ForwardPassChunk.hr_lat_lon + ~ForwardPassChunk.hr_times + ~ForwardPassChunk.gids + ~ForwardPassChunk.out_file + ~ForwardPassChunk.pad_width + ~ForwardPassChunk.index + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassStrategy.rst b/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassStrategy.rst new file mode 100644 index 000000000..06e45ee6d --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.strategy.ForwardPassStrategy.rst @@ -0,0 +1,65 @@ +sup3r.pipeline.strategy.ForwardPassStrategy +=========================================== + +.. currentmodule:: sup3r.pipeline.strategy + +.. autoclass:: ForwardPassStrategy + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~ForwardPassStrategy.chunk_finished + ~ForwardPassStrategy.chunk_masked + ~ForwardPassStrategy.get_chunk_indices + ~ForwardPassStrategy.get_exo_cache_files + ~ForwardPassStrategy.get_exo_kwargs + ~ForwardPassStrategy.get_pad_width + ~ForwardPassStrategy.init_chunk + ~ForwardPassStrategy.init_input_handler + ~ForwardPassStrategy.load_exo_data + ~ForwardPassStrategy.node_finished + ~ForwardPassStrategy.preflight + ~ForwardPassStrategy.prep_chunk_data + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ForwardPassStrategy.allowed_const + ~ForwardPassStrategy.bias_correct_kwargs + ~ForwardPassStrategy.bias_correct_method + ~ForwardPassStrategy.exo_handler_kwargs + ~ForwardPassStrategy.fwp_chunk_shape + ~ForwardPassStrategy.fwp_mask + ~ForwardPassStrategy.head_node + ~ForwardPassStrategy.hr_lat_lon + ~ForwardPassStrategy.incremental + ~ForwardPassStrategy.input_handler_kwargs + ~ForwardPassStrategy.input_handler_name + ~ForwardPassStrategy.invert_uv + ~ForwardPassStrategy.max_nodes + ~ForwardPassStrategy.meta + ~ForwardPassStrategy.model_class + ~ForwardPassStrategy.node_chunks + ~ForwardPassStrategy.out_files + ~ForwardPassStrategy.out_pattern + ~ForwardPassStrategy.output_workers + ~ForwardPassStrategy.pass_workers + ~ForwardPassStrategy.spatial_pad + ~ForwardPassStrategy.temporal_pad + ~ForwardPassStrategy.unmasked_chunks + ~ForwardPassStrategy.file_paths + ~ForwardPassStrategy.model_kwargs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.strategy.rst b/_sources/_autosummary/sup3r.pipeline.strategy.rst new file mode 100644 index 000000000..0fc9521b4 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.strategy.rst @@ -0,0 +1,32 @@ +sup3r.pipeline.strategy +======================= + +.. automodule:: sup3r.pipeline.strategy + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ForwardPassChunk + ForwardPassStrategy + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.pipeline.utilities.get_chunk_slices.rst b/_sources/_autosummary/sup3r.pipeline.utilities.get_chunk_slices.rst new file mode 100644 index 000000000..6ca7587de --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.utilities.get_chunk_slices.rst @@ -0,0 +1,6 @@ +sup3r.pipeline.utilities.get\_chunk\_slices +=========================================== + +.. currentmodule:: sup3r.pipeline.utilities + +.. autofunction:: get_chunk_slices \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.utilities.get_model.rst b/_sources/_autosummary/sup3r.pipeline.utilities.get_model.rst new file mode 100644 index 000000000..b009ccc39 --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.utilities.get_model.rst @@ -0,0 +1,6 @@ +sup3r.pipeline.utilities.get\_model +=================================== + +.. currentmodule:: sup3r.pipeline.utilities + +.. autofunction:: get_model \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.pipeline.utilities.rst b/_sources/_autosummary/sup3r.pipeline.utilities.rst new file mode 100644 index 000000000..c9fe1833f --- /dev/null +++ b/_sources/_autosummary/sup3r.pipeline.utilities.rst @@ -0,0 +1,31 @@ +sup3r.pipeline.utilities +======================== + +.. automodule:: sup3r.pipeline.utilities + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + get_chunk_slices + get_model + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.base.BaseCollector.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.base.BaseCollector.rst new file mode 100644 index 000000000..b4d580339 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.base.BaseCollector.rst @@ -0,0 +1,29 @@ +sup3r.postprocessing.collectors.base.BaseCollector +================================================== + +.. currentmodule:: sup3r.postprocessing.collectors.base + +.. autoclass:: BaseCollector + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseCollector.collect + ~BaseCollector.get_chunk_indices + ~BaseCollector.get_dset_attrs + ~BaseCollector.get_node_cmd + ~BaseCollector.get_time_dim_name + ~BaseCollector.write_data + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.base.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.base.rst new file mode 100644 index 000000000..ad9936f61 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.base.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.collectors.base +==================================== + +.. automodule:: sup3r.postprocessing.collectors.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseCollector + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.h5.CollectorH5.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.h5.CollectorH5.rst new file mode 100644 index 000000000..d2aa0d8ce --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.h5.CollectorH5.rst @@ -0,0 +1,37 @@ +sup3r.postprocessing.collectors.h5.CollectorH5 +============================================== + +.. currentmodule:: sup3r.postprocessing.collectors.h5 + +.. autoclass:: CollectorH5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~CollectorH5.collect + ~CollectorH5.collect_feature + ~CollectorH5.get_chunk_indices + ~CollectorH5.get_collection_attrs + ~CollectorH5.get_coordinate_indices + ~CollectorH5.get_data + ~CollectorH5.get_dset_attrs + ~CollectorH5.get_flist_chunks + ~CollectorH5.get_node_cmd + ~CollectorH5.get_slices + ~CollectorH5.get_target_and_masked_meta + ~CollectorH5.get_time_dim_name + ~CollectorH5.get_unique_chunk_files + ~CollectorH5.write_data + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.h5.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.h5.rst new file mode 100644 index 000000000..1e16ad74d --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.h5.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.collectors.h5 +================================== + +.. automodule:: sup3r.postprocessing.collectors.h5 + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + CollectorH5 + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.nc.CollectorNC.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.nc.CollectorNC.rst new file mode 100644 index 000000000..45b7027b4 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.nc.CollectorNC.rst @@ -0,0 +1,30 @@ +sup3r.postprocessing.collectors.nc.CollectorNC +============================================== + +.. currentmodule:: sup3r.postprocessing.collectors.nc + +.. autoclass:: CollectorNC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~CollectorNC.collect + ~CollectorNC.get_chunk_indices + ~CollectorNC.get_dset_attrs + ~CollectorNC.get_node_cmd + ~CollectorNC.get_time_dim_name + ~CollectorNC.group_spatial_chunks + ~CollectorNC.write_data + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.nc.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.nc.rst new file mode 100644 index 000000000..5994c9bf2 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.nc.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.collectors.nc +================================== + +.. automodule:: sup3r.postprocessing.collectors.nc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + CollectorNC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.collectors.rst b/_sources/_autosummary/sup3r.postprocessing.collectors.rst new file mode 100644 index 000000000..bec0ff6d2 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.collectors.rst @@ -0,0 +1,32 @@ +sup3r.postprocessing.collectors +=============================== + +.. automodule:: sup3r.postprocessing.collectors + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + h5 + nc + diff --git a/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_local_job.rst b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_local_job.rst new file mode 100644 index 000000000..5f645a597 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_local_job.rst @@ -0,0 +1,6 @@ +sup3r.postprocessing.data\_collect\_cli.kickoff\_local\_job +=========================================================== + +.. currentmodule:: sup3r.postprocessing.data_collect_cli + +.. autofunction:: kickoff_local_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_slurm_job.rst b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_slurm_job.rst new file mode 100644 index 000000000..36185fad0 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.kickoff_slurm_job.rst @@ -0,0 +1,6 @@ +sup3r.postprocessing.data\_collect\_cli.kickoff\_slurm\_job +=========================================================== + +.. currentmodule:: sup3r.postprocessing.data_collect_cli + +.. 
autofunction:: kickoff_slurm_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.rst b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.rst new file mode 100644 index 000000000..39bfcdfd5 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.data_collect_cli.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.data\_collect\_cli +======================================= + +.. automodule:: sup3r.postprocessing.data_collect_cli + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + kickoff_local_job + kickoff_slurm_job + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.rst b/_sources/_autosummary/sup3r.postprocessing.rst new file mode 100644 index 000000000..501238154 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.rst @@ -0,0 +1,32 @@ +sup3r.postprocessing +==================== + +.. automodule:: sup3r.postprocessing + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + collectors + data_collect_cli + writers + diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputHandler.rst b/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputHandler.rst new file mode 100644 index 000000000..bab9c76a9 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputHandler.rst @@ -0,0 +1,35 @@ +sup3r.postprocessing.writers.base.OutputHandler +=============================================== + +.. currentmodule:: sup3r.postprocessing.writers.base + +.. autoclass:: OutputHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutputHandler.enforce_limits + ~OutputHandler.get_dset_attrs + ~OutputHandler.get_lat_lon + ~OutputHandler.get_renamed_features + ~OutputHandler.get_time_dim_name + ~OutputHandler.get_times + ~OutputHandler.invert_uv_features + ~OutputHandler.invert_uv_single_pair + ~OutputHandler.is_increasing_lons + ~OutputHandler.pad_lat_lon + ~OutputHandler.write_data + ~OutputHandler.write_output + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputMixin.rst b/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputMixin.rst new file mode 100644 index 000000000..53e773990 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.base.OutputMixin.rst @@ -0,0 +1,26 @@ +sup3r.postprocessing.writers.base.OutputMixin +============================================= + +.. currentmodule:: sup3r.postprocessing.writers.base + +.. autoclass:: OutputMixin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutputMixin.get_dset_attrs + ~OutputMixin.get_time_dim_name + ~OutputMixin.write_data + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.base.RexOutputs.rst b/_sources/_autosummary/sup3r.postprocessing.writers.base.RexOutputs.rst new file mode 100644 index 000000000..ecd6e1113 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.base.RexOutputs.rst @@ -0,0 +1,77 @@ +sup3r.postprocessing.writers.base.RexOutputs +============================================ + +.. currentmodule:: sup3r.postprocessing.writers.base + +.. 
autoclass:: RexOutputs + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RexOutputs.add_dataset + ~RexOutputs.close + ~RexOutputs.df_str_decode + ~RexOutputs.get_SAM_df + ~RexOutputs.get_attrs + ~RexOutputs.get_config + ~RexOutputs.get_dset_properties + ~RexOutputs.get_meta_arr + ~RexOutputs.get_scale_factor + ~RexOutputs.get_units + ~RexOutputs.init_h5 + ~RexOutputs.open_dataset + ~RexOutputs.preload_SAM + ~RexOutputs.set_configs + ~RexOutputs.set_version_attr + ~RexOutputs.update_dset + ~RexOutputs.write_dataset + ~RexOutputs.write_means + ~RexOutputs.write_profiles + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~RexOutputs.ADD_ATTR + ~RexOutputs.SAM_configs + ~RexOutputs.SCALE_ATTR + ~RexOutputs.UNIT_ATTR + ~RexOutputs.adders + ~RexOutputs.attrs + ~RexOutputs.chunks + ~RexOutputs.coordinates + ~RexOutputs.data_version + ~RexOutputs.datasets + ~RexOutputs.dsets + ~RexOutputs.dtypes + ~RexOutputs.full_version_record + ~RexOutputs.global_attrs + ~RexOutputs.groups + ~RexOutputs.h5 + ~RexOutputs.lat_lon + ~RexOutputs.meta + ~RexOutputs.package + ~RexOutputs.res_dsets + ~RexOutputs.resource_datasets + ~RexOutputs.run_attrs + ~RexOutputs.scale_factors + ~RexOutputs.shape + ~RexOutputs.shapes + ~RexOutputs.source + ~RexOutputs.time_index + ~RexOutputs.units + ~RexOutputs.version + ~RexOutputs.writable + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.base.rst b/_sources/_autosummary/sup3r.postprocessing.writers.base.rst new file mode 100644 index 000000000..a8e753f02 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.base.rst @@ -0,0 +1,33 @@ +sup3r.postprocessing.writers.base +================================= + +.. automodule:: sup3r.postprocessing.writers.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + OutputHandler + OutputMixin + RexOutputs + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.rst b/_sources/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.rst new file mode 100644 index 000000000..1b79cf32b --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.h5.OutputHandlerH5.rst @@ -0,0 +1,35 @@ +sup3r.postprocessing.writers.h5.OutputHandlerH5 +=============================================== + +.. currentmodule:: sup3r.postprocessing.writers.h5 + +.. autoclass:: OutputHandlerH5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutputHandlerH5.enforce_limits + ~OutputHandlerH5.get_dset_attrs + ~OutputHandlerH5.get_lat_lon + ~OutputHandlerH5.get_renamed_features + ~OutputHandlerH5.get_time_dim_name + ~OutputHandlerH5.get_times + ~OutputHandlerH5.invert_uv_features + ~OutputHandlerH5.invert_uv_single_pair + ~OutputHandlerH5.is_increasing_lons + ~OutputHandlerH5.pad_lat_lon + ~OutputHandlerH5.write_data + ~OutputHandlerH5.write_output + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.h5.rst b/_sources/_autosummary/sup3r.postprocessing.writers.h5.rst new file mode 100644 index 000000000..5d94a7890 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.h5.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.writers.h5 +=============================== + +.. 
automodule:: sup3r.postprocessing.writers.h5 + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + OutputHandlerH5 + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.rst b/_sources/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.rst new file mode 100644 index 000000000..da7bed486 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.nc.OutputHandlerNC.rst @@ -0,0 +1,35 @@ +sup3r.postprocessing.writers.nc.OutputHandlerNC +=============================================== + +.. currentmodule:: sup3r.postprocessing.writers.nc + +.. autoclass:: OutputHandlerNC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutputHandlerNC.enforce_limits + ~OutputHandlerNC.get_dset_attrs + ~OutputHandlerNC.get_lat_lon + ~OutputHandlerNC.get_renamed_features + ~OutputHandlerNC.get_time_dim_name + ~OutputHandlerNC.get_times + ~OutputHandlerNC.invert_uv_features + ~OutputHandlerNC.invert_uv_single_pair + ~OutputHandlerNC.is_increasing_lons + ~OutputHandlerNC.pad_lat_lon + ~OutputHandlerNC.write_data + ~OutputHandlerNC.write_output + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.nc.rst b/_sources/_autosummary/sup3r.postprocessing.writers.nc.rst new file mode 100644 index 000000000..399264d01 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.nc.rst @@ -0,0 +1,31 @@ +sup3r.postprocessing.writers.nc +=============================== + +.. automodule:: sup3r.postprocessing.writers.nc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + OutputHandlerNC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.postprocessing.writers.rst b/_sources/_autosummary/sup3r.postprocessing.writers.rst new file mode 100644 index 000000000..99e2e0e38 --- /dev/null +++ b/_sources/_autosummary/sup3r.postprocessing.writers.rst @@ -0,0 +1,32 @@ +sup3r.postprocessing.writers +============================ + +.. automodule:: sup3r.postprocessing.writers + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + h5 + nc + diff --git a/_sources/_autosummary/sup3r.preprocessing.accessor.Sup3rX.rst b/_sources/_autosummary/sup3r.preprocessing.accessor.Sup3rX.rst new file mode 100644 index 000000000..f11c3041c --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.accessor.Sup3rX.rst @@ -0,0 +1,60 @@ +sup3r.preprocessing.accessor.Sup3rX +=================================== + +.. currentmodule:: sup3r.preprocessing.accessor + +.. autoclass:: Sup3rX + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rX.add_dims_to_data_vars + ~Sup3rX.as_array + ~Sup3rX.assign + ~Sup3rX.coarsen + ~Sup3rX.compute + ~Sup3rX.flatten + ~Sup3rX.interpolate_na + ~Sup3rX.isel + ~Sup3rX.mean + ~Sup3rX.normalize + ~Sup3rX.ordered + ~Sup3rX.qa + ~Sup3rX.sample + ~Sup3rX.std + ~Sup3rX.to_dataarray + ~Sup3rX.unflatten + ~Sup3rX.update_ds + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Sup3rX.dtype + ~Sup3rX.features + ~Sup3rX.flattened + ~Sup3rX.grid_shape + ~Sup3rX.lat_lon + ~Sup3rX.loaded + ~Sup3rX.meta + ~Sup3rX.name + ~Sup3rX.shape + ~Sup3rX.size + ~Sup3rX.target + ~Sup3rX.time_independent + ~Sup3rX.time_index + ~Sup3rX.time_step + ~Sup3rX.values + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.accessor.rst b/_sources/_autosummary/sup3r.preprocessing.accessor.rst new file mode 100644 index 000000000..37dc3baa7 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.accessor.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.accessor +============================ + +.. automodule:: sup3r.preprocessing.accessor + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sup3rX + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.base.Container.rst b/_sources/_autosummary/sup3r.preprocessing.base.Container.rst new file mode 100644 index 000000000..637982662 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.base.Container.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.base.Container +================================== + +.. currentmodule:: sup3r.preprocessing.base + +.. autoclass:: Container + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Container.post_init_log + ~Container.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Container.data + ~Container.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.base.Sup3rDataset.rst b/_sources/_autosummary/sup3r.preprocessing.base.Sup3rDataset.rst new file mode 100644 index 000000000..7ab8a597d --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.base.Sup3rDataset.rst @@ -0,0 +1,40 @@ +sup3r.preprocessing.base.Sup3rDataset +===================================== + +.. currentmodule:: sup3r.preprocessing.base + +.. autoclass:: Sup3rDataset + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rDataset.compute + ~Sup3rDataset.isel + ~Sup3rDataset.mean + ~Sup3rDataset.normalize + ~Sup3rDataset.rewrap + ~Sup3rDataset.sample + ~Sup3rDataset.std + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sup3rDataset.dtype + ~Sup3rDataset.features + ~Sup3rDataset.loaded + ~Sup3rDataset.shape + ~Sup3rDataset.size + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.base.Sup3rMeta.rst b/_sources/_autosummary/sup3r.preprocessing.base.Sup3rMeta.rst new file mode 100644 index 000000000..6bb172761 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.base.Sup3rMeta.rst @@ -0,0 +1,25 @@ +sup3r.preprocessing.base.Sup3rMeta +================================== + +.. currentmodule:: sup3r.preprocessing.base + +.. autoclass:: Sup3rMeta + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rMeta.mro + ~Sup3rMeta.register + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.base.rst b/_sources/_autosummary/sup3r.preprocessing.base.rst new file mode 100644 index 000000000..26eca149b --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.base.rst @@ -0,0 +1,33 @@ +sup3r.preprocessing.base +======================== + +.. 
automodule:: sup3r.preprocessing.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Container + Sup3rDataset + Sup3rMeta + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.rst new file mode 100644 index 000000000..5bc51a406 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.BatchHandlerDC.rst @@ -0,0 +1,59 @@ +sup3r.preprocessing.batch\_handlers.dc.BatchHandlerDC +===================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.dc + +.. autoclass:: BatchHandlerDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerDC.check_enhancement_factors + ~BatchHandlerDC.check_features + ~BatchHandlerDC.check_shared_attr + ~BatchHandlerDC.enqueue_batch + ~BatchHandlerDC.enqueue_batches + ~BatchHandlerDC.get_batch + ~BatchHandlerDC.get_container_index + ~BatchHandlerDC.get_queue + ~BatchHandlerDC.get_random_container + ~BatchHandlerDC.init_samplers + ~BatchHandlerDC.log_queue_info + ~BatchHandlerDC.post_init_log + ~BatchHandlerDC.post_proc + ~BatchHandlerDC.preflight + ~BatchHandlerDC.sample_batch + ~BatchHandlerDC.start + ~BatchHandlerDC.stop + ~BatchHandlerDC.transform + ~BatchHandlerDC.update_weights + ~BatchHandlerDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerDC.container_weights + ~BatchHandlerDC.data + ~BatchHandlerDC.features + ~BatchHandlerDC.hr_shape + ~BatchHandlerDC.lr_shape + ~BatchHandlerDC.queue_shape + ~BatchHandlerDC.queue_thread + ~BatchHandlerDC.running + ~BatchHandlerDC.shape + ~BatchHandlerDC.spatial_weights + ~BatchHandlerDC.temporal_weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.rst new file mode 100644 index 000000000..4176e6fde --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.dc.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.batch\_handlers.dc +====================================== + +.. automodule:: sup3r.preprocessing.batch_handlers.dc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BatchHandlerDC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.rst new file mode 100644 index 000000000..e57864a64 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandler.rst @@ -0,0 +1,56 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandler +======================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~BatchHandler.check_enhancement_factors + ~BatchHandler.check_features + ~BatchHandler.check_shared_attr + ~BatchHandler.enqueue_batch + ~BatchHandler.enqueue_batches + ~BatchHandler.get_batch + ~BatchHandler.get_container_index + ~BatchHandler.get_queue + ~BatchHandler.get_random_container + ~BatchHandler.init_samplers + ~BatchHandler.log_queue_info + ~BatchHandler.post_init_log + ~BatchHandler.post_proc + ~BatchHandler.preflight + ~BatchHandler.sample_batch + ~BatchHandler.start + ~BatchHandler.stop + ~BatchHandler.transform + ~BatchHandler.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandler.container_weights + ~BatchHandler.data + ~BatchHandler.features + ~BatchHandler.hr_shape + ~BatchHandler.lr_shape + ~BatchHandler.queue_shape + ~BatchHandler.queue_thread + ~BatchHandler.running + ~BatchHandler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.rst new file mode 100644 index 000000000..c73f2d747 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerCC.rst @@ -0,0 +1,56 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerCC +========================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerCC.check_enhancement_factors + ~BatchHandlerCC.check_features + ~BatchHandlerCC.check_shared_attr + ~BatchHandlerCC.enqueue_batch + ~BatchHandlerCC.enqueue_batches + ~BatchHandlerCC.get_batch + ~BatchHandlerCC.get_container_index + ~BatchHandlerCC.get_queue + ~BatchHandlerCC.get_random_container + ~BatchHandlerCC.init_samplers + ~BatchHandlerCC.log_queue_info + ~BatchHandlerCC.post_init_log + ~BatchHandlerCC.post_proc + ~BatchHandlerCC.preflight + ~BatchHandlerCC.sample_batch + ~BatchHandlerCC.start + ~BatchHandlerCC.stop + ~BatchHandlerCC.transform + ~BatchHandlerCC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerCC.container_weights + ~BatchHandlerCC.data + ~BatchHandlerCC.features + ~BatchHandlerCC.hr_shape + ~BatchHandlerCC.lr_shape + ~BatchHandlerCC.queue_shape + ~BatchHandlerCC.queue_thread + ~BatchHandlerCC.running + ~BatchHandlerCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.rst new file mode 100644 index 000000000..a9ea323c6 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerFactory.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerFactory +=============================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. 
autofunction:: BatchHandlerFactory \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.rst new file mode 100644 index 000000000..2248fd654 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom1 +============================================================ + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom1 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerMom1.check_enhancement_factors + ~BatchHandlerMom1.check_features + ~BatchHandlerMom1.check_shared_attr + ~BatchHandlerMom1.enqueue_batch + ~BatchHandlerMom1.enqueue_batches + ~BatchHandlerMom1.get_batch + ~BatchHandlerMom1.get_container_index + ~BatchHandlerMom1.get_queue + ~BatchHandlerMom1.get_random_container + ~BatchHandlerMom1.init_samplers + ~BatchHandlerMom1.log_queue_info + ~BatchHandlerMom1.make_mask + ~BatchHandlerMom1.make_output + ~BatchHandlerMom1.post_init_log + ~BatchHandlerMom1.post_proc + ~BatchHandlerMom1.preflight + ~BatchHandlerMom1.sample_batch + ~BatchHandlerMom1.start + ~BatchHandlerMom1.stop + ~BatchHandlerMom1.transform + ~BatchHandlerMom1.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerMom1.container_weights + ~BatchHandlerMom1.data + ~BatchHandlerMom1.features + ~BatchHandlerMom1.hr_shape + ~BatchHandlerMom1.lr_shape + ~BatchHandlerMom1.queue_shape + ~BatchHandlerMom1.queue_thread + ~BatchHandlerMom1.running + ~BatchHandlerMom1.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.rst new file mode 100644 index 000000000..eb92c6365 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom1SF.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom1SF +============================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom1SF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerMom1SF.check_enhancement_factors + ~BatchHandlerMom1SF.check_features + ~BatchHandlerMom1SF.check_shared_attr + ~BatchHandlerMom1SF.enqueue_batch + ~BatchHandlerMom1SF.enqueue_batches + ~BatchHandlerMom1SF.get_batch + ~BatchHandlerMom1SF.get_container_index + ~BatchHandlerMom1SF.get_queue + ~BatchHandlerMom1SF.get_random_container + ~BatchHandlerMom1SF.init_samplers + ~BatchHandlerMom1SF.log_queue_info + ~BatchHandlerMom1SF.make_mask + ~BatchHandlerMom1SF.make_output + ~BatchHandlerMom1SF.post_init_log + ~BatchHandlerMom1SF.post_proc + ~BatchHandlerMom1SF.preflight + ~BatchHandlerMom1SF.sample_batch + ~BatchHandlerMom1SF.start + ~BatchHandlerMom1SF.stop + ~BatchHandlerMom1SF.transform + ~BatchHandlerMom1SF.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~BatchHandlerMom1SF.container_weights + ~BatchHandlerMom1SF.data + ~BatchHandlerMom1SF.features + ~BatchHandlerMom1SF.hr_shape + ~BatchHandlerMom1SF.lr_shape + ~BatchHandlerMom1SF.queue_shape + ~BatchHandlerMom1SF.queue_thread + ~BatchHandlerMom1SF.running + ~BatchHandlerMom1SF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.rst new file mode 100644 index 000000000..36e831767 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom2 +============================================================ + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom2 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerMom2.check_enhancement_factors + ~BatchHandlerMom2.check_features + ~BatchHandlerMom2.check_shared_attr + ~BatchHandlerMom2.enqueue_batch + ~BatchHandlerMom2.enqueue_batches + ~BatchHandlerMom2.get_batch + ~BatchHandlerMom2.get_container_index + ~BatchHandlerMom2.get_queue + ~BatchHandlerMom2.get_random_container + ~BatchHandlerMom2.init_samplers + ~BatchHandlerMom2.log_queue_info + ~BatchHandlerMom2.make_mask + ~BatchHandlerMom2.make_output + ~BatchHandlerMom2.post_init_log + ~BatchHandlerMom2.post_proc + ~BatchHandlerMom2.preflight + ~BatchHandlerMom2.sample_batch + ~BatchHandlerMom2.start + ~BatchHandlerMom2.stop + ~BatchHandlerMom2.transform + ~BatchHandlerMom2.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerMom2.container_weights + ~BatchHandlerMom2.data + ~BatchHandlerMom2.features + ~BatchHandlerMom2.hr_shape + ~BatchHandlerMom2.lr_shape + ~BatchHandlerMom2.queue_shape + ~BatchHandlerMom2.queue_thread + ~BatchHandlerMom2.running + ~BatchHandlerMom2.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.rst new file mode 100644 index 000000000..6809d7249 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SF.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom2SF +============================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom2SF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~BatchHandlerMom2SF.check_enhancement_factors + ~BatchHandlerMom2SF.check_features + ~BatchHandlerMom2SF.check_shared_attr + ~BatchHandlerMom2SF.enqueue_batch + ~BatchHandlerMom2SF.enqueue_batches + ~BatchHandlerMom2SF.get_batch + ~BatchHandlerMom2SF.get_container_index + ~BatchHandlerMom2SF.get_queue + ~BatchHandlerMom2SF.get_random_container + ~BatchHandlerMom2SF.init_samplers + ~BatchHandlerMom2SF.log_queue_info + ~BatchHandlerMom2SF.make_mask + ~BatchHandlerMom2SF.make_output + ~BatchHandlerMom2SF.post_init_log + ~BatchHandlerMom2SF.post_proc + ~BatchHandlerMom2SF.preflight + ~BatchHandlerMom2SF.sample_batch + ~BatchHandlerMom2SF.start + ~BatchHandlerMom2SF.stop + ~BatchHandlerMom2SF.transform + ~BatchHandlerMom2SF.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerMom2SF.container_weights + ~BatchHandlerMom2SF.data + ~BatchHandlerMom2SF.features + ~BatchHandlerMom2SF.hr_shape + ~BatchHandlerMom2SF.lr_shape + ~BatchHandlerMom2SF.queue_shape + ~BatchHandlerMom2SF.queue_thread + ~BatchHandlerMom2SF.running + ~BatchHandlerMom2SF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.rst new file mode 100644 index 000000000..a4cbeb381 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2Sep.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom2Sep +=============================================================== + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom2Sep + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerMom2Sep.check_enhancement_factors + ~BatchHandlerMom2Sep.check_features + ~BatchHandlerMom2Sep.check_shared_attr + ~BatchHandlerMom2Sep.enqueue_batch + ~BatchHandlerMom2Sep.enqueue_batches + ~BatchHandlerMom2Sep.get_batch + ~BatchHandlerMom2Sep.get_container_index + ~BatchHandlerMom2Sep.get_queue + ~BatchHandlerMom2Sep.get_random_container + ~BatchHandlerMom2Sep.init_samplers + ~BatchHandlerMom2Sep.log_queue_info + ~BatchHandlerMom2Sep.make_mask + ~BatchHandlerMom2Sep.make_output + ~BatchHandlerMom2Sep.post_init_log + ~BatchHandlerMom2Sep.post_proc + ~BatchHandlerMom2Sep.preflight + ~BatchHandlerMom2Sep.sample_batch + ~BatchHandlerMom2Sep.start + ~BatchHandlerMom2Sep.stop + ~BatchHandlerMom2Sep.transform + ~BatchHandlerMom2Sep.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerMom2Sep.container_weights + ~BatchHandlerMom2Sep.data + ~BatchHandlerMom2Sep.features + ~BatchHandlerMom2Sep.hr_shape + ~BatchHandlerMom2Sep.lr_shape + ~BatchHandlerMom2Sep.queue_shape + ~BatchHandlerMom2Sep.queue_thread + ~BatchHandlerMom2Sep.running + ~BatchHandlerMom2Sep.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.rst new file mode 100644 index 000000000..4c50ac9cd --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.BatchHandlerMom2SepSF.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_handlers.factory.BatchHandlerMom2SepSF +================================================================= + +.. 
currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: BatchHandlerMom2SepSF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerMom2SepSF.check_enhancement_factors + ~BatchHandlerMom2SepSF.check_features + ~BatchHandlerMom2SepSF.check_shared_attr + ~BatchHandlerMom2SepSF.enqueue_batch + ~BatchHandlerMom2SepSF.enqueue_batches + ~BatchHandlerMom2SepSF.get_batch + ~BatchHandlerMom2SepSF.get_container_index + ~BatchHandlerMom2SepSF.get_queue + ~BatchHandlerMom2SepSF.get_random_container + ~BatchHandlerMom2SepSF.init_samplers + ~BatchHandlerMom2SepSF.log_queue_info + ~BatchHandlerMom2SepSF.make_mask + ~BatchHandlerMom2SepSF.make_output + ~BatchHandlerMom2SepSF.post_init_log + ~BatchHandlerMom2SepSF.post_proc + ~BatchHandlerMom2SepSF.preflight + ~BatchHandlerMom2SepSF.sample_batch + ~BatchHandlerMom2SepSF.start + ~BatchHandlerMom2SepSF.stop + ~BatchHandlerMom2SepSF.transform + ~BatchHandlerMom2SepSF.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerMom2SepSF.container_weights + ~BatchHandlerMom2SepSF.data + ~BatchHandlerMom2SepSF.features + ~BatchHandlerMom2SepSF.hr_shape + ~BatchHandlerMom2SepSF.lr_shape + ~BatchHandlerMom2SepSF.queue_shape + ~BatchHandlerMom2SepSF.queue_thread + ~BatchHandlerMom2SepSF.running + ~BatchHandlerMom2SepSF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.rst new file mode 100644 index 000000000..a442f9819 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.DualBatchHandler.rst @@ -0,0 +1,56 @@ +sup3r.preprocessing.batch\_handlers.factory.DualBatchHandler +============================================================ + +.. currentmodule:: sup3r.preprocessing.batch_handlers.factory + +.. autoclass:: DualBatchHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DualBatchHandler.check_enhancement_factors + ~DualBatchHandler.check_features + ~DualBatchHandler.check_shared_attr + ~DualBatchHandler.enqueue_batch + ~DualBatchHandler.enqueue_batches + ~DualBatchHandler.get_batch + ~DualBatchHandler.get_container_index + ~DualBatchHandler.get_queue + ~DualBatchHandler.get_random_container + ~DualBatchHandler.init_samplers + ~DualBatchHandler.log_queue_info + ~DualBatchHandler.post_init_log + ~DualBatchHandler.post_proc + ~DualBatchHandler.preflight + ~DualBatchHandler.sample_batch + ~DualBatchHandler.start + ~DualBatchHandler.stop + ~DualBatchHandler.transform + ~DualBatchHandler.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~DualBatchHandler.container_weights + ~DualBatchHandler.data + ~DualBatchHandler.features + ~DualBatchHandler.hr_shape + ~DualBatchHandler.lr_shape + ~DualBatchHandler.queue_shape + ~DualBatchHandler.queue_thread + ~DualBatchHandler.running + ~DualBatchHandler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.rst new file mode 100644 index 000000000..3ebc6d130 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.factory.rst @@ -0,0 +1,46 @@ +sup3r.preprocessing.batch\_handlers.factory +=========================================== + +.. automodule:: sup3r.preprocessing.batch_handlers.factory + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + BatchHandlerFactory + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BatchHandler + BatchHandlerCC + BatchHandlerMom1 + BatchHandlerMom1SF + BatchHandlerMom2 + BatchHandlerMom2SF + BatchHandlerMom2Sep + BatchHandlerMom2SepSF + DualBatchHandler + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_handlers.rst b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.rst new file mode 100644 index 000000000..4acd12c3f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_handlers.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.batch\_handlers +=================================== + +.. automodule:: sup3r.preprocessing.batch_handlers + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + dc + factory + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.rst new file mode 100644 index 000000000..8d906fab0 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.AbstractBatchQueue.rst @@ -0,0 +1,55 @@ +sup3r.preprocessing.batch\_queues.abstract.AbstractBatchQueue +============================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.abstract + +.. autoclass:: AbstractBatchQueue + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractBatchQueue.check_enhancement_factors + ~AbstractBatchQueue.check_features + ~AbstractBatchQueue.check_shared_attr + ~AbstractBatchQueue.enqueue_batch + ~AbstractBatchQueue.enqueue_batches + ~AbstractBatchQueue.get_batch + ~AbstractBatchQueue.get_container_index + ~AbstractBatchQueue.get_queue + ~AbstractBatchQueue.get_random_container + ~AbstractBatchQueue.log_queue_info + ~AbstractBatchQueue.post_init_log + ~AbstractBatchQueue.post_proc + ~AbstractBatchQueue.preflight + ~AbstractBatchQueue.sample_batch + ~AbstractBatchQueue.start + ~AbstractBatchQueue.stop + ~AbstractBatchQueue.transform + ~AbstractBatchQueue.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~AbstractBatchQueue.container_weights + ~AbstractBatchQueue.data + ~AbstractBatchQueue.features + ~AbstractBatchQueue.hr_shape + ~AbstractBatchQueue.lr_shape + ~AbstractBatchQueue.queue_shape + ~AbstractBatchQueue.queue_thread + ~AbstractBatchQueue.running + ~AbstractBatchQueue.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.rst new file mode 100644 index 000000000..c1473e1a9 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.abstract.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.batch\_queues.abstract +========================================== + +.. automodule:: sup3r.preprocessing.batch_queues.abstract + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractBatchQueue + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.rst new file mode 100644 index 000000000..6cb9ca92f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.SingleBatchQueue.rst @@ -0,0 +1,55 @@ +sup3r.preprocessing.batch\_queues.base.SingleBatchQueue +======================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.base + +.. autoclass:: SingleBatchQueue + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SingleBatchQueue.check_enhancement_factors + ~SingleBatchQueue.check_features + ~SingleBatchQueue.check_shared_attr + ~SingleBatchQueue.enqueue_batch + ~SingleBatchQueue.enqueue_batches + ~SingleBatchQueue.get_batch + ~SingleBatchQueue.get_container_index + ~SingleBatchQueue.get_queue + ~SingleBatchQueue.get_random_container + ~SingleBatchQueue.log_queue_info + ~SingleBatchQueue.post_init_log + ~SingleBatchQueue.post_proc + ~SingleBatchQueue.preflight + ~SingleBatchQueue.sample_batch + ~SingleBatchQueue.start + ~SingleBatchQueue.stop + ~SingleBatchQueue.transform + ~SingleBatchQueue.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SingleBatchQueue.container_weights + ~SingleBatchQueue.data + ~SingleBatchQueue.features + ~SingleBatchQueue.hr_shape + ~SingleBatchQueue.lr_shape + ~SingleBatchQueue.queue_shape + ~SingleBatchQueue.queue_thread + ~SingleBatchQueue.running + ~SingleBatchQueue.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.rst new file mode 100644 index 000000000..6dd7a39ba --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.batch\_queues.base +====================================== + +.. automodule:: sup3r.preprocessing.batch_queues.base + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + SingleBatchQueue + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.rst new file mode 100644 index 000000000..cca3a7b77 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.ConditionalBatchQueue.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.ConditionalBatchQueue +=================================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: ConditionalBatchQueue + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ConditionalBatchQueue.check_enhancement_factors + ~ConditionalBatchQueue.check_features + ~ConditionalBatchQueue.check_shared_attr + ~ConditionalBatchQueue.enqueue_batch + ~ConditionalBatchQueue.enqueue_batches + ~ConditionalBatchQueue.get_batch + ~ConditionalBatchQueue.get_container_index + ~ConditionalBatchQueue.get_queue + ~ConditionalBatchQueue.get_random_container + ~ConditionalBatchQueue.log_queue_info + ~ConditionalBatchQueue.make_mask + ~ConditionalBatchQueue.make_output + ~ConditionalBatchQueue.post_init_log + ~ConditionalBatchQueue.post_proc + ~ConditionalBatchQueue.preflight + ~ConditionalBatchQueue.sample_batch + ~ConditionalBatchQueue.start + ~ConditionalBatchQueue.stop + ~ConditionalBatchQueue.transform + ~ConditionalBatchQueue.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ConditionalBatchQueue.container_weights + ~ConditionalBatchQueue.data + ~ConditionalBatchQueue.features + ~ConditionalBatchQueue.hr_shape + ~ConditionalBatchQueue.lr_shape + ~ConditionalBatchQueue.queue_shape + ~ConditionalBatchQueue.queue_thread + ~ConditionalBatchQueue.running + ~ConditionalBatchQueue.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.rst new file mode 100644 index 000000000..d6f79beb8 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom1 +======================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom1 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom1.check_enhancement_factors + ~QueueMom1.check_features + ~QueueMom1.check_shared_attr + ~QueueMom1.enqueue_batch + ~QueueMom1.enqueue_batches + ~QueueMom1.get_batch + ~QueueMom1.get_container_index + ~QueueMom1.get_queue + ~QueueMom1.get_random_container + ~QueueMom1.log_queue_info + ~QueueMom1.make_mask + ~QueueMom1.make_output + ~QueueMom1.post_init_log + ~QueueMom1.post_proc + ~QueueMom1.preflight + ~QueueMom1.sample_batch + ~QueueMom1.start + ~QueueMom1.stop + ~QueueMom1.transform + ~QueueMom1.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~QueueMom1.container_weights + ~QueueMom1.data + ~QueueMom1.features + ~QueueMom1.hr_shape + ~QueueMom1.lr_shape + ~QueueMom1.queue_shape + ~QueueMom1.queue_thread + ~QueueMom1.running + ~QueueMom1.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.rst new file mode 100644 index 000000000..2422bf900 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom1SF.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom1SF +========================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom1SF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom1SF.check_enhancement_factors + ~QueueMom1SF.check_features + ~QueueMom1SF.check_shared_attr + ~QueueMom1SF.enqueue_batch + ~QueueMom1SF.enqueue_batches + ~QueueMom1SF.get_batch + ~QueueMom1SF.get_container_index + ~QueueMom1SF.get_queue + ~QueueMom1SF.get_random_container + ~QueueMom1SF.log_queue_info + ~QueueMom1SF.make_mask + ~QueueMom1SF.make_output + ~QueueMom1SF.post_init_log + ~QueueMom1SF.post_proc + ~QueueMom1SF.preflight + ~QueueMom1SF.sample_batch + ~QueueMom1SF.start + ~QueueMom1SF.stop + ~QueueMom1SF.transform + ~QueueMom1SF.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~QueueMom1SF.container_weights + ~QueueMom1SF.data + ~QueueMom1SF.features + ~QueueMom1SF.hr_shape + ~QueueMom1SF.lr_shape + ~QueueMom1SF.queue_shape + ~QueueMom1SF.queue_thread + ~QueueMom1SF.running + ~QueueMom1SF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.rst new file mode 100644 index 000000000..32931da05 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom2 +======================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom2 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom2.check_enhancement_factors + ~QueueMom2.check_features + ~QueueMom2.check_shared_attr + ~QueueMom2.enqueue_batch + ~QueueMom2.enqueue_batches + ~QueueMom2.get_batch + ~QueueMom2.get_container_index + ~QueueMom2.get_queue + ~QueueMom2.get_random_container + ~QueueMom2.log_queue_info + ~QueueMom2.make_mask + ~QueueMom2.make_output + ~QueueMom2.post_init_log + ~QueueMom2.post_proc + ~QueueMom2.preflight + ~QueueMom2.sample_batch + ~QueueMom2.start + ~QueueMom2.stop + ~QueueMom2.transform + ~QueueMom2.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~QueueMom2.container_weights + ~QueueMom2.data + ~QueueMom2.features + ~QueueMom2.hr_shape + ~QueueMom2.lr_shape + ~QueueMom2.queue_shape + ~QueueMom2.queue_thread + ~QueueMom2.running + ~QueueMom2.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.rst new file mode 100644 index 000000000..883e456df --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SF.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom2SF +========================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom2SF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom2SF.check_enhancement_factors + ~QueueMom2SF.check_features + ~QueueMom2SF.check_shared_attr + ~QueueMom2SF.enqueue_batch + ~QueueMom2SF.enqueue_batches + ~QueueMom2SF.get_batch + ~QueueMom2SF.get_container_index + ~QueueMom2SF.get_queue + ~QueueMom2SF.get_random_container + ~QueueMom2SF.log_queue_info + ~QueueMom2SF.make_mask + ~QueueMom2SF.make_output + ~QueueMom2SF.post_init_log + ~QueueMom2SF.post_proc + ~QueueMom2SF.preflight + ~QueueMom2SF.sample_batch + ~QueueMom2SF.start + ~QueueMom2SF.stop + ~QueueMom2SF.transform + ~QueueMom2SF.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~QueueMom2SF.container_weights + ~QueueMom2SF.data + ~QueueMom2SF.features + ~QueueMom2SF.hr_shape + ~QueueMom2SF.lr_shape + ~QueueMom2SF.queue_shape + ~QueueMom2SF.queue_thread + ~QueueMom2SF.running + ~QueueMom2SF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.rst new file mode 100644 index 000000000..425894977 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2Sep.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom2Sep +========================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom2Sep + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom2Sep.check_enhancement_factors + ~QueueMom2Sep.check_features + ~QueueMom2Sep.check_shared_attr + ~QueueMom2Sep.enqueue_batch + ~QueueMom2Sep.enqueue_batches + ~QueueMom2Sep.get_batch + ~QueueMom2Sep.get_container_index + ~QueueMom2Sep.get_queue + ~QueueMom2Sep.get_random_container + ~QueueMom2Sep.log_queue_info + ~QueueMom2Sep.make_mask + ~QueueMom2Sep.make_output + ~QueueMom2Sep.post_init_log + ~QueueMom2Sep.post_proc + ~QueueMom2Sep.preflight + ~QueueMom2Sep.sample_batch + ~QueueMom2Sep.start + ~QueueMom2Sep.stop + ~QueueMom2Sep.transform + ~QueueMom2Sep.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~QueueMom2Sep.container_weights + ~QueueMom2Sep.data + ~QueueMom2Sep.features + ~QueueMom2Sep.hr_shape + ~QueueMom2Sep.lr_shape + ~QueueMom2Sep.queue_shape + ~QueueMom2Sep.queue_thread + ~QueueMom2Sep.running + ~QueueMom2Sep.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.rst new file mode 100644 index 000000000..abf4bf85a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.QueueMom2SepSF.rst @@ -0,0 +1,57 @@ +sup3r.preprocessing.batch\_queues.conditional.QueueMom2SepSF +============================================================ + +.. currentmodule:: sup3r.preprocessing.batch_queues.conditional + +.. autoclass:: QueueMom2SepSF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QueueMom2SepSF.check_enhancement_factors + ~QueueMom2SepSF.check_features + ~QueueMom2SepSF.check_shared_attr + ~QueueMom2SepSF.enqueue_batch + ~QueueMom2SepSF.enqueue_batches + ~QueueMom2SepSF.get_batch + ~QueueMom2SepSF.get_container_index + ~QueueMom2SepSF.get_queue + ~QueueMom2SepSF.get_random_container + ~QueueMom2SepSF.log_queue_info + ~QueueMom2SepSF.make_mask + ~QueueMom2SepSF.make_output + ~QueueMom2SepSF.post_init_log + ~QueueMom2SepSF.post_proc + ~QueueMom2SepSF.preflight + ~QueueMom2SepSF.sample_batch + ~QueueMom2SepSF.start + ~QueueMom2SepSF.stop + ~QueueMom2SepSF.transform + ~QueueMom2SepSF.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~QueueMom2SepSF.container_weights + ~QueueMom2SepSF.data + ~QueueMom2SepSF.features + ~QueueMom2SepSF.hr_shape + ~QueueMom2SepSF.lr_shape + ~QueueMom2SepSF.queue_shape + ~QueueMom2SepSF.queue_thread + ~QueueMom2SepSF.running + ~QueueMom2SepSF.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.rst new file mode 100644 index 000000000..1ed84208e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.conditional.rst @@ -0,0 +1,37 @@ +sup3r.preprocessing.batch\_queues.conditional +============================================= + +.. automodule:: sup3r.preprocessing.batch_queues.conditional + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ConditionalBatchQueue + QueueMom1 + QueueMom1SF + QueueMom2 + QueueMom2SF + QueueMom2Sep + QueueMom2SepSF + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.rst new file mode 100644 index 000000000..838d5abfa --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.BatchQueueDC.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_queues.dc.BatchQueueDC +================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.dc + +.. autoclass:: BatchQueueDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~BatchQueueDC.check_enhancement_factors + ~BatchQueueDC.check_features + ~BatchQueueDC.check_shared_attr + ~BatchQueueDC.enqueue_batch + ~BatchQueueDC.enqueue_batches + ~BatchQueueDC.get_batch + ~BatchQueueDC.get_container_index + ~BatchQueueDC.get_queue + ~BatchQueueDC.get_random_container + ~BatchQueueDC.log_queue_info + ~BatchQueueDC.post_init_log + ~BatchQueueDC.post_proc + ~BatchQueueDC.preflight + ~BatchQueueDC.sample_batch + ~BatchQueueDC.start + ~BatchQueueDC.stop + ~BatchQueueDC.transform + ~BatchQueueDC.update_weights + ~BatchQueueDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchQueueDC.container_weights + ~BatchQueueDC.data + ~BatchQueueDC.features + ~BatchQueueDC.hr_shape + ~BatchQueueDC.lr_shape + ~BatchQueueDC.queue_shape + ~BatchQueueDC.queue_thread + ~BatchQueueDC.running + ~BatchQueueDC.shape + ~BatchQueueDC.spatial_weights + ~BatchQueueDC.temporal_weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.rst new file mode 100644 index 000000000..14984af94 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.ValBatchQueueDC.rst @@ -0,0 +1,58 @@ +sup3r.preprocessing.batch\_queues.dc.ValBatchQueueDC +==================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.dc + +.. autoclass:: ValBatchQueueDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ValBatchQueueDC.check_enhancement_factors + ~ValBatchQueueDC.check_features + ~ValBatchQueueDC.check_shared_attr + ~ValBatchQueueDC.enqueue_batch + ~ValBatchQueueDC.enqueue_batches + ~ValBatchQueueDC.get_batch + ~ValBatchQueueDC.get_container_index + ~ValBatchQueueDC.get_queue + ~ValBatchQueueDC.get_random_container + ~ValBatchQueueDC.log_queue_info + ~ValBatchQueueDC.post_init_log + ~ValBatchQueueDC.post_proc + ~ValBatchQueueDC.preflight + ~ValBatchQueueDC.sample_batch + ~ValBatchQueueDC.start + ~ValBatchQueueDC.stop + ~ValBatchQueueDC.transform + ~ValBatchQueueDC.update_weights + ~ValBatchQueueDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ValBatchQueueDC.container_weights + ~ValBatchQueueDC.data + ~ValBatchQueueDC.features + ~ValBatchQueueDC.hr_shape + ~ValBatchQueueDC.lr_shape + ~ValBatchQueueDC.queue_shape + ~ValBatchQueueDC.queue_thread + ~ValBatchQueueDC.running + ~ValBatchQueueDC.shape + ~ValBatchQueueDC.spatial_weights + ~ValBatchQueueDC.temporal_weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.rst new file mode 100644 index 000000000..9ade0a2af --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dc.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.batch\_queues.dc +==================================== + +.. automodule:: sup3r.preprocessing.batch_queues.dc + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + BatchQueueDC + ValBatchQueueDC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.rst new file mode 100644 index 000000000..950f31b79 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.DualBatchQueue.rst @@ -0,0 +1,55 @@ +sup3r.preprocessing.batch\_queues.dual.DualBatchQueue +===================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.dual + +.. autoclass:: DualBatchQueue + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DualBatchQueue.check_enhancement_factors + ~DualBatchQueue.check_features + ~DualBatchQueue.check_shared_attr + ~DualBatchQueue.enqueue_batch + ~DualBatchQueue.enqueue_batches + ~DualBatchQueue.get_batch + ~DualBatchQueue.get_container_index + ~DualBatchQueue.get_queue + ~DualBatchQueue.get_random_container + ~DualBatchQueue.log_queue_info + ~DualBatchQueue.post_init_log + ~DualBatchQueue.post_proc + ~DualBatchQueue.preflight + ~DualBatchQueue.sample_batch + ~DualBatchQueue.start + ~DualBatchQueue.stop + ~DualBatchQueue.transform + ~DualBatchQueue.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DualBatchQueue.container_weights + ~DualBatchQueue.data + ~DualBatchQueue.features + ~DualBatchQueue.hr_shape + ~DualBatchQueue.lr_shape + ~DualBatchQueue.queue_shape + ~DualBatchQueue.queue_thread + ~DualBatchQueue.running + ~DualBatchQueue.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.rst new file mode 100644 index 000000000..92cf76197 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.dual.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.batch\_queues.dual +====================================== + +.. automodule:: sup3r.preprocessing.batch_queues.dual + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DualBatchQueue + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.rst new file mode 100644 index 000000000..550b3e6e9 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.rst @@ -0,0 +1,35 @@ +sup3r.preprocessing.batch\_queues +================================= + +.. automodule:: sup3r.preprocessing.batch_queues + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + abstract + base + conditional + dc + dual + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.rst new file mode 100644 index 000000000..46590bf26 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.batch\_queues.utilities +=========================================== + +.. automodule:: sup3r.preprocessing.batch_queues.utilities + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + smooth_data + spatial_simple_enhancing + temporal_simple_enhancing + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.rst new file mode 100644 index 000000000..91c1afb6c --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.smooth_data.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.batch\_queues.utilities.smooth\_data +======================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.utilities + +.. autofunction:: smooth_data \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.rst new file mode 100644 index 000000000..8a4849775 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.spatial_simple_enhancing.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.batch\_queues.utilities.spatial\_simple\_enhancing +====================================================================== + +.. currentmodule:: sup3r.preprocessing.batch_queues.utilities + +.. autofunction:: spatial_simple_enhancing \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.rst b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.rst new file mode 100644 index 000000000..7288ae19f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.batch_queues.utilities.temporal_simple_enhancing.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.batch\_queues.utilities.temporal\_simple\_enhancing +======================================================================= + +.. currentmodule:: sup3r.preprocessing.batch_queues.utilities + +.. autofunction:: temporal_simple_enhancing \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.cachers.base.Cacher.rst b/_sources/_autosummary/sup3r.preprocessing.cachers.base.Cacher.rst new file mode 100644 index 000000000..e18a8e010 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.cachers.base.Cacher.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.cachers.base.Cacher +======================================= + +.. currentmodule:: sup3r.preprocessing.cachers.base + +.. autoclass:: Cacher + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Cacher.add_coord_meta + ~Cacher.cache_data + ~Cacher.get_chunk_slices + ~Cacher.get_chunksizes + ~Cacher.parse_chunks + ~Cacher.post_init_log + ~Cacher.wrap + ~Cacher.write_chunk + ~Cacher.write_h5 + ~Cacher.write_netcdf + ~Cacher.write_netcdf_chunks + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Cacher.data + ~Cacher.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.cachers.base.rst b/_sources/_autosummary/sup3r.preprocessing.cachers.base.rst new file mode 100644 index 000000000..eb291a36a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.cachers.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.cachers.base +================================ + +.. automodule:: sup3r.preprocessing.cachers.base + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + Cacher + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.cachers.rst b/_sources/_autosummary/sup3r.preprocessing.cachers.rst new file mode 100644 index 000000000..f04f74a8a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.cachers.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.cachers +=========================== + +.. automodule:: sup3r.preprocessing.cachers + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.cachers.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.cachers.utilities.rst new file mode 100644 index 000000000..0ff78f34e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.cachers.utilities.rst @@ -0,0 +1,23 @@ +sup3r.preprocessing.cachers.utilities +===================================== + +.. automodule:: sup3r.preprocessing.cachers.utilities + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.collections.base.Collection.rst b/_sources/_autosummary/sup3r.preprocessing.collections.base.Collection.rst new file mode 100644 index 000000000..b2323ce6b --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.collections.base.Collection.rst @@ -0,0 +1,35 @@ +sup3r.preprocessing.collections.base.Collection +=============================================== + +.. currentmodule:: sup3r.preprocessing.collections.base + +.. autoclass:: Collection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Collection.check_shared_attr + ~Collection.post_init_log + ~Collection.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Collection.container_weights + ~Collection.data + ~Collection.features + ~Collection.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.collections.base.rst b/_sources/_autosummary/sup3r.preprocessing.collections.base.rst new file mode 100644 index 000000000..34a2703fb --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.collections.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.collections.base +==================================== + +.. automodule:: sup3r.preprocessing.collections.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Collection + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.collections.rst b/_sources/_autosummary/sup3r.preprocessing.collections.rst new file mode 100644 index 000000000..c523b1980 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.collections.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.collections +=============================== + +.. automodule:: sup3r.preprocessing.collections + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + stats + diff --git a/_sources/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.rst b/_sources/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.rst new file mode 100644 index 000000000..303019e1f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.collections.stats.StatsCollection.rst @@ -0,0 +1,39 @@ +sup3r.preprocessing.collections.stats.StatsCollection +===================================================== + +.. 
currentmodule:: sup3r.preprocessing.collections.stats + +.. autoclass:: StatsCollection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~StatsCollection.check_shared_attr + ~StatsCollection.get_means + ~StatsCollection.get_stds + ~StatsCollection.normalize + ~StatsCollection.post_init_log + ~StatsCollection.save_stats + ~StatsCollection.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~StatsCollection.container_weights + ~StatsCollection.data + ~StatsCollection.features + ~StatsCollection.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.collections.stats.rst b/_sources/_autosummary/sup3r.preprocessing.collections.stats.rst new file mode 100644 index 000000000..05744709a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.collections.stats.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.collections.stats +===================================== + +.. automodule:: sup3r.preprocessing.collections.stats + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + StatsCollection + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.rst new file mode 100644 index 000000000..34faf07e5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoData.rst @@ -0,0 +1,38 @@ +sup3r.preprocessing.data\_handlers.exo.ExoData +============================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.exo + +.. autoclass:: ExoData + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExoData.clear + ~ExoData.copy + ~ExoData.fromkeys + ~ExoData.get + ~ExoData.get_chunk + ~ExoData.get_combine_type_data + ~ExoData.get_model_step_exo + ~ExoData.items + ~ExoData.keys + ~ExoData.pop + ~ExoData.popitem + ~ExoData.setdefault + ~ExoData.split + ~ExoData.update + ~ExoData.values + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.rst new file mode 100644 index 000000000..7795e8d3f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.ExoDataHandler.rst @@ -0,0 +1,43 @@ +sup3r.preprocessing.data\_handlers.exo.ExoDataHandler +===================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.exo + +.. autoclass:: ExoDataHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExoDataHandler.get_all_step_data + ~ExoDataHandler.get_exo_rasterizer + ~ExoDataHandler.get_exo_steps + ~ExoDataHandler.get_single_step_data + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ExoDataHandler.cache_dir + ~ExoDataHandler.cache_files + ~ExoDataHandler.chunks + ~ExoDataHandler.distance_upper_bound + ~ExoDataHandler.input_handler_kwargs + ~ExoDataHandler.input_handler_name + ~ExoDataHandler.model + ~ExoDataHandler.source_file + ~ExoDataHandler.steps + ~ExoDataHandler.file_paths + ~ExoDataHandler.feature + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.rst new file mode 100644 index 000000000..285338895 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.SingleExoDataStep.rst @@ -0,0 +1,40 @@ +sup3r.preprocessing.data\_handlers.exo.SingleExoDataStep +======================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.exo + +.. autoclass:: SingleExoDataStep + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SingleExoDataStep.clear + ~SingleExoDataStep.copy + ~SingleExoDataStep.fromkeys + ~SingleExoDataStep.get + ~SingleExoDataStep.items + ~SingleExoDataStep.keys + ~SingleExoDataStep.pop + ~SingleExoDataStep.popitem + ~SingleExoDataStep.setdefault + ~SingleExoDataStep.update + ~SingleExoDataStep.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SingleExoDataStep.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.rst new file mode 100644 index 000000000..433505754 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.exo.rst @@ -0,0 +1,33 @@ +sup3r.preprocessing.data\_handlers.exo +====================================== + +.. automodule:: sup3r.preprocessing.data_handlers.exo + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ExoData + ExoDataHandler + SingleExoDataStep + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.rst new file mode 100644 index 000000000..fbdb660f6 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DailyDataHandler.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.data\_handlers.factory.DailyDataHandler +=========================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.factory + +.. autoclass:: DailyDataHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DailyDataHandler.check_registry + ~DailyDataHandler.derive + ~DailyDataHandler.do_level_interpolation + ~DailyDataHandler.get_inputs + ~DailyDataHandler.get_multi_level_data + ~DailyDataHandler.get_single_level_data + ~DailyDataHandler.has_interp_variables + ~DailyDataHandler.map_new_name + ~DailyDataHandler.no_overlap + ~DailyDataHandler.post_init_log + ~DailyDataHandler.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~DailyDataHandler.FEATURE_REGISTRY + ~DailyDataHandler.data + ~DailyDataHandler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.rst new file mode 100644 index 000000000..4fa7e8ca8 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandler.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.data\_handlers.factory.DataHandler +====================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.factory + +.. autoclass:: DataHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataHandler.check_registry + ~DataHandler.derive + ~DataHandler.do_level_interpolation + ~DataHandler.get_inputs + ~DataHandler.get_multi_level_data + ~DataHandler.get_single_level_data + ~DataHandler.has_interp_variables + ~DataHandler.map_new_name + ~DataHandler.no_overlap + ~DataHandler.post_init_log + ~DataHandler.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DataHandler.FEATURE_REGISTRY + ~DataHandler.data + ~DataHandler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.rst new file mode 100644 index 000000000..b8cd282a9 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerFactory.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.data\_handlers.factory.DataHandlerFactory +============================================================= + +.. currentmodule:: sup3r.preprocessing.data_handlers.factory + +.. autofunction:: DataHandlerFactory \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.rst new file mode 100644 index 000000000..3bac9ba36 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5SolarCC.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.data\_handlers.factory.DataHandlerH5SolarCC +=============================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.factory + +.. autoclass:: DataHandlerH5SolarCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataHandlerH5SolarCC.check_registry + ~DataHandlerH5SolarCC.derive + ~DataHandlerH5SolarCC.do_level_interpolation + ~DataHandlerH5SolarCC.get_inputs + ~DataHandlerH5SolarCC.get_multi_level_data + ~DataHandlerH5SolarCC.get_single_level_data + ~DataHandlerH5SolarCC.has_interp_variables + ~DataHandlerH5SolarCC.map_new_name + ~DataHandlerH5SolarCC.no_overlap + ~DataHandlerH5SolarCC.post_init_log + ~DataHandlerH5SolarCC.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~DataHandlerH5SolarCC.FEATURE_REGISTRY + ~DataHandlerH5SolarCC.data + ~DataHandlerH5SolarCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.rst new file mode 100644 index 000000000..f6a908c6e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.DataHandlerH5WindCC.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.data\_handlers.factory.DataHandlerH5WindCC +============================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.factory + +.. autoclass:: DataHandlerH5WindCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataHandlerH5WindCC.check_registry + ~DataHandlerH5WindCC.derive + ~DataHandlerH5WindCC.do_level_interpolation + ~DataHandlerH5WindCC.get_inputs + ~DataHandlerH5WindCC.get_multi_level_data + ~DataHandlerH5WindCC.get_single_level_data + ~DataHandlerH5WindCC.has_interp_variables + ~DataHandlerH5WindCC.map_new_name + ~DataHandlerH5WindCC.no_overlap + ~DataHandlerH5WindCC.post_init_log + ~DataHandlerH5WindCC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DataHandlerH5WindCC.FEATURE_REGISTRY + ~DataHandlerH5WindCC.data + ~DataHandlerH5WindCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.rst new file mode 100644 index 000000000..c45985d42 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.factory.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.data\_handlers.factory +========================================== + +.. automodule:: sup3r.preprocessing.data_handlers.factory + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + DataHandlerFactory + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DailyDataHandler + DataHandler + DataHandlerH5SolarCC + DataHandlerH5WindCC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.rst new file mode 100644 index 000000000..a0fd26e74 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCC.rst @@ -0,0 +1,47 @@ +sup3r.preprocessing.data\_handlers.nc\_cc.DataHandlerNCforCC +============================================================ + +.. currentmodule:: sup3r.preprocessing.data_handlers.nc_cc + +.. autoclass:: DataHandlerNCforCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataHandlerNCforCC.check_registry + ~DataHandlerNCforCC.derive + ~DataHandlerNCforCC.do_level_interpolation + ~DataHandlerNCforCC.get_clearsky_ghi + ~DataHandlerNCforCC.get_inputs + ~DataHandlerNCforCC.get_multi_level_data + ~DataHandlerNCforCC.get_single_level_data + ~DataHandlerNCforCC.get_time_slice + ~DataHandlerNCforCC.has_interp_variables + ~DataHandlerNCforCC.map_new_name + ~DataHandlerNCforCC.no_overlap + ~DataHandlerNCforCC.post_init_log + ~DataHandlerNCforCC.run_input_checks + ~DataHandlerNCforCC.run_wrap_checks + ~DataHandlerNCforCC.wrap + + + + + + .. 
rubric:: Attributes + + .. autosummary:: + + ~DataHandlerNCforCC.BASE_LOADER + ~DataHandlerNCforCC.FEATURE_REGISTRY + ~DataHandlerNCforCC.data + ~DataHandlerNCforCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.rst new file mode 100644 index 000000000..3df26cf13 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.DataHandlerNCforCCwithPowerLaw.rst @@ -0,0 +1,47 @@ +sup3r.preprocessing.data\_handlers.nc\_cc.DataHandlerNCforCCwithPowerLaw +======================================================================== + +.. currentmodule:: sup3r.preprocessing.data_handlers.nc_cc + +.. autoclass:: DataHandlerNCforCCwithPowerLaw + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DataHandlerNCforCCwithPowerLaw.check_registry + ~DataHandlerNCforCCwithPowerLaw.derive + ~DataHandlerNCforCCwithPowerLaw.do_level_interpolation + ~DataHandlerNCforCCwithPowerLaw.get_clearsky_ghi + ~DataHandlerNCforCCwithPowerLaw.get_inputs + ~DataHandlerNCforCCwithPowerLaw.get_multi_level_data + ~DataHandlerNCforCCwithPowerLaw.get_single_level_data + ~DataHandlerNCforCCwithPowerLaw.get_time_slice + ~DataHandlerNCforCCwithPowerLaw.has_interp_variables + ~DataHandlerNCforCCwithPowerLaw.map_new_name + ~DataHandlerNCforCCwithPowerLaw.no_overlap + ~DataHandlerNCforCCwithPowerLaw.post_init_log + ~DataHandlerNCforCCwithPowerLaw.run_input_checks + ~DataHandlerNCforCCwithPowerLaw.run_wrap_checks + ~DataHandlerNCforCCwithPowerLaw.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DataHandlerNCforCCwithPowerLaw.BASE_LOADER + ~DataHandlerNCforCCwithPowerLaw.FEATURE_REGISTRY + ~DataHandlerNCforCCwithPowerLaw.data + ~DataHandlerNCforCCwithPowerLaw.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.rst new file mode 100644 index 000000000..4bee00052 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.nc_cc.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.data\_handlers.nc\_cc +========================================= + +.. automodule:: sup3r.preprocessing.data_handlers.nc_cc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DataHandlerNCforCC + DataHandlerNCforCCwithPowerLaw + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.data_handlers.rst b/_sources/_autosummary/sup3r.preprocessing.data_handlers.rst new file mode 100644 index 000000000..fa6c71e75 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.data_handlers.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.data\_handlers +================================== + +.. automodule:: sup3r.preprocessing.data_handlers + + + + + + + + + + + + + + + + + + + +.. 
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + exo + factory + nc_cc + diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.rst new file mode 100644 index 000000000..cd14e67f5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.base.BaseDeriver.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.derivers.base.BaseDeriver +============================================= + +.. currentmodule:: sup3r.preprocessing.derivers.base + +.. autoclass:: BaseDeriver + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseDeriver.check_registry + ~BaseDeriver.derive + ~BaseDeriver.do_level_interpolation + ~BaseDeriver.get_inputs + ~BaseDeriver.get_multi_level_data + ~BaseDeriver.get_single_level_data + ~BaseDeriver.has_interp_variables + ~BaseDeriver.map_new_name + ~BaseDeriver.no_overlap + ~BaseDeriver.post_init_log + ~BaseDeriver.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseDeriver.FEATURE_REGISTRY + ~BaseDeriver.data + ~BaseDeriver.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.base.Deriver.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.base.Deriver.rst new file mode 100644 index 000000000..c902ddab3 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.base.Deriver.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.derivers.base.Deriver +========================================= + +.. currentmodule:: sup3r.preprocessing.derivers.base + +.. autoclass:: Deriver + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Deriver.check_registry + ~Deriver.derive + ~Deriver.do_level_interpolation + ~Deriver.get_inputs + ~Deriver.get_multi_level_data + ~Deriver.get_single_level_data + ~Deriver.has_interp_variables + ~Deriver.map_new_name + ~Deriver.no_overlap + ~Deriver.post_init_log + ~Deriver.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Deriver.FEATURE_REGISTRY + ~Deriver.data + ~Deriver.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.base.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.base.rst new file mode 100644 index 000000000..be05bf6ac --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.base.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.derivers.base +================================= + +.. automodule:: sup3r.preprocessing.derivers.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseDeriver + Deriver + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.rst new file mode 100644 index 000000000..694c9b4a1 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatio.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.ClearSkyRatio +================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: ClearSkyRatio + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~ClearSkyRatio.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ClearSkyRatio.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.rst new file mode 100644 index 000000000..4144f2d75 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.ClearSkyRatioCC.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.ClearSkyRatioCC +==================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: ClearSkyRatioCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ClearSkyRatioCC.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ClearSkyRatioCC.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.rst new file mode 100644 index 000000000..305eb3716 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.CloudMask.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.CloudMask +============================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: CloudMask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~CloudMask.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~CloudMask.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.rst new file mode 100644 index 000000000..62e78ebce --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.DerivedFeature.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.DerivedFeature +=================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: DerivedFeature + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DerivedFeature.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DerivedFeature.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.rst new file mode 100644 index 000000000..2d2bda0df --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.PressureWRF.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.PressureWRF +================================================ + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: PressureWRF + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PressureWRF.compute + + + + + + .. rubric:: Attributes + + .. 
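Every derivation method documented in this module follows the ``DerivedFeature`` contract shown in the stub above: an ``inputs`` spec naming the required source features plus a ``compute`` classmethod. A minimal sketch of a custom feature in that style (the class, its input spec, and the data-access pattern are illustrative assumptions, not part of sup3r):

.. code-block:: python

    import numpy as np

    class WindspeedSketch:
        """Hypothetical DerivedFeature-style class: windspeed from u/v."""

        inputs = ('u_(.*)', 'v_(.*)')  # assumed wildcard input spec

        @classmethod
        def compute(cls, data, height):
            # ``data`` is assumed to expose features by name, like the
            # xarray-backed containers documented elsewhere in this PR
            u = data[f'u_{height}m']
            v = data[f'v_{height}m']
            return np.hypot(u, v)  # windspeed magnitude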
autosummary:: + + ~PressureWRF.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.rst new file mode 100644 index 000000000..eaae3af17 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.SurfaceRH.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.SurfaceRH +============================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: SurfaceRH + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SurfaceRH.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SurfaceRH.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Sza.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Sza.rst new file mode 100644 index 000000000..5e2c84beb --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Sza.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.Sza +======================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: Sza + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sza.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sza.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Tas.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Tas.rst new file mode 100644 index 000000000..4b2369faa --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Tas.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.Tas +======================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: Tas + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Tas.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Tas.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.rst new file mode 100644 index 000000000..7dc2d59cc --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMax.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.TasMax +=========================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: TasMax + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TasMax.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TasMax.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.rst new file mode 100644 index 000000000..11c6b0ce9 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TasMin.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.TasMin +=========================================== + +.. 
currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: TasMin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TasMin.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TasMin.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.rst new file mode 100644 index 000000000..9b19c675d --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.TempNCforCC.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.TempNCforCC +================================================ + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: TempNCforCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TempNCforCC.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TempNCforCC.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.USolar.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.USolar.rst new file mode 100644 index 000000000..d881c2814 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.USolar.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.USolar +=========================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: USolar + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~USolar.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~USolar.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWind.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWind.rst new file mode 100644 index 000000000..b942ff4ad --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWind.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.UWind +========================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: UWind + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~UWind.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~UWind.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.rst new file mode 100644 index 000000000..bd3f0fcdc --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.UWindPowerLaw.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.derivers.methods.UWindPowerLaw +================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: UWindPowerLaw + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~UWindPowerLaw.compute + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~UWindPowerLaw.ALPHA + ~UWindPowerLaw.NEAR_SFC_HEIGHT + ~UWindPowerLaw.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.rst new file mode 100644 index 000000000..418319e89 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VSolar.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.VSolar +=========================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: VSolar + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~VSolar.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~VSolar.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWind.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWind.rst new file mode 100644 index 000000000..c8dc6fd88 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWind.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.VWind +========================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: VWind + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~VWind.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~VWind.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.rst new file mode 100644 index 000000000..438ecffdf --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.VWindPowerLaw.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.derivers.methods.VWindPowerLaw +================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: VWindPowerLaw + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~VWindPowerLaw.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~VWindPowerLaw.ALPHA + ~VWindPowerLaw.NEAR_SFC_HEIGHT + ~VWindPowerLaw.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.rst new file mode 100644 index 000000000..277a7395f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Winddirection.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.Winddirection +================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: Winddirection + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Winddirection.compute + + + + + + .. rubric:: Attributes + + .. 
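The ``ALPHA`` and ``NEAR_SFC_HEIGHT`` attributes in the ``UWindPowerLaw`` / ``VWindPowerLaw`` stubs above parameterize a power-law vertical extrapolation. A self-contained sketch of that relation, with assumed illustrative constants (0.2 is a common shear exponent; the real class attributes may differ):

.. code-block:: python

    ALPHA = 0.2             # assumed shear exponent
    NEAR_SFC_HEIGHT = 10.0  # assumed reference height in meters

    def wind_power_law(ws_sfc, height):
        """Scale a near-surface wind component up to ``height``."""
        return ws_sfc * (height / NEAR_SFC_HEIGHT) ** ALPHA

    print(wind_power_law(5.0, 100.0))  # -> ~7.92 m/s at 100 m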
autosummary:: + + ~Winddirection.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.rst new file mode 100644 index 000000000..6fab55627 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.Windspeed.rst @@ -0,0 +1,30 @@ +sup3r.preprocessing.derivers.methods.Windspeed +============================================== + +.. currentmodule:: sup3r.preprocessing.derivers.methods + +.. autoclass:: Windspeed + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Windspeed.compute + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Windspeed.inputs + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.methods.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.rst new file mode 100644 index 000000000..655870662 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.methods.rst @@ -0,0 +1,49 @@ +sup3r.preprocessing.derivers.methods +==================================== + +.. automodule:: sup3r.preprocessing.derivers.methods + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ClearSkyRatio + ClearSkyRatioCC + CloudMask + DerivedFeature + PressureWRF + SurfaceRH + Sza + Tas + TasMax + TasMin + TempNCforCC + USolar + UWind + UWindPowerLaw + VSolar + VWind + VWindPowerLaw + Winddirection + Windspeed + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.rst new file mode 100644 index 000000000..2f4321477 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.rst @@ -0,0 +1,32 @@ +sup3r.preprocessing.derivers +============================ + +.. automodule:: sup3r.preprocessing.derivers + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + methods + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.rst new file mode 100644 index 000000000..a589f4722 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.SolarZenith.rst @@ -0,0 +1,24 @@ +sup3r.preprocessing.derivers.utilities.SolarZenith +================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.utilities + +.. autoclass:: SolarZenith + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SolarZenith.get_zenith + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.rst new file mode 100644 index 000000000..4c6d520c7 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.invert_uv.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.derivers.utilities.invert\_uv +================================================= + +.. currentmodule:: sup3r.preprocessing.derivers.utilities + +.. 
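``invert_uv``, documented here, is the inverse of ``transform_rotate_wind``: it recovers windspeed and winddirection from u/v components, which is the conversion this PR applies before NetCDF writes. A self-contained sketch of the underlying math, assuming an unrotated grid (the real function also handles lat/lon grid rotation) and meteorological convention (direction the wind blows from, degrees clockwise from north):

.. code-block:: python

    import numpy as np

    def invert_uv_sketch(u, v):
        """Recover (windspeed, winddirection) from u/v components."""
        ws = np.hypot(u, v)
        wd = (270.0 - np.degrees(np.arctan2(v, u))) % 360.0
        return ws, wd

    ws, wd = invert_uv_sketch(np.array([0.0]), np.array([-5.0]))
    print(ws, wd)  # [5.] [0.] -> 5 m/s blowing from due north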
autofunction:: invert_uv \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.rst new file mode 100644 index 000000000..a85dabea1 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.parse_feature.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.derivers.utilities.parse\_feature +===================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.utilities + +.. autofunction:: parse_feature \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.rst new file mode 100644 index 000000000..d292ea226 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.derivers.utilities +====================================== + +.. automodule:: sup3r.preprocessing.derivers.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + invert_uv + parse_feature + transform_rotate_wind + windspeed_log_law + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SolarZenith + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.rst new file mode 100644 index 000000000..1a82d7538 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.transform_rotate_wind.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.derivers.utilities.transform\_rotate\_wind +============================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.utilities + +.. autofunction:: transform_rotate_wind \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.rst b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.rst new file mode 100644 index 000000000..e27b6566e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.derivers.utilities.windspeed_log_law.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.derivers.utilities.windspeed\_log\_law +========================================================== + +.. currentmodule:: sup3r.preprocessing.derivers.utilities + +.. autofunction:: windspeed_log_law \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.Loader.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.Loader.rst new file mode 100644 index 000000000..a76232d59 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.Loader.rst @@ -0,0 +1,29 @@ +sup3r.preprocessing.loaders.Loader +================================== + +.. currentmodule:: sup3r.preprocessing.loaders + +.. autoclass:: Loader + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. 
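The ``windspeed_log_law`` stub above documents the companion log-profile extrapolation. A hedged sketch of the standard relation (the roughness length ``z0`` is an assumed illustrative value, and the real function's signature may differ):

.. code-block:: python

    import numpy as np

    def windspeed_log_law_sketch(ws_ref, z_ref, z, z0=0.1):
        """Extrapolate windspeed from height z_ref to z via the log profile."""
        return ws_ref * np.log(z / z0) / np.log(z_ref / z0)

    print(windspeed_log_law_sketch(5.0, 10.0, 100.0))  # -> 7.5 m/s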
autosummary:: + + ~Loader.TypeSpecificClasses + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.rst new file mode 100644 index 000000000..f1ba40b9f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.base.BaseLoader.rst @@ -0,0 +1,34 @@ +sup3r.preprocessing.loaders.base.BaseLoader +=========================================== + +.. currentmodule:: sup3r.preprocessing.loaders.base + +.. autoclass:: BaseLoader + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseLoader.BASE_LOADER + ~BaseLoader.post_init_log + ~BaseLoader.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseLoader.data + ~BaseLoader.file_paths + ~BaseLoader.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.base.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.base.rst new file mode 100644 index 000000000..d328424e1 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.loaders.base +================================ + +.. automodule:: sup3r.preprocessing.loaders.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseLoader + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.rst new file mode 100644 index 000000000..3a9fb779e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.h5.LoaderH5.rst @@ -0,0 +1,34 @@ +sup3r.preprocessing.loaders.h5.LoaderH5 +======================================= + +.. currentmodule:: sup3r.preprocessing.loaders.h5 + +.. autoclass:: LoaderH5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~LoaderH5.post_init_log + ~LoaderH5.scale_factor + ~LoaderH5.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LoaderH5.data + ~LoaderH5.file_paths + ~LoaderH5.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.h5.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.h5.rst new file mode 100644 index 000000000..d63e5dfa5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.h5.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.loaders.h5 +============================== + +.. automodule:: sup3r.preprocessing.loaders.h5 + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + LoaderH5 + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.rst new file mode 100644 index 000000000..d90c80ef8 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.nc.LoaderNC.rst @@ -0,0 +1,36 @@ +sup3r.preprocessing.loaders.nc.LoaderNC +======================================= + +.. currentmodule:: sup3r.preprocessing.loaders.nc + +.. autoclass:: LoaderNC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
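The ``TypeSpecificClasses`` attribute above is the dispatch hook: the top-level ``Loader`` resolves the source file type and delegates to ``LoaderH5`` or ``LoaderNC``. A usage sketch (the file path is hypothetical and the call signature is assumed from the ``file_paths`` attribute in the stubs):

.. code-block:: python

    from sup3r.preprocessing.loaders import Loader

    data = Loader('/tmp/era5_2020_*.nc')  # hypothetical NetCDF inputs
    print(data.shape)                     # lazily loaded array shape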
autosummary:: + + ~LoaderNC.BASE_LOADER + ~LoaderNC.get_coords + ~LoaderNC.get_dims + ~LoaderNC.post_init_log + ~LoaderNC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LoaderNC.data + ~LoaderNC.file_paths + ~LoaderNC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.nc.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.nc.rst new file mode 100644 index 000000000..bf7d08f85 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.nc.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.loaders.nc +============================== + +.. automodule:: sup3r.preprocessing.loaders.nc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + LoaderNC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.rst new file mode 100644 index 000000000..78537abea --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.loaders +=========================== + +.. automodule:: sup3r.preprocessing.loaders + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Loader + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + h5 + nc + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.rst new file mode 100644 index 000000000..e5b9b00ef --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.loaders.utilities +===================================== + +.. automodule:: sup3r.preprocessing.loaders.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + standardize_names + standardize_values + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.rst new file mode 100644 index 000000000..c44a12226 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_names.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.loaders.utilities.standardize\_names +======================================================== + +.. currentmodule:: sup3r.preprocessing.loaders.utilities + +.. autofunction:: standardize_names \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.rst b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.rst new file mode 100644 index 000000000..bdba21c5c --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.loaders.utilities.standardize_values.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.loaders.utilities.standardize\_values +========================================================= + +.. currentmodule:: sup3r.preprocessing.loaders.utilities + +.. autofunction:: standardize_values \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.names.Dimension.rst b/_sources/_autosummary/sup3r.preprocessing.names.Dimension.rst new file mode 100644 index 000000000..e35bf3b92 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.names.Dimension.rst @@ -0,0 +1,98 @@ +sup3r.preprocessing.names.Dimension +=================================== + +.. 
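``standardize_names`` and ``standardize_values``, documented above, normalize raw datasets to the package's conventions. A hedged sketch of the renaming idea (the mapping entries are illustrative assumptions; ``ds`` is an ``xarray.Dataset``):

.. code-block:: python

    NAME_MAP = {'orog': 'topography', 't2m': 'temperature_2m'}  # assumed examples

    def standardize_names_sketch(ds, name_map=NAME_MAP):
        """Rename only the mapping keys actually present in ``ds``."""
        return ds.rename({k: v for k, v in name_map.items() if k in ds})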
currentmodule:: sup3r.preprocessing.names + +.. autoclass:: Dimension + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Dimension.encode + ~Dimension.replace + ~Dimension.split + ~Dimension.rsplit + ~Dimension.join + ~Dimension.capitalize + ~Dimension.casefold + ~Dimension.title + ~Dimension.center + ~Dimension.count + ~Dimension.expandtabs + ~Dimension.find + ~Dimension.partition + ~Dimension.index + ~Dimension.ljust + ~Dimension.lower + ~Dimension.lstrip + ~Dimension.rfind + ~Dimension.rindex + ~Dimension.rjust + ~Dimension.rstrip + ~Dimension.rpartition + ~Dimension.splitlines + ~Dimension.strip + ~Dimension.swapcase + ~Dimension.translate + ~Dimension.upper + ~Dimension.startswith + ~Dimension.endswith + ~Dimension.removeprefix + ~Dimension.removesuffix + ~Dimension.isascii + ~Dimension.islower + ~Dimension.isupper + ~Dimension.istitle + ~Dimension.isspace + ~Dimension.isdecimal + ~Dimension.isdigit + ~Dimension.isnumeric + ~Dimension.isalpha + ~Dimension.isalnum + ~Dimension.isidentifier + ~Dimension.isprintable + ~Dimension.zfill + ~Dimension.format + ~Dimension.format_map + ~Dimension.maketrans + ~Dimension.order + ~Dimension.flat_2d + ~Dimension.dims_2d + ~Dimension.coords_2d + ~Dimension.coords_3d + ~Dimension.dims_3d + ~Dimension.dims_4d + ~Dimension.coords_4d_pres + ~Dimension.coords_4d + ~Dimension.dims_4d_pres + ~Dimension.dims_3d_bc + ~Dimension.dims_4d_bc + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Dimension.FLATTENED_SPATIAL + ~Dimension.SOUTH_NORTH + ~Dimension.WEST_EAST + ~Dimension.TIME + ~Dimension.PRESSURE_LEVEL + ~Dimension.HEIGHT + ~Dimension.VARIABLE + ~Dimension.LATITUDE + ~Dimension.LONGITUDE + ~Dimension.QUANTILE + ~Dimension.GLOBAL_TIME + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.names.rst b/_sources/_autosummary/sup3r.preprocessing.names.rst new file mode 100644 index 000000000..5be959298 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.names.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.names +========================= + +.. automodule:: sup3r.preprocessing.names + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Dimension + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.rst new file mode 100644 index 000000000..b1595a601 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.BaseRasterizer.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.rasterizers.base.BaseRasterizer +=================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.base + +.. autoclass:: BaseRasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseRasterizer.check_target_and_shape + ~BaseRasterizer.get_closest_row_col + ~BaseRasterizer.get_lat_lon + ~BaseRasterizer.get_raster_index + ~BaseRasterizer.post_init_log + ~BaseRasterizer.rasterize_data + ~BaseRasterizer.wrap + + + + + + .. rubric:: Attributes + + .. 
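The long run of inherited ``str`` methods in the ``Dimension`` stub above signals that it is a string-valued enum of standard dimension names, so members compare equal to plain strings in xarray-style code. A small sketch (the member value and ``dims_3d`` ordering shown are assumptions):

.. code-block:: python

    from sup3r.preprocessing.names import Dimension

    assert Dimension.TIME == 'time'  # assumed member value
    print(Dimension.dims_3d())       # e.g. (south_north, west_east, time)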
autosummary:: + + ~BaseRasterizer.data + ~BaseRasterizer.grid_shape + ~BaseRasterizer.lat_lon + ~BaseRasterizer.shape + ~BaseRasterizer.target + ~BaseRasterizer.time_slice + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.rst new file mode 100644 index 000000000..239726954 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.rasterizers.base +==================================== + +.. automodule:: sup3r.preprocessing.rasterizers.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseRasterizer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.rst new file mode 100644 index 000000000..f09b79f52 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.DualRasterizer.rst @@ -0,0 +1,36 @@ +sup3r.preprocessing.rasterizers.dual.DualRasterizer +=================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.dual + +.. autoclass:: DualRasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DualRasterizer.check_regridded_lr_data + ~DualRasterizer.get_regridder + ~DualRasterizer.post_init_log + ~DualRasterizer.update_hr_data + ~DualRasterizer.update_lr_data + ~DualRasterizer.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DualRasterizer.data + ~DualRasterizer.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.rst new file mode 100644 index 000000000..0b49adc44 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.dual.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.rasterizers.dual +==================================== + +.. automodule:: sup3r.preprocessing.rasterizers.dual + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DualRasterizer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.rst new file mode 100644 index 000000000..8cfc6c01f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer.rst @@ -0,0 +1,54 @@ +sup3r.preprocessing.rasterizers.exo.BaseExoRasterizer +===================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.exo + +.. autoclass:: BaseExoRasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseExoRasterizer.get_data + ~BaseExoRasterizer.get_distance_upper_bound + + + + + + .. rubric:: Attributes + + .. 
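The ``target`` / ``grid_shape`` / ``time_slice`` attributes above describe the core rasterization workflow: cut a fixed (rows, cols) box nearest a lat/lon corner out of the source grid. A usage sketch (import path, file path, and keyword names are assumptions based on those attributes):

.. code-block:: python

    from sup3r.preprocessing import Rasterizer

    rast = Rasterizer(
        file_paths='/tmp/wtk_2013.h5',  # hypothetical source file
        target=(39.0, -105.0),          # (lat, lon) of the box corner
        shape=(20, 20),                 # rows x cols to rasterize
        time_slice=slice(0, 8760),
    )
    print(rast.lat_lon.shape)           # -> (20, 20, 2)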
autosummary:: + + ~BaseExoRasterizer.cache_dir + ~BaseExoRasterizer.cache_file + ~BaseExoRasterizer.chunks + ~BaseExoRasterizer.coords + ~BaseExoRasterizer.data + ~BaseExoRasterizer.distance_upper_bound + ~BaseExoRasterizer.feature + ~BaseExoRasterizer.file_paths + ~BaseExoRasterizer.hr_lat_lon + ~BaseExoRasterizer.hr_shape + ~BaseExoRasterizer.hr_time_index + ~BaseExoRasterizer.input_handler_kwargs + ~BaseExoRasterizer.input_handler_name + ~BaseExoRasterizer.lr_shape + ~BaseExoRasterizer.max_workers + ~BaseExoRasterizer.nn + ~BaseExoRasterizer.s_enhance + ~BaseExoRasterizer.source_data + ~BaseExoRasterizer.source_file + ~BaseExoRasterizer.source_handler + ~BaseExoRasterizer.source_lat_lon + ~BaseExoRasterizer.t_enhance + ~BaseExoRasterizer.tree + ~BaseExoRasterizer.verbose + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.rst new file mode 100644 index 000000000..d5d92b2e5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizer.rst @@ -0,0 +1,55 @@ +sup3r.preprocessing.rasterizers.exo.ExoRasterizer +================================================= + +.. currentmodule:: sup3r.preprocessing.rasterizers.exo + +.. autoclass:: ExoRasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExoRasterizer.get_data + ~ExoRasterizer.get_distance_upper_bound + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ExoRasterizer.TypeSpecificClasses + ~ExoRasterizer.cache_dir + ~ExoRasterizer.cache_file + ~ExoRasterizer.chunks + ~ExoRasterizer.coords + ~ExoRasterizer.data + ~ExoRasterizer.distance_upper_bound + ~ExoRasterizer.feature + ~ExoRasterizer.file_paths + ~ExoRasterizer.hr_lat_lon + ~ExoRasterizer.hr_shape + ~ExoRasterizer.hr_time_index + ~ExoRasterizer.input_handler_kwargs + ~ExoRasterizer.input_handler_name + ~ExoRasterizer.lr_shape + ~ExoRasterizer.max_workers + ~ExoRasterizer.nn + ~ExoRasterizer.s_enhance + ~ExoRasterizer.source_data + ~ExoRasterizer.source_file + ~ExoRasterizer.source_handler + ~ExoRasterizer.source_lat_lon + ~ExoRasterizer.t_enhance + ~ExoRasterizer.tree + ~ExoRasterizer.verbose + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.rst new file mode 100644 index 000000000..af870ca57 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5.rst @@ -0,0 +1,54 @@ +sup3r.preprocessing.rasterizers.exo.ExoRasterizerH5 +=================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.exo + +.. autoclass:: ExoRasterizerH5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExoRasterizerH5.get_data + ~ExoRasterizerH5.get_distance_upper_bound + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ExoRasterizerH5.cache_dir + ~ExoRasterizerH5.cache_file + ~ExoRasterizerH5.chunks + ~ExoRasterizerH5.coords + ~ExoRasterizerH5.data + ~ExoRasterizerH5.distance_upper_bound + ~ExoRasterizerH5.feature + ~ExoRasterizerH5.file_paths + ~ExoRasterizerH5.hr_lat_lon + ~ExoRasterizerH5.hr_shape + ~ExoRasterizerH5.hr_time_index + ~ExoRasterizerH5.input_handler_kwargs + ~ExoRasterizerH5.input_handler_name + ~ExoRasterizerH5.lr_shape + ~ExoRasterizerH5.max_workers + ~ExoRasterizerH5.nn + ~ExoRasterizerH5.s_enhance + ~ExoRasterizerH5.source_data + ~ExoRasterizerH5.source_file + ~ExoRasterizerH5.source_handler + ~ExoRasterizerH5.source_lat_lon + ~ExoRasterizerH5.t_enhance + ~ExoRasterizerH5.tree + ~ExoRasterizerH5.verbose + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.rst new file mode 100644 index 000000000..bc3ca0bb2 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC.rst @@ -0,0 +1,54 @@ +sup3r.preprocessing.rasterizers.exo.ExoRasterizerNC +=================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.exo + +.. autoclass:: ExoRasterizerNC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExoRasterizerNC.get_data + ~ExoRasterizerNC.get_distance_upper_bound + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ExoRasterizerNC.cache_dir + ~ExoRasterizerNC.cache_file + ~ExoRasterizerNC.chunks + ~ExoRasterizerNC.coords + ~ExoRasterizerNC.data + ~ExoRasterizerNC.distance_upper_bound + ~ExoRasterizerNC.feature + ~ExoRasterizerNC.file_paths + ~ExoRasterizerNC.hr_lat_lon + ~ExoRasterizerNC.hr_shape + ~ExoRasterizerNC.hr_time_index + ~ExoRasterizerNC.input_handler_kwargs + ~ExoRasterizerNC.input_handler_name + ~ExoRasterizerNC.lr_shape + ~ExoRasterizerNC.max_workers + ~ExoRasterizerNC.nn + ~ExoRasterizerNC.s_enhance + ~ExoRasterizerNC.source_data + ~ExoRasterizerNC.source_file + ~ExoRasterizerNC.source_handler + ~ExoRasterizerNC.source_lat_lon + ~ExoRasterizerNC.t_enhance + ~ExoRasterizerNC.tree + ~ExoRasterizerNC.verbose + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.rst new file mode 100644 index 000000000..a2f38c77d --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.SzaRasterizer.rst @@ -0,0 +1,54 @@ +sup3r.preprocessing.rasterizers.exo.SzaRasterizer +================================================= + +.. currentmodule:: sup3r.preprocessing.rasterizers.exo + +.. autoclass:: SzaRasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SzaRasterizer.get_data + ~SzaRasterizer.get_distance_upper_bound + + + + + + .. rubric:: Attributes + + .. 
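The exo rasterizers above map a high-resolution exogenous source (e.g. topography) onto the spatiotemporally enhanced output grid. A usage sketch with hypothetical paths; the keyword names are assumptions drawn from the documented attributes (``source_file``, ``feature``, ``s_enhance``, ``t_enhance``):

.. code-block:: python

    from sup3r.preprocessing.rasterizers.exo import ExoRasterizer

    topo = ExoRasterizer(
        file_paths='/tmp/lowres_input.nc',  # hypothetical model input
        source_file='/tmp/topo_hires.h5',   # hypothetical hi-res source
        feature='topography',
        s_enhance=20,                       # spatial enhancement factor
        t_enhance=1,                        # temporal enhancement factor
    )
    arr = topo.data                         # hi-res feature on the enhanced grid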
autosummary:: + + ~SzaRasterizer.cache_dir + ~SzaRasterizer.cache_file + ~SzaRasterizer.chunks + ~SzaRasterizer.coords + ~SzaRasterizer.data + ~SzaRasterizer.distance_upper_bound + ~SzaRasterizer.feature + ~SzaRasterizer.file_paths + ~SzaRasterizer.hr_lat_lon + ~SzaRasterizer.hr_shape + ~SzaRasterizer.hr_time_index + ~SzaRasterizer.input_handler_kwargs + ~SzaRasterizer.input_handler_name + ~SzaRasterizer.lr_shape + ~SzaRasterizer.max_workers + ~SzaRasterizer.nn + ~SzaRasterizer.s_enhance + ~SzaRasterizer.source_data + ~SzaRasterizer.source_file + ~SzaRasterizer.source_handler + ~SzaRasterizer.source_lat_lon + ~SzaRasterizer.t_enhance + ~SzaRasterizer.tree + ~SzaRasterizer.verbose + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.rst new file mode 100644 index 000000000..3432f16d1 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.exo.rst @@ -0,0 +1,35 @@ +sup3r.preprocessing.rasterizers.exo +=================================== + +.. automodule:: sup3r.preprocessing.rasterizers.exo + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseExoRasterizer + ExoRasterizer + ExoRasterizerH5 + ExoRasterizerNC + SzaRasterizer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.rst new file mode 100644 index 000000000..0cec6e47b --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.Rasterizer.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.rasterizers.extended.Rasterizer +=================================================== + +.. currentmodule:: sup3r.preprocessing.rasterizers.extended + +.. autoclass:: Rasterizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Rasterizer.check_target_and_shape + ~Rasterizer.get_closest_row_col + ~Rasterizer.get_lat_lon + ~Rasterizer.get_raster_index + ~Rasterizer.post_init_log + ~Rasterizer.rasterize_data + ~Rasterizer.save_raster_index + ~Rasterizer.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Rasterizer.data + ~Rasterizer.grid_shape + ~Rasterizer.lat_lon + ~Rasterizer.shape + ~Rasterizer.target + ~Rasterizer.time_slice + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.rst new file mode 100644 index 000000000..711f8bc6b --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.extended.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.rasterizers.extended +======================================== + +.. automodule:: sup3r.preprocessing.rasterizers.extended + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Rasterizer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.rasterizers.rst b/_sources/_autosummary/sup3r.preprocessing.rasterizers.rst new file mode 100644 index 000000000..26e83e78a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rasterizers.rst @@ -0,0 +1,33 @@ +sup3r.preprocessing.rasterizers +=============================== + +.. automodule:: sup3r.preprocessing.rasterizers + + + + + + + + + + + + + + + + + + + +.. 
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + dual + exo + extended + diff --git a/_sources/_autosummary/sup3r.preprocessing.rst b/_sources/_autosummary/sup3r.preprocessing.rst new file mode 100644 index 000000000..eb6f2cfbc --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing +=================== + +.. automodule:: sup3r.preprocessing + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + accessor + base + batch_handlers + batch_queues + cachers + collections + data_handlers + derivers + loaders + names + rasterizers + samplers + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.base.Sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.base.Sampler.rst new file mode 100644 index 000000000..ce4090eae --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.base.Sampler.rst @@ -0,0 +1,41 @@ +sup3r.preprocessing.samplers.base.Sampler +========================================= + +.. currentmodule:: sup3r.preprocessing.samplers.base + +.. autoclass:: Sampler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sampler.get_sample_index + ~Sampler.post_init_log + ~Sampler.preflight + ~Sampler.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sampler.data + ~Sampler.hr_exo_features + ~Sampler.hr_features + ~Sampler.hr_features_ind + ~Sampler.hr_out_features + ~Sampler.hr_sample_shape + ~Sampler.lr_only_features + ~Sampler.sample_shape + ~Sampler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.base.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.base.rst new file mode 100644 index 000000000..493c97e56 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.base.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.samplers.base +================================= + +.. automodule:: sup3r.preprocessing.samplers.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sampler + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.rst new file mode 100644 index 000000000..caad91491 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.cc.DualSamplerCC.rst @@ -0,0 +1,45 @@ +sup3r.preprocessing.samplers.cc.DualSamplerCC +============================================= + +.. currentmodule:: sup3r.preprocessing.samplers.cc + +.. autoclass:: DualSamplerCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DualSamplerCC.check_for_consistent_shapes + ~DualSamplerCC.get_features + ~DualSamplerCC.get_middle_days + ~DualSamplerCC.get_sample_index + ~DualSamplerCC.post_init_log + ~DualSamplerCC.preflight + ~DualSamplerCC.reduce_high_res_sub_daily + ~DualSamplerCC.wrap + + + + + + .. rubric:: Attributes + + .. 
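``Sampler``, documented above, draws random ``sample_shape`` chunks from a wrapped dataset for batching. A usage sketch built on the documented ``get_sample_index`` method (the constructor signature and file path are assumptions):

.. code-block:: python

    from sup3r.preprocessing import Loader, Sampler

    sampler = Sampler(
        Loader('/tmp/era5_2020_*.nc'),  # hypothetical inputs
        sample_shape=(10, 10, 24),      # rows x cols x time steps
    )
    idx = sampler.get_sample_index()    # tuple of slices into the volume
    sample = sampler.data[idx]          # one training sample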
autosummary:: + + ~DualSamplerCC.data + ~DualSamplerCC.hr_exo_features + ~DualSamplerCC.hr_features + ~DualSamplerCC.hr_features_ind + ~DualSamplerCC.hr_out_features + ~DualSamplerCC.hr_sample_shape + ~DualSamplerCC.lr_only_features + ~DualSamplerCC.sample_shape + ~DualSamplerCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.cc.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.cc.rst new file mode 100644 index 000000000..4f6b6b7e8 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.cc.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.samplers.cc +=============================== + +.. automodule:: sup3r.preprocessing.samplers.cc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DualSamplerCC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.rst new file mode 100644 index 000000000..0f1a104e5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.dc.SamplerDC.rst @@ -0,0 +1,42 @@ +sup3r.preprocessing.samplers.dc.SamplerDC +========================================= + +.. currentmodule:: sup3r.preprocessing.samplers.dc + +.. autoclass:: SamplerDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SamplerDC.get_sample_index + ~SamplerDC.post_init_log + ~SamplerDC.preflight + ~SamplerDC.update_weights + ~SamplerDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SamplerDC.data + ~SamplerDC.hr_exo_features + ~SamplerDC.hr_features + ~SamplerDC.hr_features_ind + ~SamplerDC.hr_out_features + ~SamplerDC.hr_sample_shape + ~SamplerDC.lr_only_features + ~SamplerDC.sample_shape + ~SamplerDC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.dc.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.dc.rst new file mode 100644 index 000000000..b860493d5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.dc.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.samplers.dc +=============================== + +.. automodule:: sup3r.preprocessing.samplers.dc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SamplerDC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.rst new file mode 100644 index 000000000..0320e5902 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.dual.DualSampler.rst @@ -0,0 +1,43 @@ +sup3r.preprocessing.samplers.dual.DualSampler +============================================= + +.. currentmodule:: sup3r.preprocessing.samplers.dual + +.. autoclass:: DualSampler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DualSampler.check_for_consistent_shapes + ~DualSampler.get_features + ~DualSampler.get_sample_index + ~DualSampler.post_init_log + ~DualSampler.preflight + ~DualSampler.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~DualSampler.data + ~DualSampler.hr_exo_features + ~DualSampler.hr_features + ~DualSampler.hr_features_ind + ~DualSampler.hr_out_features + ~DualSampler.hr_sample_shape + ~DualSampler.lr_only_features + ~DualSampler.sample_shape + ~DualSampler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.dual.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.dual.rst new file mode 100644 index 000000000..4c4a2b0c5 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.dual.rst @@ -0,0 +1,31 @@ +sup3r.preprocessing.samplers.dual +================================= + +.. automodule:: sup3r.preprocessing.samplers.dual + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + DualSampler + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.rst new file mode 100644 index 000000000..6f3296155 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.rst @@ -0,0 +1,34 @@ +sup3r.preprocessing.samplers +============================ + +.. automodule:: sup3r.preprocessing.samplers + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + base + cc + dc + dual + utilities + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.rst new file mode 100644 index 000000000..1d428cb44 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.daily_time_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.daily\_time\_sampler +=========================================================== + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. autofunction:: daily_time_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.rst new file mode 100644 index 000000000..c930e534e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_reduce_daily_data.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.nsrdb\_reduce\_daily\_data +================================================================= + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. autofunction:: nsrdb_reduce_daily_data \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.rst new file mode 100644 index 000000000..508ff99b0 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.nsrdb_sub_daily_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.nsrdb\_sub\_daily\_sampler +================================================================= + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. 
autofunction:: nsrdb_sub_daily_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.rst new file mode 100644 index 000000000..fedd05d0e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.rst @@ -0,0 +1,36 @@ +sup3r.preprocessing.samplers.utilities +====================================== + +.. automodule:: sup3r.preprocessing.samplers.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + daily_time_sampler + nsrdb_reduce_daily_data + nsrdb_sub_daily_sampler + uniform_box_sampler + uniform_time_sampler + weighted_box_sampler + weighted_time_sampler + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.rst new file mode 100644 index 000000000..cb7f7f576 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_box_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.uniform\_box\_sampler +============================================================ + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. autofunction:: uniform_box_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.rst new file mode 100644 index 000000000..efcf666fe --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.uniform_time_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.uniform\_time\_sampler +============================================================= + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. autofunction:: uniform_time_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.rst new file mode 100644 index 000000000..fcea36453 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_box_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.weighted\_box\_sampler +============================================================= + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. autofunction:: weighted_box_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.rst b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.rst new file mode 100644 index 000000000..824b9847b --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.samplers.utilities.weighted_time_sampler.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.samplers.utilities.weighted\_time\_sampler +============================================================== + +.. currentmodule:: sup3r.preprocessing.samplers.utilities + +.. 
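The samplers above pick random windows from the (south_north, west_east, time) data volume, either uniformly or by weight. A self-contained sketch of the uniform time case (the real signatures may differ):

.. code-block:: python

    import numpy as np

    def uniform_time_sampler_sketch(data_shape, sample_length):
        """Random contiguous time slice of ``sample_length`` steps."""
        start = np.random.randint(0, data_shape[2] - sample_length + 1)
        return slice(start, start + sample_length)

    print(uniform_time_sampler_sketch((100, 100, 8760), 24))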
autofunction:: weighted_time_sampler \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.check_signatures.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.check_signatures.rst new file mode 100644 index 000000000..0fed204a6 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.check_signatures.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.check\_signatures +=============================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: check_signatures \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.composite_info.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.composite_info.rst new file mode 100644 index 000000000..a2dde0d9a --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.composite_info.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.composite\_info +============================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: composite_info \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.composite_sig.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.composite_sig.rst new file mode 100644 index 000000000..b6663ffc7 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.composite_sig.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.composite\_sig +============================================ + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: composite_sig \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.rst new file mode 100644 index 000000000..75af9acbb --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.compute_if_dask.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.compute\_if\_dask +=============================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: compute_if_dask \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.rst new file mode 100644 index 000000000..74c65a5ec --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.contains_ellipsis.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.contains\_ellipsis +================================================ + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: contains_ellipsis \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.rst new file mode 100644 index 000000000..b04c86d68 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.dims_array_tuple.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.dims\_array\_tuple +================================================ + +.. currentmodule:: sup3r.preprocessing.utilities + +.. 
autofunction:: dims_array_tuple \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.expand_paths.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.expand_paths.rst new file mode 100644 index 000000000..968e7fb05 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.expand_paths.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.expand\_paths +=========================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: expand_paths \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.rst new file mode 100644 index 000000000..e8a5e682f --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.get_class_kwargs.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.get\_class\_kwargs +================================================ + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: get_class_kwargs \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.rst new file mode 100644 index 000000000..b3bea1513 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.get_date_range_kwargs.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.get\_date\_range\_kwargs +====================================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: get_date_range_kwargs \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.rst new file mode 100644 index 000000000..742d0fe5e --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.get_input_handler_class.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.get\_input\_handler\_class +======================================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: get_input_handler_class \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.get_obj_params.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.get_obj_params.rst new file mode 100644 index 000000000..b2d91b3a0 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.get_obj_params.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.get\_obj\_params +============================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: get_obj_params \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.get_source_type.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.get_source_type.rst new file mode 100644 index 000000000..5a824fde1 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.get_source_type.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.get\_source\_type +=============================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. 
autofunction:: get_source_type \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.is_type_of.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.is_type_of.rst new file mode 100644 index 000000000..6d60ed0f7 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.is_type_of.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.is\_type\_of +========================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: is_type_of \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.log_args.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.log_args.rst new file mode 100644 index 000000000..1251b7022 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.log_args.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.log\_args +======================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: log_args \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.lower_names.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.lower_names.rst new file mode 100644 index 000000000..87cbc9838 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.lower_names.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.lower\_names +========================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: lower_names \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.lowered.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.lowered.rst new file mode 100644 index 000000000..cc2c467cf --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.lowered.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.lowered +===================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: lowered \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.rst new file mode 100644 index 000000000..69b0e08ce --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.make_time_index_from_kws.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.make\_time\_index\_from\_kws +========================================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: make_time_index_from_kws \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.rst new file mode 100644 index 000000000..f4597f4ef --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.numpy_if_tensor.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.numpy\_if\_tensor +=============================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: numpy_if_tensor \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_array.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_array.rst new file mode 100644 index 000000000..1ea113438 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_array.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.ordered\_array +============================================ + +.. 
currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: ordered_array \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_dims.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_dims.rst new file mode 100644 index 000000000..11cb30044 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.ordered_dims.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.ordered\_dims +=========================================== + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: ordered_dims \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.rst new file mode 100644 index 000000000..f63c12ac0 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_ellipsis.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.parse\_ellipsis +============================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: parse_ellipsis \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.parse_features.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_features.rst new file mode 100644 index 000000000..9545699ab --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_features.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.parse\_features +============================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: parse_features \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.parse_keys.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_keys.rst new file mode 100644 index 000000000..54eb566c3 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_keys.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.parse\_keys +========================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: parse_keys \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.parse_to_list.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_to_list.rst new file mode 100644 index 000000000..209eff085 --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.parse_to_list.rst @@ -0,0 +1,6 @@ +sup3r.preprocessing.utilities.parse\_to\_list +============================================= + +.. currentmodule:: sup3r.preprocessing.utilities + +.. autofunction:: parse_to_list \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.preprocessing.utilities.rst b/_sources/_autosummary/sup3r.preprocessing.utilities.rst new file mode 100644 index 000000000..0e47f0fcb --- /dev/null +++ b/_sources/_autosummary/sup3r.preprocessing.utilities.rst @@ -0,0 +1,53 @@ +sup3r.preprocessing.utilities +============================= + +.. automodule:: sup3r.preprocessing.utilities + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + check_signatures + composite_info + composite_sig + compute_if_dask + contains_ellipsis + dims_array_tuple + expand_paths + get_class_kwargs + get_date_range_kwargs + get_input_handler_class + get_obj_params + get_source_type + is_type_of + log_args + lower_names + lowered + make_time_index_from_kws + numpy_if_tensor + ordered_array + ordered_dims + parse_ellipsis + parse_features + parse_keys + parse_to_list + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.qa.qa.Sup3rQa.rst b/_sources/_autosummary/sup3r.qa.qa.Sup3rQa.rst new file mode 100644 index 000000000..776b36c5e --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.qa.Sup3rQa.rst @@ -0,0 +1,38 @@ +sup3r.qa.qa.Sup3rQa +=================== + +.. currentmodule:: sup3r.qa.qa + +.. autoclass:: Sup3rQa + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sup3rQa.bias_correct_input_handler + ~Sup3rQa.close + ~Sup3rQa.coarsen_data + ~Sup3rQa.export + ~Sup3rQa.get_dset_out + ~Sup3rQa.get_node_cmd + ~Sup3rQa.run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Sup3rQa.features + ~Sup3rQa.output_names + ~Sup3rQa.output_type + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.qa.rst b/_sources/_autosummary/sup3r.qa.qa.rst new file mode 100644 index 000000000..9e8d522a1 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.qa.rst @@ -0,0 +1,31 @@ +sup3r.qa.qa +=========== + +.. automodule:: sup3r.qa.qa + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Sup3rQa + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.qa.qa_cli.rst b/_sources/_autosummary/sup3r.qa.qa_cli.rst new file mode 100644 index 000000000..de6de99bd --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.qa_cli.rst @@ -0,0 +1,23 @@ +sup3r.qa.qa\_cli +================ + +.. automodule:: sup3r.qa.qa_cli + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.qa.rst b/_sources/_autosummary/sup3r.qa.rst new file mode 100644 index 000000000..173b046e0 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.rst @@ -0,0 +1,32 @@ +sup3r.qa +======== + +.. automodule:: sup3r.qa + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + qa + qa_cli + utilities + diff --git a/_sources/_autosummary/sup3r.qa.utilities.continuous_dist.rst b/_sources/_autosummary/sup3r.qa.utilities.continuous_dist.rst new file mode 100644 index 000000000..d71ca9248 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.continuous_dist.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.continuous\_dist +=================================== + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: continuous_dist \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.direct_dist.rst b/_sources/_autosummary/sup3r.qa.utilities.direct_dist.rst new file mode 100644 index 000000000..f9d98e3c3 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.direct_dist.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.direct\_dist +=============================== + +.. currentmodule:: sup3r.qa.utilities + +.. 
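Several of the ``sup3r.preprocessing.utilities`` helpers listed above are duck-typing converters whose behavior is suggested by their names. A minimal sketch of that pattern, assuming nothing about the real implementations beyond the naming:

.. code-block:: python

    import numpy as np

    def compute_if_dask(arr):
        """Materialize a dask array; pass anything else through.

        Sketch of the pattern implied by ``compute_if_dask``; the
        actual sup3r implementation may differ.
        """
        return arr.compute() if hasattr(arr, 'compute') else arr

    def numpy_if_tensor(arr):
        """Convert a framework tensor to numpy; pass arrays through."""
        return arr.numpy() if hasattr(arr, 'numpy') else arr

    x = np.arange(4)
    assert compute_if_dask(x) is x   # plain numpy is untouched
    assert numpy_if_tensor(x) is x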
autofunction:: direct_dist \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.frequency_spectrum.rst b/_sources/_autosummary/sup3r.qa.utilities.frequency_spectrum.rst new file mode 100644 index 000000000..e5cb3a7d8 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.frequency_spectrum.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.frequency\_spectrum +====================================== + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: frequency_spectrum \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.gradient_dist.rst b/_sources/_autosummary/sup3r.qa.utilities.gradient_dist.rst new file mode 100644 index 000000000..e9ba14309 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.gradient_dist.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.gradient\_dist +================================= + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: gradient_dist \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.rst b/_sources/_autosummary/sup3r.qa.utilities.rst new file mode 100644 index 000000000..b5880173d --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.rst @@ -0,0 +1,37 @@ +sup3r.qa.utilities +================== + +.. automodule:: sup3r.qa.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + continuous_dist + direct_dist + frequency_spectrum + gradient_dist + time_derivative_dist + tke_frequency_spectrum + tke_wavenumber_spectrum + wavenumber_spectrum + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.qa.utilities.time_derivative_dist.rst b/_sources/_autosummary/sup3r.qa.utilities.time_derivative_dist.rst new file mode 100644 index 000000000..31757692b --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.time_derivative_dist.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.time\_derivative\_dist +========================================= + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: time_derivative_dist \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.rst b/_sources/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.rst new file mode 100644 index 000000000..2e87b787d --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.tke_frequency_spectrum.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.tke\_frequency\_spectrum +=========================================== + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: tke_frequency_spectrum \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.tke_wavenumber_spectrum.rst b/_sources/_autosummary/sup3r.qa.utilities.tke_wavenumber_spectrum.rst new file mode 100644 index 000000000..a43e3ef4c --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.tke_wavenumber_spectrum.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.tke\_wavenumber\_spectrum +============================================ + +.. currentmodule:: sup3r.qa.utilities + +.. autofunction:: tke_wavenumber_spectrum \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.qa.utilities.wavenumber_spectrum.rst b/_sources/_autosummary/sup3r.qa.utilities.wavenumber_spectrum.rst new file mode 100644 index 000000000..ea2edd996 --- /dev/null +++ b/_sources/_autosummary/sup3r.qa.utilities.wavenumber_spectrum.rst @@ -0,0 +1,6 @@ +sup3r.qa.utilities.wavenumber\_spectrum +======================================= + +.. currentmodule:: sup3r.qa.utilities + +.. 
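The QA utilities above compute distributions and spectra used to compare synthetic output against ground truth. As a hedged sketch of the kind of calculation behind ``wavenumber_spectrum`` — illustrative only, since the real function may bin and normalize differently:

.. code-block:: python

    import numpy as np

    def wavenumber_spectrum_sketch(field):
        """Mean 1-D power spectrum of a 2-D field along its rows.

        Illustrative stand-in for sup3r's ``wavenumber_spectrum``.
        """
        fft = np.fft.rfft(field, axis=1)            # FFT along longitude
        power = np.mean(np.abs(fft) ** 2, axis=0)   # average over latitude
        k = np.fft.rfftfreq(field.shape[1])         # wavenumbers (cycles/grid)
        return k, power

    field = np.random.default_rng(0).normal(size=(64, 128))
    k, p = wavenumber_spectrum_sketch(field)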
autofunction:: wavenumber_spectrum \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.rst b/_sources/_autosummary/sup3r.rst new file mode 100644 index 000000000..3fc9fb2ad --- /dev/null +++ b/_sources/_autosummary/sup3r.rst @@ -0,0 +1,39 @@ +sup3r +===== + +.. automodule:: sup3r + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + batch + bias + cli + models + pipeline + postprocessing + preprocessing + qa + solar + utilities + diff --git a/_sources/_autosummary/sup3r.solar.rst b/_sources/_autosummary/sup3r.solar.rst new file mode 100644 index 000000000..2b6374965 --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.rst @@ -0,0 +1,31 @@ +sup3r.solar +=========== + +.. automodule:: sup3r.solar + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + solar + solar_cli + diff --git a/_sources/_autosummary/sup3r.solar.solar.Solar.rst b/_sources/_autosummary/sup3r.solar.solar.Solar.rst new file mode 100644 index 000000000..5fc9f30bb --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.solar.Solar.rst @@ -0,0 +1,46 @@ +sup3r.solar.solar.Solar +======================= + +.. currentmodule:: sup3r.solar.solar + +.. autoclass:: Solar + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Solar.close + ~Solar.get_node_cmd + ~Solar.get_nsrdb_data + ~Solar.get_sup3r_fps + ~Solar.preflight + ~Solar.run_temporal_chunks + ~Solar.write + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Solar.clearsky_ratio + ~Solar.cloud_mask + ~Solar.dhi + ~Solar.dist + ~Solar.dni + ~Solar.ghi + ~Solar.idnn + ~Solar.nsrdb_tslice + ~Solar.out_of_bounds + ~Solar.solar_zenith_angle + ~Solar.time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.solar.solar.rst b/_sources/_autosummary/sup3r.solar.solar.rst new file mode 100644 index 000000000..582072ed8 --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.solar.rst @@ -0,0 +1,31 @@ +sup3r.solar.solar +================= + +.. automodule:: sup3r.solar.solar + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Solar + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_local_job.rst b/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_local_job.rst new file mode 100644 index 000000000..d476e6727 --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_local_job.rst @@ -0,0 +1,6 @@ +sup3r.solar.solar\_cli.kickoff\_local\_job +========================================== + +.. currentmodule:: sup3r.solar.solar_cli + +.. autofunction:: kickoff_local_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_slurm_job.rst b/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_slurm_job.rst new file mode 100644 index 000000000..e10b3a24e --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.solar_cli.kickoff_slurm_job.rst @@ -0,0 +1,6 @@ +sup3r.solar.solar\_cli.kickoff\_slurm\_job +========================================== + +.. currentmodule:: sup3r.solar.solar_cli + +.. 
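The ``Solar`` attributes listed above (``clearsky_ratio``, ``ghi``, ``dni``, ``dhi``, ``solar_zenith_angle``) suggest standard irradiance bookkeeping. A hedged numerical sketch, assuming only the standard closure relation GHI = DNI·cos(θz) + DHI; the values and the diffuse split are placeholders, not sup3r's actual algorithm:

.. code-block:: python

    import numpy as np

    clearsky_ratio = np.array([0.2, 0.8, 1.0])      # predicted all-sky ratio
    clearsky_ghi = np.array([600.0, 800.0, 900.0])  # clearsky GHI (W/m^2)
    zenith = np.radians([60.0, 45.0, 30.0])         # solar zenith angle

    ghi = clearsky_ratio * clearsky_ghi             # all-sky GHI
    dhi = 0.3 * ghi                                 # assumed diffuse fraction
    dni = (ghi - dhi) / np.cos(zenith)              # closure relation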
autofunction:: kickoff_slurm_job \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.solar.solar_cli.rst b/_sources/_autosummary/sup3r.solar.solar_cli.rst new file mode 100644 index 000000000..a776886bd --- /dev/null +++ b/_sources/_autosummary/sup3r.solar.solar_cli.rst @@ -0,0 +1,31 @@ +sup3r.solar.solar\_cli +====================== + +.. automodule:: sup3r.solar.solar_cli + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + kickoff_local_job + kickoff_slurm_job + + + + + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.ModuleName.rst b/_sources/_autosummary/sup3r.utilities.ModuleName.rst new file mode 100644 index 000000000..d15dfdf98 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.ModuleName.rst @@ -0,0 +1,85 @@ +sup3r.utilities.ModuleName +========================== + +.. currentmodule:: sup3r.utilities + +.. autoclass:: ModuleName + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ModuleName.encode + ~ModuleName.replace + ~ModuleName.split + ~ModuleName.rsplit + ~ModuleName.join + ~ModuleName.capitalize + ~ModuleName.casefold + ~ModuleName.title + ~ModuleName.center + ~ModuleName.count + ~ModuleName.expandtabs + ~ModuleName.find + ~ModuleName.partition + ~ModuleName.index + ~ModuleName.ljust + ~ModuleName.lower + ~ModuleName.lstrip + ~ModuleName.rfind + ~ModuleName.rindex + ~ModuleName.rjust + ~ModuleName.rstrip + ~ModuleName.rpartition + ~ModuleName.splitlines + ~ModuleName.strip + ~ModuleName.swapcase + ~ModuleName.translate + ~ModuleName.upper + ~ModuleName.startswith + ~ModuleName.endswith + ~ModuleName.removeprefix + ~ModuleName.removesuffix + ~ModuleName.isascii + ~ModuleName.islower + ~ModuleName.isupper + ~ModuleName.istitle + ~ModuleName.isspace + ~ModuleName.isdecimal + ~ModuleName.isdigit + ~ModuleName.isnumeric + ~ModuleName.isalpha + ~ModuleName.isalnum + ~ModuleName.isidentifier + ~ModuleName.isprintable + ~ModuleName.zfill + ~ModuleName.format + ~ModuleName.format_map + ~ModuleName.maketrans + ~ModuleName.all_names + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ModuleName.FORWARD_PASS + ~ModuleName.DATA_EXTRACT + ~ModuleName.DATA_COLLECT + ~ModuleName.QA + ~ModuleName.SOLAR + ~ModuleName.STATS + ~ModuleName.BIAS_CALC + ~ModuleName.VISUAL_QA + ~ModuleName.REGRID + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.cli.BaseCLI.rst b/_sources/_autosummary/sup3r.utilities.cli.BaseCLI.rst new file mode 100644 index 000000000..d812484af --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.cli.BaseCLI.rst @@ -0,0 +1,29 @@ +sup3r.utilities.cli.BaseCLI +=========================== + +.. currentmodule:: sup3r.utilities.cli + +.. autoclass:: BaseCLI + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseCLI.add_status_cmd + ~BaseCLI.check_module_name + ~BaseCLI.from_config + ~BaseCLI.from_config_preflight + ~BaseCLI.kickoff_local_job + ~BaseCLI.kickoff_slurm_job + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.cli.SlurmManager.rst b/_sources/_autosummary/sup3r.utilities.cli.SlurmManager.rst new file mode 100644 index 000000000..e7f2f94a5 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.cli.SlurmManager.rst @@ -0,0 +1,54 @@ +sup3r.utilities.cli.SlurmManager +================================ + +.. 
currentmodule:: sup3r.utilities.cli + +.. autoclass:: SlurmManager + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SlurmManager.change_qos + ~SlurmManager.check_status + ~SlurmManager.check_status_using_job_id + ~SlurmManager.format_walltime + ~SlurmManager.hold + ~SlurmManager.make_path + ~SlurmManager.make_sh + ~SlurmManager.parse_queue_str + ~SlurmManager.query_queue + ~SlurmManager.release + ~SlurmManager.rm + ~SlurmManager.s + ~SlurmManager.sbatch + ~SlurmManager.scancel + ~SlurmManager.scontrol + ~SlurmManager.submit + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SlurmManager.MAX_NAME_LEN + ~SlurmManager.QCOL_ID + ~SlurmManager.QCOL_NAME + ~SlurmManager.QCOL_STATUS + ~SlurmManager.QSKIP + ~SlurmManager.SQ_FORMAT + ~SlurmManager.USER + ~SlurmManager.queue + ~SlurmManager.queue_job_ids + ~SlurmManager.queue_job_names + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.cli.rst b/_sources/_autosummary/sup3r.utilities.cli.rst new file mode 100644 index 000000000..a6f99e932 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.cli.rst @@ -0,0 +1,32 @@ +sup3r.utilities.cli +=================== + +.. automodule:: sup3r.utilities.cli + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseCLI + SlurmManager + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.era_downloader.EraDownloader.rst b/_sources/_autosummary/sup3r.utilities.era_downloader.EraDownloader.rst new file mode 100644 index 000000000..495c31c22 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.era_downloader.EraDownloader.rst @@ -0,0 +1,52 @@ +sup3r.utilities.era\_downloader.EraDownloader +============================================= + +.. currentmodule:: sup3r.utilities.era_downloader + +.. autoclass:: EraDownloader + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~EraDownloader.add_pressure + ~EraDownloader.all_vars_exist + ~EraDownloader.convert_z + ~EraDownloader.download_file + ~EraDownloader.download_process_combine + ~EraDownloader.get_cds_client + ~EraDownloader.get_hours + ~EraDownloader.get_monthly_file + ~EraDownloader.get_tmp_file + ~EraDownloader.make_yearly_file + ~EraDownloader.make_yearly_var_file + ~EraDownloader.prep_var_lists + ~EraDownloader.process_and_combine + ~EraDownloader.process_level_file + ~EraDownloader.process_surface_file + ~EraDownloader.run + ~EraDownloader.run_for_var + ~EraDownloader.run_month + ~EraDownloader.run_qa + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~EraDownloader.days + ~EraDownloader.level_file + ~EraDownloader.monthly_file + ~EraDownloader.surface_file + ~EraDownloader.variables + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.era_downloader.rst b/_sources/_autosummary/sup3r.utilities.era_downloader.rst new file mode 100644 index 000000000..69cbf2209 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.era_downloader.rst @@ -0,0 +1,31 @@ +sup3r.utilities.era\_downloader +=============================== + +.. automodule:: sup3r.utilities.era_downloader + + + + + + + + + + + + .. rubric:: Classes + + .. 
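``BaseCLI.kickoff_slurm_job`` and ``SlurmManager.sbatch`` above follow the common write-a-script-then-submit pattern. A generic illustration of that pattern — the function name and arguments here are hypothetical, and only standard ``sbatch`` flags are used:

.. code-block:: python

    import subprocess
    from pathlib import Path

    def kickoff_slurm_sketch(name, cmd, walltime='01:00:00', alloc='sup3r'):
        """Write a batch script and submit it with sbatch (sketch)."""
        script = Path(f'{name}.sh')
        script.write_text(
            '#!/bin/bash\n'
            f'#SBATCH --job-name={name}\n'
            f'#SBATCH --time={walltime}\n'
            f'#SBATCH --account={alloc}\n'
            f'{cmd}\n'
        )
        out = subprocess.run(['sbatch', str(script)],
                             capture_output=True, text=True, check=True)
        return out.stdout  # e.g. "Submitted batch job 123456"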
autosummary:: + :toctree: + :template: custom-class-template.rst + + EraDownloader + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.interpolation.Interpolator.rst b/_sources/_autosummary/sup3r.utilities.interpolation.Interpolator.rst new file mode 100644 index 000000000..7f06effde --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.interpolation.Interpolator.rst @@ -0,0 +1,25 @@ +sup3r.utilities.interpolation.Interpolator +========================================== + +.. currentmodule:: sup3r.utilities.interpolation + +.. autoclass:: Interpolator + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Interpolator.get_level_masks + ~Interpolator.interp_to_level + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.interpolation.rst b/_sources/_autosummary/sup3r.utilities.interpolation.rst new file mode 100644 index 000000000..6f42e068f --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.interpolation.rst @@ -0,0 +1,31 @@ +sup3r.utilities.interpolation +============================= + +.. automodule:: sup3r.utilities.interpolation + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Interpolator + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.rst new file mode 100644 index 000000000..f912622de --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.CoarseMseLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.CoarseMseLoss +=========================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: CoarseMseLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~CoarseMseLoss.call + ~CoarseMseLoss.from_config + ~CoarseMseLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~CoarseMseLoss.MSE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.rst new file mode 100644 index 000000000..0ec34e636 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.ExpLoss.rst @@ -0,0 +1,26 @@ +sup3r.utilities.loss\_metrics.ExpLoss +===================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: ExpLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExpLoss.call + ~ExpLoss.from_config + ~ExpLoss.get_config + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.rst new file mode 100644 index 000000000..6c925ff85 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.LowResLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.LowResLoss +======================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: LowResLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~LowResLoss.call + ~LowResLoss.from_config + ~LowResLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LowResLoss.EX_LOSS_METRICS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.rst new file mode 100644 index 000000000..0c0b98313 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.MaterialDerivativeLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.MaterialDerivativeLoss +==================================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: MaterialDerivativeLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MaterialDerivativeLoss.call + ~MaterialDerivativeLoss.from_config + ~MaterialDerivativeLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MaterialDerivativeLoss.LOSS_METRIC + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.rst new file mode 100644 index 000000000..9c4522b2c --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdLoss.rst @@ -0,0 +1,26 @@ +sup3r.utilities.loss\_metrics.MmdLoss +===================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: MmdLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MmdLoss.call + ~MmdLoss.from_config + ~MmdLoss.get_config + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.rst new file mode 100644 index 000000000..4e34eb5bf --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.MmdMseLoss.rst @@ -0,0 +1,33 @@ +sup3r.utilities.loss\_metrics.MmdMseLoss +======================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: MmdMseLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MmdMseLoss.call + ~MmdMseLoss.from_config + ~MmdMseLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MmdMseLoss.MMD_LOSS + ~MmdMseLoss.MSE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.rst new file mode 100644 index 000000000..3e08795e3 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.MseExpLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.MseExpLoss +======================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: MseExpLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MseExpLoss.call + ~MseExpLoss.from_config + ~MseExpLoss.get_config + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~MseExpLoss.MSE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.rst new file mode 100644 index 000000000..68fb866f0 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesLoss.rst @@ -0,0 +1,33 @@ +sup3r.utilities.loss\_metrics.SpatialExtremesLoss +================================================= + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: SpatialExtremesLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SpatialExtremesLoss.call + ~SpatialExtremesLoss.from_config + ~SpatialExtremesLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SpatialExtremesLoss.EX_LOSS + ~SpatialExtremesLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.rst new file mode 100644 index 000000000..b09b8e4c0 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialExtremesOnlyLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.SpatialExtremesOnlyLoss +===================================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: SpatialExtremesOnlyLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SpatialExtremesOnlyLoss.call + ~SpatialExtremesOnlyLoss.from_config + ~SpatialExtremesOnlyLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SpatialExtremesOnlyLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.rst new file mode 100644 index 000000000..991819ea4 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatialFftOnlyLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.SpatialFftOnlyLoss +================================================ + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: SpatialFftOnlyLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SpatialFftOnlyLoss.call + ~SpatialFftOnlyLoss.from_config + ~SpatialFftOnlyLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SpatialFftOnlyLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.rst new file mode 100644 index 000000000..31b566d10 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalExtremesLoss.rst @@ -0,0 +1,34 @@ +sup3r.utilities.loss\_metrics.SpatiotemporalExtremesLoss +======================================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: SpatiotemporalExtremesLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~SpatiotemporalExtremesLoss.call + ~SpatiotemporalExtremesLoss.from_config + ~SpatiotemporalExtremesLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SpatiotemporalExtremesLoss.MAE_LOSS + ~SpatiotemporalExtremesLoss.S_EX_LOSS + ~SpatiotemporalExtremesLoss.T_EX_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.rst new file mode 100644 index 000000000..1dee4b111 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.SpatiotemporalFftOnlyLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.SpatiotemporalFftOnlyLoss +======================================================= + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: SpatiotemporalFftOnlyLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SpatiotemporalFftOnlyLoss.call + ~SpatiotemporalFftOnlyLoss.from_config + ~SpatiotemporalFftOnlyLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SpatiotemporalFftOnlyLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.rst new file mode 100644 index 000000000..334536fd3 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.StExtremesFftLoss.rst @@ -0,0 +1,26 @@ +sup3r.utilities.loss\_metrics.StExtremesFftLoss +=============================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: StExtremesFftLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~StExtremesFftLoss.call + ~StExtremesFftLoss.from_config + ~StExtremesFftLoss.get_config + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.rst new file mode 100644 index 000000000..65a61f8db --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesLoss.rst @@ -0,0 +1,33 @@ +sup3r.utilities.loss\_metrics.TemporalExtremesLoss +================================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autoclass:: TemporalExtremesLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TemporalExtremesLoss.call + ~TemporalExtremesLoss.from_config + ~TemporalExtremesLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TemporalExtremesLoss.EX_LOSS + ~TemporalExtremesLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.rst new file mode 100644 index 000000000..6368c55a3 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.TemporalExtremesOnlyLoss.rst @@ -0,0 +1,32 @@ +sup3r.utilities.loss\_metrics.TemporalExtremesOnlyLoss +====================================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. 
autoclass:: TemporalExtremesOnlyLoss + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TemporalExtremesOnlyLoss.call + ~TemporalExtremesOnlyLoss.from_config + ~TemporalExtremesOnlyLoss.get_config + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TemporalExtremesOnlyLoss.MAE_LOSS + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.rst new file mode 100644 index 000000000..1bf0276af --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.gaussian_kernel.rst @@ -0,0 +1,6 @@ +sup3r.utilities.loss\_metrics.gaussian\_kernel +============================================== + +.. currentmodule:: sup3r.utilities.loss_metrics + +.. autofunction:: gaussian_kernel \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.loss_metrics.rst b/_sources/_autosummary/sup3r.utilities.loss_metrics.rst new file mode 100644 index 000000000..11a13ab73 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.loss_metrics.rst @@ -0,0 +1,52 @@ +sup3r.utilities.loss\_metrics +============================= + +.. automodule:: sup3r.utilities.loss_metrics + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + gaussian_kernel + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + CoarseMseLoss + ExpLoss + LowResLoss + MaterialDerivativeLoss + MmdLoss + MmdMseLoss + MseExpLoss + SpatialExtremesLoss + SpatialExtremesOnlyLoss + SpatialFftOnlyLoss + SpatiotemporalExtremesLoss + SpatiotemporalFftOnlyLoss + StExtremesFftLoss + TemporalExtremesLoss + TemporalExtremesOnlyLoss + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.rst new file mode 100644 index 000000000..fdf2f4d01 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterCC.rst @@ -0,0 +1,56 @@ +sup3r.utilities.pytest.helpers.BatchHandlerTesterCC +=================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: BatchHandlerTesterCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerTesterCC.check_enhancement_factors + ~BatchHandlerTesterCC.check_features + ~BatchHandlerTesterCC.check_shared_attr + ~BatchHandlerTesterCC.enqueue_batch + ~BatchHandlerTesterCC.enqueue_batches + ~BatchHandlerTesterCC.get_batch + ~BatchHandlerTesterCC.get_container_index + ~BatchHandlerTesterCC.get_queue + ~BatchHandlerTesterCC.get_random_container + ~BatchHandlerTesterCC.init_samplers + ~BatchHandlerTesterCC.log_queue_info + ~BatchHandlerTesterCC.post_init_log + ~BatchHandlerTesterCC.post_proc + ~BatchHandlerTesterCC.preflight + ~BatchHandlerTesterCC.sample_batch + ~BatchHandlerTesterCC.start + ~BatchHandlerTesterCC.stop + ~BatchHandlerTesterCC.transform + ~BatchHandlerTesterCC.wrap + + + + + + .. rubric:: Attributes + + .. 
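The loss-metrics module above pairs ``gaussian_kernel`` with ``MmdLoss``. For orientation, a minimal numpy sketch of the maximum mean discrepancy statistic with a Gaussian kernel — the sup3r losses are implemented against TensorFlow tensors and may differ in detail:

.. code-block:: python

    import numpy as np

    def gaussian_kernel_matrix(x, y, sigma=1.0):
        """Pairwise Gaussian (RBF) kernel between two sample sets."""
        d2 = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
        return np.exp(-d2 / (2 * sigma ** 2))

    def mmd(x, y, sigma=1.0):
        """Squared maximum mean discrepancy between samples x and y."""
        kxx = gaussian_kernel_matrix(x, x, sigma).mean()
        kyy = gaussian_kernel_matrix(y, y, sigma).mean()
        kxy = gaussian_kernel_matrix(x, y, sigma).mean()
        return kxx + kyy - 2 * kxy

    rng = np.random.default_rng(0)
    a = rng.normal(size=(100, 2))
    b = rng.normal(1.0, 1.0, size=(100, 2))
    print(mmd(a, b))   # larger when the two distributions differ

MMD is attractive as a GAN training diagnostic because it compares whole sample distributions rather than paired pixels.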
autosummary:: + + ~BatchHandlerTesterCC.container_weights + ~BatchHandlerTesterCC.data + ~BatchHandlerTesterCC.features + ~BatchHandlerTesterCC.hr_shape + ~BatchHandlerTesterCC.lr_shape + ~BatchHandlerTesterCC.queue_shape + ~BatchHandlerTesterCC.queue_thread + ~BatchHandlerTesterCC.running + ~BatchHandlerTesterCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.rst new file mode 100644 index 000000000..882578656 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterDC.rst @@ -0,0 +1,60 @@ +sup3r.utilities.pytest.helpers.BatchHandlerTesterDC +=================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: BatchHandlerTesterDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BatchHandlerTesterDC.check_enhancement_factors + ~BatchHandlerTesterDC.check_features + ~BatchHandlerTesterDC.check_shared_attr + ~BatchHandlerTesterDC.enqueue_batch + ~BatchHandlerTesterDC.enqueue_batches + ~BatchHandlerTesterDC.get_batch + ~BatchHandlerTesterDC.get_container_index + ~BatchHandlerTesterDC.get_queue + ~BatchHandlerTesterDC.get_random_container + ~BatchHandlerTesterDC.init_samplers + ~BatchHandlerTesterDC.log_queue_info + ~BatchHandlerTesterDC.post_init_log + ~BatchHandlerTesterDC.post_proc + ~BatchHandlerTesterDC.preflight + ~BatchHandlerTesterDC.sample_batch + ~BatchHandlerTesterDC.start + ~BatchHandlerTesterDC.stop + ~BatchHandlerTesterDC.transform + ~BatchHandlerTesterDC.update_record + ~BatchHandlerTesterDC.update_weights + ~BatchHandlerTesterDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BatchHandlerTesterDC.container_weights + ~BatchHandlerTesterDC.data + ~BatchHandlerTesterDC.features + ~BatchHandlerTesterDC.hr_shape + ~BatchHandlerTesterDC.lr_shape + ~BatchHandlerTesterDC.queue_shape + ~BatchHandlerTesterDC.queue_thread + ~BatchHandlerTesterDC.running + ~BatchHandlerTesterDC.shape + ~BatchHandlerTesterDC.spatial_weights + ~BatchHandlerTesterDC.temporal_weights + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.rst new file mode 100644 index 000000000..8706c1eae --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.BatchHandlerTesterFactory +======================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: BatchHandlerTesterFactory \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.rst new file mode 100644 index 000000000..466921c03 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DualSamplerTesterCC.rst @@ -0,0 +1,45 @@ +sup3r.utilities.pytest.helpers.DualSamplerTesterCC +================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: DualSamplerTesterCC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~DualSamplerTesterCC.check_for_consistent_shapes + ~DualSamplerTesterCC.get_features + ~DualSamplerTesterCC.get_middle_days + ~DualSamplerTesterCC.get_sample_index + ~DualSamplerTesterCC.post_init_log + ~DualSamplerTesterCC.preflight + ~DualSamplerTesterCC.reduce_high_res_sub_daily + ~DualSamplerTesterCC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DualSamplerTesterCC.data + ~DualSamplerTesterCC.hr_exo_features + ~DualSamplerTesterCC.hr_features + ~DualSamplerTesterCC.hr_features_ind + ~DualSamplerTesterCC.hr_out_features + ~DualSamplerTesterCC.hr_sample_shape + ~DualSamplerTesterCC.lr_only_features + ~DualSamplerTesterCC.sample_shape + ~DualSamplerTesterCC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummyData.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummyData.rst new file mode 100644 index 000000000..e5cbc7a5f --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummyData.rst @@ -0,0 +1,32 @@ +sup3r.utilities.pytest.helpers.DummyData +======================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: DummyData + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DummyData.post_init_log + ~DummyData.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DummyData.data + ~DummyData.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.rst new file mode 100644 index 000000000..74b190d81 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.DummySampler.rst @@ -0,0 +1,41 @@ +sup3r.utilities.pytest.helpers.DummySampler +=========================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: DummySampler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DummySampler.get_sample_index + ~DummySampler.post_init_log + ~DummySampler.preflight + ~DummySampler.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DummySampler.data + ~DummySampler.hr_exo_features + ~DummySampler.hr_features + ~DummySampler.hr_features_ind + ~DummySampler.hr_out_features + ~DummySampler.hr_sample_shape + ~DummySampler.lr_only_features + ~DummySampler.sample_shape + ~DummySampler.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.rst new file mode 100644 index 000000000..6ffda8476 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTester.rst @@ -0,0 +1,41 @@ +sup3r.utilities.pytest.helpers.SamplerTester +============================================ + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: SamplerTester + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SamplerTester.get_sample_index + ~SamplerTester.post_init_log + ~SamplerTester.preflight + ~SamplerTester.wrap + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SamplerTester.data + ~SamplerTester.hr_exo_features + ~SamplerTester.hr_features + ~SamplerTester.hr_features_ind + ~SamplerTester.hr_out_features + ~SamplerTester.hr_sample_shape + ~SamplerTester.lr_only_features + ~SamplerTester.sample_shape + ~SamplerTester.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.rst new file mode 100644 index 000000000..fdf19392e --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.SamplerTesterDC.rst @@ -0,0 +1,42 @@ +sup3r.utilities.pytest.helpers.SamplerTesterDC +============================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autoclass:: SamplerTesterDC + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SamplerTesterDC.get_sample_index + ~SamplerTesterDC.post_init_log + ~SamplerTesterDC.preflight + ~SamplerTesterDC.update_weights + ~SamplerTesterDC.wrap + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SamplerTesterDC.data + ~SamplerTesterDC.hr_exo_features + ~SamplerTesterDC.hr_features + ~SamplerTesterDC.hr_features_ind + ~SamplerTesterDC.hr_out_features + ~SamplerTesterDC.hr_sample_shape + ~SamplerTesterDC.lr_only_features + ~SamplerTesterDC.sample_shape + ~SamplerTesterDC.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.rst new file mode 100644 index 000000000..9dda22e0c --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_collect_chunks.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_collect\_chunks +==================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: make_collect_chunks \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.rst new file mode 100644 index 000000000..91e69890e --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_cs_ratio_files.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_fake\_cs\_ratio\_files +=========================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: make_fake_cs_ratio_files \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.rst new file mode 100644 index 000000000..ff981c4ac --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_dset.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_fake\_dset +=============================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. 
autofunction:: make_fake_dset \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.rst new file mode 100644 index 000000000..1c5953e69 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_h5_chunks.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_fake\_h5\_chunks +===================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: make_fake_h5_chunks \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.rst new file mode 100644 index 000000000..0c991cfb5 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_nc_file.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_fake\_nc\_file +=================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: make_fake_nc_file \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.rst new file mode 100644 index 000000000..18809e491 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.make_fake_tif.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.make\_fake\_tif +============================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: make_fake_tif \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.rst new file mode 100644 index 000000000..bc519d4e1 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.rst @@ -0,0 +1,51 @@ +sup3r.utilities.pytest.helpers +============================== + +.. automodule:: sup3r.utilities.pytest.helpers + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + BatchHandlerTesterFactory + make_collect_chunks + make_fake_cs_ratio_files + make_fake_dset + make_fake_h5_chunks + make_fake_nc_file + make_fake_tif + test_sampler_factory + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BatchHandlerTesterCC + BatchHandlerTesterDC + DualSamplerTesterCC + DummyData + DummySampler + SamplerTester + SamplerTesterDC + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.rst b/_sources/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.rst new file mode 100644 index 000000000..0961fd3fc --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.helpers.test_sampler_factory.rst @@ -0,0 +1,6 @@ +sup3r.utilities.pytest.helpers.test\_sampler\_factory +===================================================== + +.. currentmodule:: sup3r.utilities.pytest.helpers + +.. autofunction:: test_sampler_factory \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.pytest.rst b/_sources/_autosummary/sup3r.utilities.pytest.rst new file mode 100644 index 000000000..85755d8e1 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.pytest.rst @@ -0,0 +1,30 @@ +sup3r.utilities.pytest +====================== + +.. automodule:: sup3r.utilities.pytest + + + + + + + + + + + + + + + + + + + +.. 
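The pytest helpers above fabricate small synthetic inputs for tests. A rough stand-in for what a helper like ``make_fake_dset`` produces — the real helper adds more coordinates and attributes, and the dimension and feature names below are assumptions:

.. code-block:: python

    import numpy as np
    import pandas as pd
    import xarray as xr

    def make_fake_dset_sketch(shape=(4, 5, 6), features=('u_100m',)):
        """Build a small random xarray dataset for tests (sketch)."""
        lat = np.linspace(40, 39, shape[0])
        lon = np.linspace(-105, -104, shape[1])
        time = pd.date_range('2020-01-01', periods=shape[2], freq='h')
        rng = np.random.default_rng(0)
        data = {f: (('latitude', 'longitude', 'time'), rng.random(shape))
                for f in features}
        return xr.Dataset(data, coords={'latitude': lat,
                                        'longitude': lon,
                                        'time': time})

    ds = make_fake_dset_sketch()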
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + helpers + diff --git a/_sources/_autosummary/sup3r.utilities.rst b/_sources/_autosummary/sup3r.utilities.rst new file mode 100644 index 000000000..a5464793c --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.rst @@ -0,0 +1,43 @@ +sup3r.utilities +=============== + +.. automodule:: sup3r.utilities + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ModuleName + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + cli + era_downloader + interpolation + loss_metrics + pytest + utilities + diff --git a/_sources/_autosummary/sup3r.utilities.utilities.Timer.rst b/_sources/_autosummary/sup3r.utilities.utilities.Timer.rst new file mode 100644 index 000000000..fbd3ddf75 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.Timer.rst @@ -0,0 +1,32 @@ +sup3r.utilities.utilities.Timer +=============================== + +.. currentmodule:: sup3r.utilities.utilities + +.. autoclass:: Timer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Timer.start + ~Timer.stop + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Timer.elapsed + ~Timer.elapsed_str + + \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.generate_random_string.rst b/_sources/_autosummary/sup3r.utilities.utilities.generate_random_string.rst new file mode 100644 index 000000000..22374e6f9 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.generate_random_string.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.generate\_random\_string +================================================== + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: generate_random_string \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.nn_fill_array.rst b/_sources/_autosummary/sup3r.utilities.utilities.nn_fill_array.rst new file mode 100644 index 000000000..efc8c0c0a --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.nn_fill_array.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.nn\_fill\_array +========================================= + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: nn_fill_array \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.pd_date_range.rst b/_sources/_autosummary/sup3r.utilities.utilities.pd_date_range.rst new file mode 100644 index 000000000..a993e982f --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.pd_date_range.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.pd\_date\_range +========================================= + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: pd_date_range \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.preprocess_datasets.rst b/_sources/_autosummary/sup3r.utilities.utilities.preprocess_datasets.rst new file mode 100644 index 000000000..26a66a3da --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.preprocess_datasets.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.preprocess\_datasets +============================================== + +.. currentmodule:: sup3r.utilities.utilities + +.. 
autofunction:: preprocess_datasets \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.rst b/_sources/_autosummary/sup3r.utilities.utilities.rst new file mode 100644 index 000000000..ea22352de --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.rst @@ -0,0 +1,46 @@ +sup3r.utilities.utilities +========================= + +.. automodule:: sup3r.utilities.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + generate_random_string + nn_fill_array + pd_date_range + preprocess_datasets + safe_cast + safe_serialize + spatial_coarsening + temporal_coarsening + xr_open_mfdataset + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Timer + + + + + + + + + diff --git a/_sources/_autosummary/sup3r.utilities.utilities.safe_cast.rst b/_sources/_autosummary/sup3r.utilities.utilities.safe_cast.rst new file mode 100644 index 000000000..41f91429d --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.safe_cast.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.safe\_cast +==================================== + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: safe_cast \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.safe_serialize.rst b/_sources/_autosummary/sup3r.utilities.utilities.safe_serialize.rst new file mode 100644 index 000000000..83350c0b2 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.safe_serialize.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.safe\_serialize +========================================= + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: safe_serialize \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.spatial_coarsening.rst b/_sources/_autosummary/sup3r.utilities.utilities.spatial_coarsening.rst new file mode 100644 index 000000000..4d137b75e --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.spatial_coarsening.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.spatial\_coarsening +============================================= + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: spatial_coarsening \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.temporal_coarsening.rst b/_sources/_autosummary/sup3r.utilities.utilities.temporal_coarsening.rst new file mode 100644 index 000000000..12068d7ba --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.temporal_coarsening.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.temporal\_coarsening +============================================== + +.. currentmodule:: sup3r.utilities.utilities + +.. autofunction:: temporal_coarsening \ No newline at end of file diff --git a/_sources/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.rst b/_sources/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.rst new file mode 100644 index 000000000..ac2932aa9 --- /dev/null +++ b/_sources/_autosummary/sup3r.utilities.utilities.xr_open_mfdataset.rst @@ -0,0 +1,6 @@ +sup3r.utilities.utilities.xr\_open\_mfdataset +============================================= + +.. currentmodule:: sup3r.utilities.utilities + +.. 
autofunction:: xr_open_mfdataset \ No newline at end of file diff --git a/_sources/_cli/cli.rst b/_sources/_cli/cli.rst new file mode 100644 index 000000000..b3a40aaa2 --- /dev/null +++ b/_sources/_cli/cli.rst @@ -0,0 +1,6 @@ +Command Line Interfaces (CLIs) +=============================== + +.. toctree:: + + sup3r diff --git a/_sources/_cli/sup3r.rst b/_sources/_cli/sup3r.rst new file mode 100644 index 000000000..b31aee9b2 --- /dev/null +++ b/_sources/_cli/sup3r.rst @@ -0,0 +1,3 @@ +.. click:: sup3r.cli:main + :prog: sup3r + :show-nested: diff --git a/_sources/api.rst b/_sources/api.rst new file mode 100644 index 000000000..ef2adc14d --- /dev/null +++ b/_sources/api.rst @@ -0,0 +1,6 @@ +.. autosummary:: + :toctree: _autosummary + :template: custom-module-template.rst + :recursive: + + sup3r diff --git a/_sources/examples/examples.rst b/_sources/examples/examples.rst new file mode 100644 index 000000000..c60da1750 --- /dev/null +++ b/_sources/examples/examples.rst @@ -0,0 +1,7 @@ +Examples +======== + +.. toctree:: + + sup3rcc + sup3rwind diff --git a/_sources/examples/sup3rcc.rst b/_sources/examples/sup3rcc.rst new file mode 100644 index 000000000..b51748566 --- /dev/null +++ b/_sources/examples/sup3rcc.rst @@ -0,0 +1 @@ +.. include:: ../../../examples/sup3rcc/README.rst \ No newline at end of file diff --git a/_sources/examples/sup3rwind.rst b/_sources/examples/sup3rwind.rst new file mode 100644 index 000000000..9f33c3fe9 --- /dev/null +++ b/_sources/examples/sup3rwind.rst @@ -0,0 +1 @@ +.. include:: ../../../examples/sup3rwind/README.rst \ No newline at end of file diff --git a/_sources/index.rst b/_sources/index.rst new file mode 100644 index 000000000..6b7d5b0aa --- /dev/null +++ b/_sources/index.rst @@ -0,0 +1,15 @@ +.. toctree:: + :hidden: + + Home page + Installation and Usage + Examples + API reference <_autosummary/sup3r> + CLI reference <_cli/cli> + +################# +Welcome to Sup3r! +################# + +.. include:: ../../README.rst + :start-after: inclusion-intro diff --git a/_sources/misc/installation.rst b/_sources/misc/installation.rst new file mode 100644 index 000000000..baa709b5a --- /dev/null +++ b/_sources/misc/installation.rst @@ -0,0 +1,6 @@ +Installation +============ + +.. include:: ../../../README.rst + :start-after: Installing sup3r + :end-before: Recommended Citation diff --git a/_sources/misc/installation_usage.rst b/_sources/misc/installation_usage.rst new file mode 100644 index 000000000..c56c1b2d0 --- /dev/null +++ b/_sources/misc/installation_usage.rst @@ -0,0 +1,6 @@ +Installation and Usage +====================== + +.. toctree:: + + installation diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 000000000..2a9e4114a --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,914 @@ +/* + * Sphinx stylesheet -- basic theme. 
+ */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin-top: 10px; +} + +ul.search li { + padding: 5px 0; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + 
-moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + 
text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + 
+dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display 
---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/check-solid.svg b/_static/check-solid.svg new file mode 100644 index 000000000..92fad4b5c --- /dev/null +++ b/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/_static/clipboard.min.js b/_static/clipboard.min.js new file mode 100644 index 000000000..54b3c4638 --- /dev/null +++ b/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/_static/copybutton.css b/_static/copybutton.css new file mode 100644 index 000000000..f1916ec7d --- /dev/null +++ b/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *
<p class="o-tooltip--left" data-tooltip="Hey">Short</p>
+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/_static/copybutton.js b/_static/copybutton.js new file mode 100644 index 000000000..2ea7ff3e2 --- /dev/null +++ b/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 'copy_failure': '复制失败', + }, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if (window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the +// tooltip 
and icon change, so that we can hide the icon before changing back. +var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? 
+ var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = filterText(target, exclude); + return formatCopyText(text, '', false, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/_static/copybutton_funcs.js b/_static/copybutton_funcs.js new file mode 100644 index 000000000..dbe1aaad7 --- /dev/null +++ b/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/_static/custom.css b/_static/custom.css new file mode 100644 index 000000000..85fec2de9 --- /dev/null +++ b/_static/custom.css @@ -0,0 +1,14 @@ +.wy-nav-content { + max-width: 60% !important; +} + +.wy-side-nav-search { + display: block; + width: 300px; + padding: 0.809em; + margin-bottom: 0.809em; + z-index: 200; + background-color: #fcfcfc; + text-align: center; + color: #fcfcfc; +} diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 000000000..0398ebb9f --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,149 @@ +/* + * Base JavaScript utilities for all Sphinx HTML documentation. + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 000000000..dbc9e921b --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + 
VERSION: '0.2.1.dev62', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..a858a410e4faa62ce324d814e4b816fff83a6fb3 GIT binary patch literal 286 zcmV+(0pb3MP)s`hMrGg#P~ix$^RISR_I47Y|r1 z_CyJOe}D1){SET-^Amu_i71Lt6eYfZjRyw@I6OQAIXXHDfiX^GbOlHe=Ae4>0m)d(f|Me07*qoM6N<$f}vM^LjV8( literal 0 HcmV?d00001 diff --git a/_static/images/logo_binder.svg b/_static/images/logo_binder.svg new file mode 100644 index 000000000..45fecf751 --- /dev/null +++ b/_static/images/logo_binder.svg @@ -0,0 +1,19 @@ + + + + +logo + + + + + + + + diff --git a/_static/images/logo_colab.png b/_static/images/logo_colab.png new file mode 100644 index 0000000000000000000000000000000000000000..b7560ec216b2d1b6f77855525fe966c741833428 GIT binary patch literal 7601 zcmeI1^;ZuSFsz@@e&Hu|o~yU_Jn_7Cy4b4(M?f2S`owL6D#ysoM3Rsb4MX|l6hl52QIsX*kmQMmFZ6Xu|Wk1r15+E^+Er?@^MFpIE zq!=C|$Nn*F4aR@N|DPxS6E^f|7Z=H%T>vS)_|-RkkprWw zSGb9TlwheKfo{U5J)kX1$cHtEFe}Pa2Au|?^hCk%8gdI}l*ypIUsLXLMy9W|q-ZAw zJpZkmGRa|!=7CyrA#Bs2?5UdZ1^pDaji}+DimdE$JB@FrJvAIxy*3v#1-8OwO;OS$ zsv*P<%V4%?*Keca@o9}LMOs~ph)z!AU;${{23k&Gq7A@nDP{*I1HiTZ=Q*54?Bok) zp6L_4HhiE->YU6{m*{7O7j#SkBb9JPo!k8TD0H6{ zdSE-mmA!Js{}(?qh${0wB7Rx{*F=43D>?j3kU8MX&`sQJ+wHUD6eEr7j%*2x%5|a8 z*;AP<*tCQwj`Af5vvGHXF=9{cdzV2BMI@}VHgmol)^f>Ectcls5p3dW?40~ADd>ki za*q>v=nQQmGI5&BS!GU|iX9>qB9r=_Qm9t_Qwi+zWI zc%%oQ`P}{ZXk^}?+H!u2my^C#TD%=V|3pb$MXhJ07bx-^=oxj?ZSk!---?f2cs8_& z8?O{lvxMDZi7gsdvoZ2bmyLYs1!O1RMC)1Wv`9p-I(1pfww9siX;Lu>^>_Y=g+OHo zPm(N|h?h5Z>yze~wKtPBRv(mZx*A4R%bganw#OV=SE*=J^b#~(YfIcj(k=(i37PY7 zUiawSj8SKczPk-^=SwOOb%X+bRcFm+=N1r{{CA<=kbVq8cFGcLSGqM5FUxChbc&`o9$mUo4kZLh+%KP6m zDMd3SH~N5fH8J+8;bpxhi-9i}^PV(^u?zb49_c!Ow_!1w%w(RLEeXJoMU>Nnlc8sd z<;K$L<-WwC`NJ0PWzB59Pzbg|FZS-=xlaWDjM-PXIJ;r4qyFnFc_<-VDg5P=Zk0Pd z%f7GFg?FzC??rmjG^Ib<{cfE+dud-%)Ep=a8Q(Z-Fng}&CvD+JPdO)mL-$u4eH#LJ z7heze_GA*{rYAL;ejb#P;oTD_*Rgrw;)1(e;+zGN{)D)k?o$t&BGWEM!Hn}LQm1jd zf@B0+pEzI&qREI@Qr=#K;u~Fs)Saf>_1X|EQGz0D_a|>)d?IOck($^4a`v4Hc6sKV zgm7-VK|sz+(A$-L0BnhZ#qKk${svcv4#QmCcMCb>t9=e+^b49rrK@5C@-Qs{PN6H8Tb^nIy#)VA`)o~+c~m2m9bN}EcwI`-IP+fB&d^;19iX9{XvM6VYHE(fX{BIU zjMLmkl7p}TslG;@C!HvX=7hVy6cGIM{h7hxrM^q{j`Y4Ux1nI*k9MB?ToSK!Qpvy< zT~`Qofe|OBk8vza_r02Y;~+V6WKn(J{_?BR9@-`D&Q;nTEx7+j36Qk0(l3TahUki} z;O-FUuOnNVcc-Q3c?;A)ZpgKC-Sa8`{c}MNm$j))KPPdL#xR*0kxQz|V-;WZxI+?u zFB#~P=os0);b?+6$-z@yE%k*^!0x)K_!|4!L%ADpXqe`pG|8A+rht_!jZid=wb1j& zjPG_SeS*{ef!h*}~k!*;Aar3`tCeHO@>c{c>ak(x3f^w3+_zT>j)aP_hVoV4~^0L<5^eu_y z-@tf0YyH-(#5uTh`s3DIhpc^`UysO{L8JS|z=qnHFb)UqfMnC!Hu$=eiC+a;9t*X6R?Q8POFRq?_ak1&yP&YF6`@B=qySm8MJ)n*E zdS-&E$a$DMp!}+S%^(Q))m7O$Qece1ZtB+=H{**c0@XT53VGNeFhvnDVocubi6~ru z2X&(|kp)joFLfuG?i;d=&CZBQhez8i+lhV+c;_pEL6+Teo z1qclCF-EO~XWkH3u|unGI79@`+YLi}rF>PbBrn{PBKWF&S%K6N0u^DRx7qImnJ`+c z>Nu)TJyhpyJX_!XHh^82M+YgW&cxs(vQKEpL%}iK(hH=<@)j#E3_?a*JP@0=R z;O*(_2@>IjYLClnL+$PJ-5!vt6>UJ7$KHM3LlFFMxb19oFZ_fi@{fp};$@_n8driG z`=77&{Z^0#T>t%$hCqQi8M}0E4XipxikcsB$>o9M)rBJWQDY7UrgKAy|BP4kr`Nay z??T|Ajh_U=3lem-tL$_tEhB=Rqfi?bUj`u>$a-x5WxqHn6t4)Q-NQ^Bt-k!mcE0ES z4)*3-(5@V)=EloLT~ReorH252&Q&MWWc$oiSS{!xpO?VPpJFD-QN6c=<7HxnH1nH% zeiOM22U=%trq`HCXYNL#H!P!M1{?)QcIGYWO$;mCMHnpgd?*ZE&bmylPxndZ$B}ct zIfSCaCu!a^rBwLoo4gQJnU<%~!6cPP-qxJLZM#F&_gwU%?O$k?DIF6l%q_lvcs3})|Z?z(K3q9(BASQtZlw@+<5mv 
zrHuRbc}A4I9hLtxbS!@ju49VVt1XxpO?1&$LA;?ZANYo=SC^nMg{9BY`=cZcTaR{A@r{UB@;%H zPb6QWRuvU)J>>*0FB;9Uq|hH4C$u8T=T?sz{5%Ex)I%5W6wQmtel=rJ)Tbw#E7{Z;t3U zY9a$t=WkneF<9867^HBvLp>hs;A@H}9KEwn2t!?ITQ1vZ?fCFF(RfFYplQUymF`y4 z74MX)v7%4i_52G~fn=&qCfo}f%Gj8bd7dI^BDI?AlVN_!qWMJT#NBLs^p)e{tG?D4 z)|x9tIcLpO$-JtVj=#$1Y&GRE*-xUKd_{uxiZkqAudNRF!dph|+p41KtIf(8)c1p~ zv)f(_RGUK*j_{s!DNDET-@ekFNlnTXW_=+4t5>Qbq`aWl%F6e}e)<=0U{Lp}8twQ? z8cJ&^2hntuxcqQ~k;<29cTQz)@X@zbQN?f1q??MK&`gi2me&l@XLSxN|!? z;kRJcy-ahz{?{Aj;b0E9*MKf|Q@H!%2FhB8=t$dhTtR4^%hSctIRz;tXJPme_gd zLiJlhH^x9|I?_vaIKkgiAyrk&%Mv26OqK|av#t%u9aU2`wvZ61wo4$DW%z~d9P`5& zx2Zk{zL$Z1@bGicZ})KZzJKhZaZ+P!-p1uH9dgwUQ5u(q{HyTaprSe95WuIadBYv0 zPUJ~G+G2~n0DfE{7!{N*#1+?ql4nK8`Fr?o@j~3c(>T^^trK4t~7#7WQoVk)7KnFY{iPIQ?Qh8 z+Wy6Ol|m6pA8r4lQdt@$=Z{k}^_evzh~Vt_J$aBM!djok7rTfxt8f+KVv7GM1Awc>b%$6NDX zcl~`@-PYtGJSGIO(C^sr&BxXHz*cUJnB~X1`0$kX)@xH+qFRp1^Vpt^u3V$(w;_vf zHIi3Mb+A5@Nx^>r8g^tF%=j0o$Rhli22c4xiy2SEGE=Dk)m)mzF}VhHtiP43?%dTPKbDg+Gmq$pq6DlCZzY5@`})4DTSfgVh3B z6B#;izoI9B%{^V1qYVp<-KgZ=_(;UqyU^wT{IFPQ?YY4%;yq4cbgN`_dqp${t%ytU z!T>q+J?*26u4Ak4Jx#9uHgScR2!%5YX9%5Bu@HL^VaJ7%jj#ceYuaRZk7vMWX)jq| z-rX)3v33MqZ$qaWp!X$i1yJ*rOfjP-u6noa{n9pxzJw0P2+@UNLHS(-e>##A#9xc` zAr=;dh7~9d71L_&bj`DI@l$2 zSX@4j7tZbUYdo?rgctpAg3>Z@gv1{~grCRQUGVyTbzIJ-YZt2xF(cT)W0~l-76Lw* z<6YF%D4R$X>ZEj#!c)zMi018e@?^1%&N`zutD(OQ;X8am+pNW(YhRwy*%wrsnwb#T z>n{K;55wQE!cVF)X+X12fX<x`lE~DquFsMPRoBuzhuVdR8Gv zevya06i9>q3oJZyDGUHOP=iTbBg`AO7~BI0N8$lqEvK_=V)(Du!8=i|%_2^xqnCgh zYEho!c`8!%;N8>VD_@8NZxuyDHBlxl_=CBT5z4cft(NLsv9Wo81)VnjTne@sFAuLA zv^?3h>Rc?eDzkn@SvwCF^spU#ZJuQz6o4V90>Al2JL^>6N4y0wyg#4m?khQ$4$xa5 zlJZV5E$o~arUalDb_b7lXJs*(UA*P>jQ%3i`I8pyKN?*kY>iRE7J9GGiz^nA>aIV> zaJ}>Ecj_*#d8xFcjhy+6oRGfCr^qR6C2fGkhPUT-of7St?XBEaY>?_o$Y;IiV*<6d zlA;M(1^;P>tJxjiTQAB{T$TKPJ?7HfGON=ms6=%yai0?j-qHB-nhvKj_0=^YawDhO z&$wC;93X#RhmcNJTfn66z&E;UAFGeV6TsD61;r(%GZvUrDg2W3Y2hPsTqkinoI4PV zXDedcq+P^|`+Zqpt5*;9cKbAf6!xI4X{#P5OMaE4?*}B?BIY^Gyv0%UUq}lKO~C#Z zCRamrC=OeXKTKm|4p>}U!kLbE%NxPGuZ1-DR(wWFK@>24ca*qhEt5B*r|(Kty!Pj0 zZauh;NqoiV&&q9pT#S7@dl4JUVA|RmaH8kslFhypJ_)20*ebs^yXIQA(6mi|Wph<8 z=`?$6$QX%TaWE9DLjOgi>rciE+f(9`A4gn4&jZA)v29ug%2=CtvV-U|71pd@edT~> zTA~BLBxs`RYEh%@DuEBdVt=S~6x5VXGkg4=c(|;e@Uk2Mxd}~#h^+`jF}r@=C0+HS zJcg`@*AUj2Ymhzqb=;b}w_oSQ>VH<@k=B`!P>>u5;cpo7O#PB&IQ>AS{06fz5fsXyOt1R0^~JUdht$M7yYTxq$&$T&teFpg;y{BUxXR(00s6bHa2EU zQz~u3(zn7I;Ei{D%kc60jYvUAK^2vZcMr$(Mvo58z}?>{fBdZv&KdKaM(W*WeijQ+ z;}+j>_K=@gAG4KLl-oHs1uHl{4Iq_bV|(|n23Ml=$x+vE+w;rZ1-;Cgwa-{hvjGND zf$}y#wu81ZOPZ@Wj}WbIj4k%PEPTy)sLP0Kk0C=n2lpOrPl~et;FC1`zjD=4!5coL zUgdZMo&inr`+cr#<^beEmG){%LjzXvEJ;=`hMnEYG|VU#W^gR^?uh;u@MsY$78=09EY#xn`@9X5)nb~&t)6wi zB(Y#$oL!o_oI|#`LeD5m>ezV6;nKHq@ZYvUufb~M33Qw%6`GhEa}S@P!}T;dH@bLx zG_yiKDTq6zQz}25>oeWOXpL<9!kJrP)LQASx)Dh$MiaKmk}q7TZJjtiA`M6zv_)Sn zoW-S@(c2ebP+DQqvD-S;#gt=zlveyhax!aybe(eZtlKEO1+bZSM diff --git a/_static/images/logo_jupyterhub.svg b/_static/images/logo_jupyterhub.svg new file mode 100644 index 000000000..60cfe9f22 --- /dev/null +++ b/_static/images/logo_jupyterhub.svg @@ -0,0 +1 @@ +logo_jupyterhubHub diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 000000000..c7fe6c6fa --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,192 @@ +/* + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. 
+ */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new 
RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/locales/ar/LC_MESSAGES/booktheme.mo b/_static/locales/ar/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..15541a6a375f93efed3f410c2f892174ba91aaa0 GIT binary patch literal 1541 zcmZvaJ!}+56vrn40$lhsA5n;v39lh z&UUPnK|(?lfROD&7suGxxK0yMQ@o*|gesy+5vu&(?mBh?qn-WTn|=G<|NZw~b#1&Y zus*}?#(shQ752{`;fA&4V5qyTPS`e-!60i}(%5{r(0^;D!zP`#V66yFspV7<>kvF5*86z5`j$ z=8ez}?gu^a8}Li;DtHLIQScsk0P*i2&-V)CK7WC%Q*07qAJ7GU4juvd{a%p0tQWim zeu4N7$oY#!{2XK-Ul-?ZLAIP{VE@?5Oee^5uV0WLc46~8yjT}6*2P}4uiOhI#i!V; zXZ>RTx%Xyl*phu;(K~`m+PV^FjkZeL1V5sYI1K7moHI9c!}v9+ja5U&{2;G|4YhWh zm^7}c{~g+Tn8?Tvl<2F47Nu9i{l>4L9=6uy?FAD0FiY3w(=eiIKTWvQbqOe&{(Wd5^qM9YH}dGcsaNzja& zq-k29Wt#nO;9At7wwrWQc-|J&abpO^Bu%@f8>dB@7kE5QPu!GiO4q{asH8bJN^P3Y zvv7uYJM=8C@P0OzYSB}gLot+ZNt#(-bAo@Zj6?Yfp?anYnoPQR?;I# z{we#%LHPn31obOfkF`-+I9KUX>gUj zW@~ literal 0 HcmV?d00001 diff --git a/_static/locales/ar/LC_MESSAGES/booktheme.po b/_static/locales/ar/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..34d404c6d --- /dev/null +++ b/_static/locales/ar/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ar\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "طباعة إلى PDF" + +msgid "Theme by the" +msgstr "موضوع بواسطة" + +msgid "Download source file" +msgstr "تنزيل ملف المصدر" + +msgid "open issue" +msgstr "قضية مفتوحة" + +msgid "Contents" +msgstr "محتويات" + +msgid "previous page" +msgstr "الصفحة السابقة" + +msgid "Download notebook file" +msgstr "تنزيل ملف دفتر الملاحظات" + +msgid "Copyright" +msgstr "حقوق النشر" + +msgid "Download this page" +msgstr "قم بتنزيل هذه الصفحة" + +msgid "Source repository" +msgstr "مستودع المصدر" + +msgid "By" +msgstr "بواسطة" + +msgid "repository" +msgstr "مخزن" + +msgid "Last updated on" +msgstr "آخر تحديث في" + +msgid "Toggle navigation" +msgstr "تبديل التنقل" + +msgid "Sphinx Book Theme" +msgstr "موضوع كتاب أبو الهول" + +msgid "suggest edit" +msgstr "أقترح تحرير" + +msgid "Open an issue" +msgstr "افتح قضية" + +msgid "Launch" +msgstr "إطلاق" + +msgid "Fullscreen mode" +msgstr "وضع ملء الشاشة" + +msgid "Edit this page" +msgstr "قم بتحرير هذه الصفحة" + +msgid "By the" +msgstr "بواسطة" + +msgid "next page" +msgstr "الصفحة التالية" diff --git a/_static/locales/bg/LC_MESSAGES/booktheme.mo b/_static/locales/bg/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..da95120037f8d7c52ad1f166a7682ea55ca47a01 GIT binary patch literal 1708 zcmaKqOKcQJ5QaOv0vkhkhC?jT2nh&cNkBwFYxY90B_t3AOpcMpyUpy7b@wpSL+}lZ zc_s=RKte*|C2`<_7URcy7jLdPR->G9h;qvTamgjhjlX8bj}R-P?fIs=y6dm{dw*ZQ z?5@DG8KWEHSBxzfKYoB0o|Q|4z)!5gYX#T^t^_xLtH5sX6L34Y5iEo2!IR(`P=OzV zHs>hFdG3I#!TVqr_zYYJPUie2U;izi{|U0*KVSh|x->h#2IP4+$a!{vO>jTB41AmO zFOci|8~haPLhws)C%7B@Hs=rEKFpioR`3nD2VA{8JAV*le@=j0?>TT6cp2OVJ_ot4 z*WiBe50LY0LeuQ=S0K-igY45ea0fUFvgOv1JgO3NWrRpO`@TbYDa&#J04 z8uLU~YT|f^4W5=k94AWjM}bBXBl{2ciBp+gq(T!1)p!vCDOMX z%z7FyzF${Td*=h+vjIf3x?r>F4C`bfVWiFnCdtcQkUma)U!h8cPEd+Dt17m5q+0w& 
[remaining base85 data of the Bulgarian (bg) compiled .mo catalog omitted]

literal 0
HcmV?d00001

diff --git a/_static/locales/bg/LC_MESSAGES/booktheme.po b/_static/locales/bg/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..7420c19eb
--- /dev/null
+++ b/_static/locales/bg/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Bulgarian gettext translations of the 22 Sphinx Book Theme UI strings: "Print to PDF", "Theme by the", "Download source file", "open issue", "Contents", "previous page", "Download notebook file", "Copyright", "Download this page", "Source repository", "By", "repository", "Last updated on", "Toggle navigation", "Sphinx Book Theme", "suggest edit", "Open an issue", "Launch", "Fullscreen mode", "Edit this page", "By the", "next page"]

diff --git a/_static/locales/bn/LC_MESSAGES/booktheme.mo b/_static/locales/bn/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..6b96639b726a2fa959a6419b6f8b7e0dfcce33ae
GIT binary patch
literal 1646
[base85 binary data omitted: compiled Bengali (bn) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/cs/LC_MESSAGES/booktheme.po b/_static/locales/cs/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..c6ef46908
--- /dev/null
+++ b/_static/locales/cs/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Czech translations of the same 22 UI strings]

diff --git a/_static/locales/da/LC_MESSAGES/booktheme.mo b/_static/locales/da/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..f43157d70c63ff21b4385dc36cb73f5b14eb6b01
GIT binary patch
literal 1304
[base85 binary data omitted: compiled Danish (da) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/de/LC_MESSAGES/booktheme.po b/_static/locales/de/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..4925360d4
--- /dev/null
+++ b/_static/locales/de/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: German translations of the same 22 UI strings]

diff --git a/_static/locales/el/LC_MESSAGES/booktheme.mo b/_static/locales/el/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..fca6e9355f314677f2890f5679fd79dd67a7a5a2
GIT binary patch
literal 1722
[base85 binary data omitted: compiled Greek (el) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/eo/LC_MESSAGES/booktheme.po b/_static/locales/eo/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..f7ed2262d
--- /dev/null
+++ b/_static/locales/eo/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Esperanto translations of the same 22 UI strings]

diff --git a/_static/locales/es/LC_MESSAGES/booktheme.mo b/_static/locales/es/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..ba2ee4dc22148ed53f2aeba32c5965654d4a5a2f
GIT binary patch
literal 1396
[base85 binary data omitted: compiled Spanish (es) .mo catalog]
literal 0
HcmV?d00001

diff --git a/_static/locales/es/LC_MESSAGES/booktheme.po b/_static/locales/es/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..5e0029e5f
--- /dev/null
+++ b/_static/locales/es/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Spanish translations of the same 22 UI strings]

diff --git a/_static/locales/et/LC_MESSAGES/booktheme.mo b/_static/locales/et/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..983b82391f499b67a9046c15d0dd8744650ad925
GIT binary patch
literal 1341
[base85 binary data omitted: compiled Estonian (et) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/et/LC_MESSAGES/booktheme.po b/_static/locales/et/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..8680982a9
--- /dev/null
+++ b/_static/locales/et/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Estonian translations of the same 22 UI strings]

diff --git a/_static/locales/fi/LC_MESSAGES/booktheme.mo b/_static/locales/fi/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..d8ac054597c924e3010f629caeac1c748b7211cd
GIT binary patch
literal 1368
[base85 binary data omitted: compiled Finnish (fi) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/fi/LC_MESSAGES/booktheme.po b/_static/locales/fi/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..34dac2183
--- /dev/null
+++ b/_static/locales/fi/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Finnish translations of the same 22 UI strings]

diff --git a/_static/locales/fr/LC_MESSAGES/booktheme.mo b/_static/locales/fr/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..f663d39f0faa76c5b9bd504c51252eef74cca5de
GIT binary patch
literal 1412
[base85 binary data omitted: compiled French (fr) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/fr/LC_MESSAGES/booktheme.po b/_static/locales/fr/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..8991a1b87
--- /dev/null
+++ b/_static/locales/fr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: French translations of the same 22 UI strings]

diff --git a/_static/locales/hr/LC_MESSAGES/booktheme.mo b/_static/locales/hr/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..eca4a1a2842830f06bd5f6235bf01d07bdd313d2
GIT binary patch
literal 1402
[base85 binary data omitted: compiled Croatian (hr) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/id/LC_MESSAGES/booktheme.po b/_static/locales/id/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..b8d8d898e
--- /dev/null
+++ b/_static/locales/id/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Indonesian translations of the same 22 UI strings]

diff --git a/_static/locales/it/LC_MESSAGES/booktheme.mo b/_static/locales/it/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..53ba476edd2df2a802917e9df402257ceca1a130
GIT binary patch
literal 1403
[base85 binary data omitted: compiled Italian (it) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/iw/LC_MESSAGES/booktheme.po b/_static/locales/iw/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..dede9cb08
--- /dev/null
+++ b/_static/locales/iw/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Hebrew translations of the same 22 UI strings]

diff --git a/_static/locales/ja/LC_MESSAGES/booktheme.mo b/_static/locales/ja/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..1cefd29ce3cc08792667a82dc7ff47e9843107be
GIT binary patch
literal 1471
[base85 binary data omitted: compiled Japanese (ja) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ko/LC_MESSAGES/booktheme.po b/_static/locales/ko/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..c9e13a427
--- /dev/null
+++ b/_static/locales/ko/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Korean translations of the same 22 UI strings]

diff --git a/_static/locales/lt/LC_MESSAGES/booktheme.mo b/_static/locales/lt/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..4468ba04bc134a84fea5e3c973461cf02c9c2da3
GIT binary patch
literal 1413
[base85 binary data omitted: compiled Lithuanian (lt) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/lt/LC_MESSAGES/booktheme.po b/_static/locales/lt/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..35eabd955
--- /dev/null
+++ b/_static/locales/lt/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Lithuanian translations of the same 22 UI strings]

diff --git a/_static/locales/lv/LC_MESSAGES/booktheme.mo b/_static/locales/lv/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..74aa4d8985d96a07c4c9be96f006f3b58d884342
GIT binary patch
literal 1404
[base85 binary data omitted: compiled Latvian (lv) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/lv/LC_MESSAGES/booktheme.po b/_static/locales/lv/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..ee1bd08df
--- /dev/null
+++ b/_static/locales/lv/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Latvian translations of the same 22 UI strings]
diff --git a/_static/locales/ml/LC_MESSAGES/booktheme.mo b/_static/locales/ml/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..2736e8fcf6f9e923c2403307f0b366086d37b335
GIT binary patch
literal 1883
[base85 binary data omitted: compiled Malayalam (ml) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ml/LC_MESSAGES/booktheme.po b/_static/locales/ml/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..d471277d6
--- /dev/null
+++ b/_static/locales/ml/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
[66 added lines omitted: Malayalam translations of 19 of the UI strings (this shorter catalog has no "Contents", "repository", or "Fullscreen mode" entries)]

diff --git a/_static/locales/mr/LC_MESSAGES/booktheme.mo b/_static/locales/mr/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..fe530100d7715cdc19a6f9db33a971665835f3c4
GIT binary patch
literal 1674
[base85 binary data omitted: compiled Marathi (mr) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/mr/LC_MESSAGES/booktheme.po b/_static/locales/mr/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..f3694acfa
--- /dev/null
+++ b/_static/locales/mr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
[66 added lines omitted: Marathi translations of the same 19-string set]

diff --git a/_static/locales/ms/LC_MESSAGES/booktheme.mo b/_static/locales/ms/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..f02603fa2522a40060bd3f1b5d65052c77530de6
GIT binary patch
literal 1213
[base85 binary data omitted: compiled Malay (ms) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ms/LC_MESSAGES/booktheme.po b/_static/locales/ms/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..65b7c6026
--- /dev/null
+++ b/_static/locales/ms/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
[66 added lines omitted: Malay translations of the same 19-string set]

diff --git a/_static/locales/nl/LC_MESSAGES/booktheme.mo b/_static/locales/nl/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..e59e7ecb308a7648cd23aa6342a9ad98a61d0009
GIT binary patch
literal 1356
[base85 binary data omitted: compiled Dutch (nl) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/nl/LC_MESSAGES/booktheme.po b/_static/locales/nl/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..71bd1cda7
--- /dev/null
+++ b/_static/locales/nl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Dutch translations of the same 22 UI strings]

diff --git a/_static/locales/no/LC_MESSAGES/booktheme.mo b/_static/locales/no/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..6cd15c88de675226c00ca4d0430171075e5559ff
GIT binary patch
literal 1317
[base85 binary data omitted: compiled Norwegian (no) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/pl/LC_MESSAGES/booktheme.po b/_static/locales/pl/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..1b7233f4f
--- /dev/null
+++ b/_static/locales/pl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Polish translations of the same 22 UI strings]

diff --git a/_static/locales/pt/LC_MESSAGES/booktheme.mo b/_static/locales/pt/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..d0ddb8728e1d5ef72dddc1537f66aa32251a83c8
GIT binary patch
literal 1364
[base85 binary data omitted: compiled Portuguese (pt) .mo catalog]
literal 0
HcmV?d00001

diff --git a/_static/locales/pt/LC_MESSAGES/booktheme.po b/_static/locales/pt/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..1b27314d6
--- /dev/null
+++ b/_static/locales/pt/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Portuguese translations of the same 22 UI strings]

diff --git a/_static/locales/ro/LC_MESSAGES/booktheme.mo b/_static/locales/ro/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..3c36ab1df7c589ad82614599b8c91f072a25d4a6
GIT binary patch
literal 1390
[base85 binary data omitted: compiled Romanian (ro) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ro/LC_MESSAGES/booktheme.po b/_static/locales/ro/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..1783ad2c4
--- /dev/null
+++ b/_static/locales/ro/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Romanian translations of the same 22 UI strings]

diff --git a/_static/locales/ru/LC_MESSAGES/booktheme.mo b/_static/locales/ru/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..6b8ca41f36ebf869818399a9584cdb15619bea78
GIT binary patch
literal 1722
[base85 binary data omitted: compiled Russian (ru) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ru/LC_MESSAGES/booktheme.po b/_static/locales/ru/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..b1176b7ae
--- /dev/null
+++ b/_static/locales/ru/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Russian translations of the same 22 UI strings]

diff --git a/_static/locales/sk/LC_MESSAGES/booktheme.mo b/_static/locales/sk/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..59bd0ddfa341477cf5ccfdc88f4c3e6127046f7a
GIT binary patch
literal 1393
[base85 binary data omitted: compiled Slovak (sk) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/sk/LC_MESSAGES/booktheme.po b/_static/locales/sk/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..650128817
--- /dev/null
+++ b/_static/locales/sk/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Slovak translations of the same 22 UI strings]

diff --git a/_static/locales/sl/LC_MESSAGES/booktheme.mo b/_static/locales/sl/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..87bf26de683cb18c73bd23c2b4c57ef6a02545ec
GIT binary patch
literal 1374
[base85 binary data omitted: compiled Slovenian (sl) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/sl/LC_MESSAGES/booktheme.po b/_static/locales/sl/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..3c7e3a866
--- /dev/null
+++ b/_static/locales/sl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Slovenian translations of the same 22 UI strings]

diff --git a/_static/locales/sr/LC_MESSAGES/booktheme.mo b/_static/locales/sr/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..ec740f4852f3973fa72ed0c7f7cf59273b8dba41
GIT binary patch
literal 1679
[base85 binary data omitted: compiled Serbian (sr) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/sr/LC_MESSAGES/booktheme.po b/_static/locales/sr/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..773b8adae
--- /dev/null
+++ b/_static/locales/sr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
[75 added lines omitted: Serbian translations of the same 22 UI strings]

diff --git a/_static/locales/sv/LC_MESSAGES/booktheme.mo b/_static/locales/sv/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..b07dc76ff21128244172d2e415cc899555d8b49d
GIT binary patch
literal 1365
[base85 binary data omitted: compiled Swedish (sv) .mo catalog]

literal 0
HcmV?d00001

diff --git a/_static/locales/ta/LC_MESSAGES/booktheme.po b/_static/locales/ta/LC_MESSAGES/booktheme.po
new file mode 100644
index 000000000..b48bdfaf1
--- /dev/null
+++ b/_static/locales/ta/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
[66 added lines omitted: Tamil translations of the same 19-string set]

diff --git a/_static/locales/te/LC_MESSAGES/booktheme.mo b/_static/locales/te/LC_MESSAGES/booktheme.mo
new file mode 100644
index 0000000000000000000000000000000000000000..0a5f4b46adfda1551ae6791c3c3782525b61be04
GIT binary patch
literal 1806
[leading base85 lines of the compiled Telugu (te) .mo catalog omitted; the payload continues below]
z*Wm7LLQKL1xEEf7ufjj!vvA~2A@;zx;Pdc3_zYZw?)**o1l&$Dhu|Lg6r6|7{snaU zU4^f}Um%kFbGaHHtXJD1WeGz1>mHu2N2iC=&RtGZchR1B$iD?W+^g&V?%L)waXOPu z(25ZI7x~Geqr`+s7qcYp6yktc&|MR?r8b3{Gv-}+Jn5=|IXAtmr8XIhPLj(sij|mb zCj}Av%jqaDWG`(;g=$Nq#dOrut&Vs-Ra!<`CVAddVkS#8PDaigm=Z@q{7j`LPYRPQ ziX&+!(I@48SIcZiolwF*xU=VM^?@zFv*l@L zDz6jwvaMLG-4f{crQm*REN}ghN#a8coKPu>p z%YTFt$m|qAXTDh&g5gT~hljsJ*ry=A+<9Nb1sA(o5-xGw{Z;?6OlS@gI#jemLQG%z zrpf6>XY=H3-bW;Qrf>v2>%ZN4n4aM|U<|2-ZBn^mBghuhM0)Dv2+@ei()B(V2&(za uuMrF7ToguSy%w`N&3+IHSt#2LVBfiP)B5iT;9Jx)h5&elIS4O(ZT|-CPABOA literal 0 HcmV?d00001 diff --git a/_static/locales/te/LC_MESSAGES/booktheme.po b/_static/locales/te/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..952278f5f --- /dev/null +++ b/_static/locales/te/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: te\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF కి ముద్రించండి" + +msgid "Theme by the" +msgstr "ద్వారా థీమ్" + +msgid "Download source file" +msgstr "మూల ఫైల్‌ను డౌన్‌లోడ్ చేయండి" + +msgid "open issue" +msgstr "ఓపెన్ ఇష్యూ" + +msgid "previous page" +msgstr "ముందు పేజి" + +msgid "Download notebook file" +msgstr "నోట్బుక్ ఫైల్ను డౌన్లోడ్ చేయండి" + +msgid "Copyright" +msgstr "కాపీరైట్" + +msgid "Download this page" +msgstr "ఈ పేజీని డౌన్‌లోడ్ చేయండి" + +msgid "Source repository" +msgstr "మూల రిపోజిటరీ" + +msgid "By" +msgstr "ద్వారా" + +msgid "Last updated on" +msgstr "చివరిగా నవీకరించబడింది" + +msgid "Toggle navigation" +msgstr "నావిగేషన్‌ను టోగుల్ చేయండి" + +msgid "Sphinx Book Theme" +msgstr "సింహిక పుస్తక థీమ్" + +msgid "suggest edit" +msgstr "సవరించమని సూచించండి" + +msgid "Open an issue" +msgstr "సమస్యను తెరవండి" + +msgid "Launch" +msgstr "ప్రారంభించండి" + +msgid "Edit this page" +msgstr "ఈ పేజీని సవరించండి" + +msgid "By the" +msgstr "ద్వారా" + +msgid "next page" +msgstr "తరువాతి పేజీ" diff --git a/_static/locales/tg/LC_MESSAGES/booktheme.mo b/_static/locales/tg/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..b21c6c6340194fdf35f7e5660deb42479c8dbff9 GIT binary patch literal 1628 zcmaiyO^6gn6vxZY=tNE8S5Q|hA#9RE$8XSGCI@vF1jR*HLCB?drtIlBTh(+|t9#pZ z_b^Dr#19Y@K@`-J?Yg?{x;qOwd8{6D@{o&%oP^*d#K?f}Wpw_pnH1Q&v@ z3%&tK*FWF}u!PNBU}wRfz%8&Zfe!dP_zn0sxE@@yD9?KJ9kAteKMU^WF zJC>CiI~?GEE4d#GJ20M-!R?)~ z>>S<1-6l7a<}oHySZKVs&CQhc{Mbyqb88w7X3RqzI{&F#*USvhMoiLrM6s>Hk4E|6 zc@o%qM9F2t?V4KSrinZf?58}t0JXzb84{b#Mr}$-{y{=+V#Xn)X(lNzTPtR6wl32O zGdJL5Bs-5oxB+T`zIGRrAt`^lPLZT?M?g!0angPB{%GR5vxHp0oR8Arqi zbZiRp&%w__8!lf*NbDpe;WA2%r?Xi$atSF8VXr;q_FB@_W_A(5kPfr&0p+;^8?eY= pg1vVOUqyoB&tBYSn9xh&7BjPW`v%MukJU629MMs5Ow+8v{s(GpYQg{j literal 0 HcmV?d00001 diff --git a/_static/locales/tg/LC_MESSAGES/booktheme.po b/_static/locales/tg/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..c33dc4217 --- /dev/null +++ b/_static/locales/tg/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Чоп ба PDF" + +msgid "Theme by the" +msgstr "Мавзӯъи аз" + +msgid "Download source file" +msgstr "Файли манбаъро зеркашӣ кунед" + +msgid "open issue" +msgstr "барориши кушод" + +msgid "Contents" +msgstr "Мундариҷа" + +msgid "previous 
page" +msgstr "саҳифаи қаблӣ" + +msgid "Download notebook file" +msgstr "Файли дафтарро зеркашӣ кунед" + +msgid "Copyright" +msgstr "Ҳуқуқи муаллиф" + +msgid "Download this page" +msgstr "Ин саҳифаро зеркашӣ кунед" + +msgid "Source repository" +msgstr "Анбори манбаъ" + +msgid "By" +msgstr "Бо" + +msgid "repository" +msgstr "анбор" + +msgid "Last updated on" +msgstr "Last навсозӣ дар" + +msgid "Toggle navigation" +msgstr "Гузаришро иваз кунед" + +msgid "Sphinx Book Theme" +msgstr "Сфинкс Мавзӯи китоб" + +msgid "suggest edit" +msgstr "пешниҳод вироиш" + +msgid "Open an issue" +msgstr "Масъаларо кушоед" + +msgid "Launch" +msgstr "Оғоз" + +msgid "Fullscreen mode" +msgstr "Ҳолати экрани пурра" + +msgid "Edit this page" +msgstr "Ин саҳифаро таҳрир кунед" + +msgid "By the" +msgstr "Бо" + +msgid "next page" +msgstr "саҳифаи оянда" diff --git a/_static/locales/th/LC_MESSAGES/booktheme.mo b/_static/locales/th/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..abede98aa11b163e580a26b545980ace31e61ccf GIT binary patch literal 1766 zcma)*-)|K~6vqcarB^@&)CU_K663GrYB9uM>wQ7mgal$EMW46#4trN`XEwWgD~&H; zNI=2(Qi7!k28|(VOaz2Z{{sC3d@_d21Bp*Q8{hn#*{fZQQ8%6a%*>u2-*e9G?VVdL zD4xeyEM|9;NtGW4!Kz+u&BX3y#5Y_$YiD?uH$>6TSg=Km#9y)rhB| z#94;h;VRq)zl0CNTOLHf&S01Tf9_gR-TKX!nk)j@8 z$!wbssp1UphqNWmR~=a5Pgi}TUURlGwkp}=OGTFUD|OJlYX>grY3nL8@7z23cs4K_ z*QFa4UGulgs-KlQPg0|%ds#*3KaVrR!JzCG##p`JdPW^i%1RIOUQ(H!c9!$dcKhn} z9D~H_tSpB{%@&y@h|{wNXVlR;Utw}rW|b?J)X}`3*%SJJXq)Ss1*76y&(}50xilRZ zZIi_;O{$C})|{wnbx!isN%F!hW^Oo=-3VU})6}5SPzh4p+ooH!5B1t_nxZ7yq^|R~ zC4W0+ZXG#vWP0QK-WMiXqsF%9mU1(xD+Iil50cDI>25zMN>g=?&CRs;Z`c)yEsvX` zJ#D+Lm)Ueu@1M`A*5SmaL;6aLert9xERsQc#uW?Yq_%lHmYtWTbiM67YxSOv-uv8C zONI6KgI^B*v*yl!75w$!KM(#=@YjO>NCp2z@ZSZGG2hr+4E__@te@u_{+M44euZ<4 z_;qwx4(q3b|5Wzei|3W#eYCFwuf#yNf&j$Yu0#;+cXcn64J%wUT=~$4O>SO5=h=^
cLPeiV0|6qlnFzL#U0qn`*`iT?nxl@xb2Mx-{rpyeF$L|PmGL73Xj zjT&US*r9DL+Oc6=XyNi4J`IP)4~3RpiSp}N5WkMWZ=`O(hjnrBo?i;ppCMASYj{fI qV*6MBqwc03(drtRAFXAoSBd=B?a4S*sk|OB^ZqtY{YJQZ;_45RC*i9A literal 0 HcmV?d00001 diff --git a/_static/locales/th/LC_MESSAGES/booktheme.po b/_static/locales/th/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..9d24294a7 --- /dev/null +++ b/_static/locales/th/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: th\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "พิมพ์เป็น PDF" + +msgid "Theme by the" +msgstr "ธีมโดย" + +msgid "Download source file" +msgstr "ดาวน์โหลดไฟล์ต้นฉบับ" + +msgid "open issue" +msgstr "เปิดปัญหา" + +msgid "Contents" +msgstr "สารบัญ" + +msgid "previous page" +msgstr "หน้าที่แล้ว" + +msgid "Download notebook file" +msgstr "ดาวน์โหลดไฟล์สมุดบันทึก" + +msgid "Copyright" +msgstr "ลิขสิทธิ์" + +msgid "Download this page" +msgstr "ดาวน์โหลดหน้านี้" + +msgid "Source repository" +msgstr "ที่เก็บซอร์ส" + +msgid "By" +msgstr "โดย" + +msgid "repository" +msgstr "ที่เก็บ" + +msgid "Last updated on" +msgstr "ปรับปรุงล่าสุดเมื่อ" + +msgid "Toggle navigation" +msgstr "ไม่ต้องสลับช่องทาง" + +msgid "Sphinx Book Theme" +msgstr "ธีมหนังสือสฟิงซ์" + +msgid "suggest edit" +msgstr "แนะนำแก้ไข" + +msgid "Open an issue" +msgstr "เปิดปัญหา" + +msgid "Launch" +msgstr "เปิด" + +msgid "Fullscreen mode" +msgstr "โหมดเต็มหน้าจอ" + +msgid "Edit this page" +msgstr "แก้ไขหน้านี้" + +msgid "By the" +msgstr "โดย" + +msgid "next page" +msgstr "หน้าต่อไป" diff --git a/_static/locales/tl/LC_MESSAGES/booktheme.mo b/_static/locales/tl/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..8df1b73310e0c606251d1abff7e6033d1f6b273f GIT binary patch literal 1273 zcmZ{iJ#P~+7{?8ixA4}MHz7bWh@lJz6qYJggw!_(YD=U9)PcpxxsJ)jKAr8h8TbH< zl>mt^fEW=h3@|dWP{DwN#01}fk%9knmnKyup2YV%`|Ricw;%r;A9>CYr_e5-eL%a2 z_Gk|th%ew?a0@&DeghAKKfp8KZ;;mh1;@cLOg;`?0Z)LpKw38o9s}=zN5K_v43vGY z_s5_1`!7J!c@37pw;-+m2-5n`;Ct{ph(FuhSLAyJ(z;h52j75~!7cDS_y;@-j*b-m zC%}W~-v&p)J0SUA0Z)SpJPAGlY2GGy4txoc|92qy`UKMYuOOYrFAzsE2s#`3PH_Yc zHrRgZAP%9?9w{GbQ&0{H+8gOp(7q^W|Kx>&wCSAbj3LR`Ob>tV+mg-Nw3lhs_H53s zo5YHk8|!7$+74gUi5w0)8?u(%HR#*g@l+_u=40)Val5-HoaZ5pg_kk6hAoOEtD7`fpKHxt1^q`P0PmGs4|~w zYF}CuMg<%mawMf%5;BoQ3pVSzGB;`7b(Ndbym+ll7=Ee3C$CPI*s^AIft_~oQ`+M| zjoMMlC845tP)?QC3^N9mW&fLyY9mxPB*L)jFaumd3$>sZlm_4E_)y1P<99=X&7mSu zQF#bj7^^11I1!U-GjxO_E86t}@}&6i_@% literal 0 HcmV?d00001 diff --git a/_static/locales/tl/LC_MESSAGES/booktheme.po b/_static/locales/tl/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..20e0d07ce --- /dev/null +++ b/_static/locales/tl/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "I-print sa PDF" + +msgid "Theme by the" +msgstr "Tema ng" + +msgid "Download source file" +msgstr "Mag-download ng file ng pinagmulan" + +msgid "open issue" +msgstr "bukas na isyu" + +msgid "previous page" +msgstr "Nakaraang pahina" + +msgid "Download notebook file" +msgstr "Mag-download ng file ng notebook" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "I-download ang pahinang ito" + +msgid 
"Source repository" +msgstr "Pinagmulan ng imbakan" + +msgid "By" +msgstr "Ni" + +msgid "Last updated on" +msgstr "Huling na-update noong" + +msgid "Toggle navigation" +msgstr "I-toggle ang pag-navigate" + +msgid "Sphinx Book Theme" +msgstr "Tema ng Sphinx Book" + +msgid "suggest edit" +msgstr "iminumungkahi i-edit" + +msgid "Open an issue" +msgstr "Magbukas ng isyu" + +msgid "Launch" +msgstr "Ilunsad" + +msgid "Edit this page" +msgstr "I-edit ang pahinang ito" + +msgid "By the" +msgstr "Sa pamamagitan ng" + +msgid "next page" +msgstr "Susunod na pahina" diff --git a/_static/locales/tr/LC_MESSAGES/booktheme.mo b/_static/locales/tr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..029ae18afb5360a8c238a052e8d3971761759d3d GIT binary patch literal 1373 zcmZ{izi%8x6vqdWKynFz!~~?sQXUOIiX{o62)3@sVLOWA2n!$dC&=i7J19yL{j6T0W2%?d)gY?96-L@67yl{`g~s z_CESD`XA_*&>y~r7uw0!mBOP=;q?Z120RHafv3P_@ICND@LjMEo(I1M&w>oT4aSc5 zLCNz7d<%R6o&kRZ&w(7D`FN2cjBk&jSbMQ3yvf}|L z^-dgb=PiL35fdohSHUH48x$WIDEhw##oq%^>ihtT&YwW>`D+*d4$8W}!FRxeF7BOZ z>s|syPtwI-fS96gfHHp*lr0wzl2>wx9;q$97n`h;HKI?N=#nP7WUs~N1$3mG*A#z? zP4-{T@J)1V%lv%Rpus~iXZ5ACk!=k4s2XpwJgPPKoh_YC$-2lp&J{E^B_9n3SNjqF zXE5d_P^B|g>#2!IeHd=kWf?}E+0w+NtTuIsR9C5voRYJMYdgx-R~05`OD2Sx)s{CF zNgQphZK&MIp`-&XC zw^i0&m&u0nldwXznunqP*($Z4{f{ku(5I`PtoGDUdp(}*+f>_seb0WLJ~cMw0*C6= zkV`YBTo=V`U#;;tvZvIAMh7gN?Gt7i9EQ538>@@sDR7=_PxgBv?;s7aXZ*rps`o&(U<3tx~U_1rqYg?0^K7|^E%RsJ#t%4f8 zo6h$Ap{&BCl*;`;;xXGI&@B^nMMIv5kwyPFiiuV?c-PnoHwMdQd*;PnG+qT~k=xL= f7*f}zXhDUuwVFPIQ2d!&RZ>YE8iRKH#ZJBgJpfrQ literal 0 HcmV?d00001 diff --git a/_static/locales/tr/LC_MESSAGES/booktheme.po b/_static/locales/tr/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..a77eb0273 --- /dev/null +++ b/_static/locales/tr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF olarak yazdır" + +msgid "Theme by the" +msgstr "Tarafından tema" + +msgid "Download source file" +msgstr "Kaynak dosyayı indirin" + +msgid "open issue" +msgstr "Açık konu" + +msgid "Contents" +msgstr "İçindekiler" + +msgid "previous page" +msgstr "önceki sayfa" + +msgid "Download notebook file" +msgstr "Defter dosyasını indirin" + +msgid "Copyright" +msgstr "Telif hakkı" + +msgid "Download this page" +msgstr "Bu sayfayı indirin" + +msgid "Source repository" +msgstr "Kaynak kod deposu" + +msgid "By" +msgstr "Tarafından" + +msgid "repository" +msgstr "depo" + +msgid "Last updated on" +msgstr "Son güncelleme tarihi" + +msgid "Toggle navigation" +msgstr "Gezinmeyi değiştir" + +msgid "Sphinx Book Theme" +msgstr "Sfenks Kitap Teması" + +msgid "suggest edit" +msgstr "düzenleme öner" + +msgid "Open an issue" +msgstr "Bir sorunu açın" + +msgid "Launch" +msgstr "Başlatmak" + +msgid "Fullscreen mode" +msgstr "Tam ekran modu" + +msgid "Edit this page" +msgstr "Bu sayfayı düzenle" + +msgid "By the" +msgstr "Tarafından" + +msgid "next page" +msgstr "sonraki Sayfa" diff --git a/_static/locales/uk/LC_MESSAGES/booktheme.mo b/_static/locales/uk/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..16ab78909cfbaba7fa199f47fdfc2934426ac699 GIT binary patch literal 1681 zcmZvaO>7%Q6vv0QKx#^WmXAw-X;52=V^7yhotb5Kwlue- zPJ@uD1fdWPh|e20*TKPc;zZ)a@r}d@A+B8DUJjM|f4dvvq>McK+j;xuef*#QJbLI0 
zf%O{p5cY4_Z(`qj3JD;1Tdg@Fj3P=dZc__x$`9$oc*S3*fo^agnKo4Zexd+ydwLFH++H&9h3nhscvH3nP?w=Ry$GKV8 zSFn*%JcrG*?O#Zh&hS|fa-{PW7iaO4wx+~;Mq8zAjF)gW3jCTCm(0hyZoI10#;R#! zZps@$UG17A`bVb!y$DvicD z(UqEbKZJv)We~@S5))CN5yZ%eOXK2NmM>DFi34k*S#d3_1^TwU$hA$?)Ql48Q%+|+ zO&Q;>E2+I(f$!M>Nwm6cv+4}z$&!STx)qotFS{>%ocO*%l?t7p6!W2~*y81C@dFjb zh*pwW{vziurkM*@FJB$o{eEubY#|r6I5iuplC&t`Oj!2O`(TSv<;z`$dL~;A5u^%qBgBK^s=!h>rLI6)qdpT zRwLUoA{Qc+BL{@CNd#$~fM}{TM=rgzQV%n0sfS7&;BP2WpLe~9L#0ZLy!)G(eLtS} z@uTBCHyGACc;3gei{}G8S6;vi>pu7*_yBwvdd_{ zw7^~PH24qrDR`oXv3~FiknZO|ioXGV2*%(?;Gf{z;NRf8;J+FBUg~@w04eVRNO2|b zJ#Z2H7+e8i$-yFT-pWp-6LJ_r(sdMXnvv>}ceKb;S{N^_PkZu%7R~=ky6bpCK9k3g z$k@3h{CHRvY(#lWzU!1jHmVk6McE}TRVZeZy2@wWis)VkD)Nisnai;32Hdk9!Ny8% zh>H8iNjnI6>?(vD=Y%)au3|&?BFFX~vq4Jm5 zMX&72MShO7Oqazq!O}OM=}4bej#Cj_+Vieshc22(u^4vLDC*N`;`w6URZ%80h#W^i znt)vNS978mS`#JfvhV|x$n#E9i<(+#yWIJS^JCrbgJ(X=jVKuk8Cugzp2+hMT?f31 z?aE=i4%v6}~kli>l;GC(nmw+%Pw3ODBSPc^>(>sY>MA6>D7i z*MdBkUV01)pAYlSw$Ly6$pRnzWH`t4n!cr5?Pa~wuCipkz0CC=x@BmuW3xx+mX7tR z*NZtNFp)tbr@2L4}dj%B? z$~?F1e_~CYdU3r0$=I6uq5ePVO_Bn=P}riFSZ}qf6oE57w5w3K+FoX+hEW?y<4|&p zx|p)Aw=sGx%}Vw#el49X^(ARiaRizwH+vA!fH?=_BQ=JATO9Uo!rm6_g(Z9Fvrf~J ziFzY}!KTjO`w9hS|3mkKY2Z8hBsWpwH*$FgF5fip{lMp#&N|*Vkp4MHrJJsNw1%8D c^9?3>eEmE8GP@z_uy-`khW>?G>aA{%rxd0lq5uE@ literal 0 HcmV?d00001 diff --git a/_static/locales/ur/LC_MESSAGES/booktheme.po b/_static/locales/ur/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..2f774267f --- /dev/null +++ b/_static/locales/ur/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ur\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "پی ڈی ایف پرنٹ کریں" + +msgid "Theme by the" +msgstr "کے ذریعہ تھیم" + +msgid "Download source file" +msgstr "سورس فائل ڈاؤن لوڈ کریں" + +msgid "open issue" +msgstr "کھلا مسئلہ" + +msgid "previous page" +msgstr "سابقہ ​​صفحہ" + +msgid "Download notebook file" +msgstr "نوٹ بک فائل ڈاؤن لوڈ کریں" + +msgid "Copyright" +msgstr "کاپی رائٹ" + +msgid "Download this page" +msgstr "اس صفحے کو ڈاؤن لوڈ کریں" + +msgid "Source repository" +msgstr "ماخذ ذخیرہ" + +msgid "By" +msgstr "بذریعہ" + +msgid "Last updated on" +msgstr "آخری بار تازہ کاری ہوئی" + +msgid "Toggle navigation" +msgstr "نیویگیشن ٹوگل کریں" + +msgid "Sphinx Book Theme" +msgstr "سپنکس بک تھیم" + +msgid "suggest edit" +msgstr "ترمیم کی تجویز کریں" + +msgid "Open an issue" +msgstr "ایک مسئلہ کھولیں" + +msgid "Launch" +msgstr "لانچ کریں" + +msgid "Edit this page" +msgstr "اس صفحے میں ترمیم کریں" + +msgid "By the" +msgstr "کی طرف" + +msgid "next page" +msgstr "اگلا صفحہ" diff --git a/_static/locales/vi/LC_MESSAGES/booktheme.mo b/_static/locales/vi/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000000000000000000000000000000000000..2bb32555c3ea0dab08ed37b71cb863fbe75698c6 GIT binary patch literal 1431 zcmZvb&x;&I6vr#s{2bR9catc(#OK8%htWiG2(!#(H#@MntBVvV&v#)!JFr^_{KP>F`@nJV05}OA1gF4b;9KAkunA6pAA*NK z3BCmSiq}An^CkEqxCxGf--9oMKUe&tTK}_}KL%Ov30MQi#>)GLK%P&59Onf13wRdX z3;qSN-ajCp`xj*0y?e@hC&AY-KMlSLE>x7&{%7EEtZ#xGrw?+zzkx4+4=O$aInLi8 z=P?fP6X0=>=PmFs_%X6t9{djXqq7H;REU`#e&m%Dv(|WNyu(A%OLe5YIB!2Su?;$8te4By zUZxct%bjIsvow_dS@cRfN`gp=xsLV-{cPIG;@E|$l!jJqM~a2OdCHPb;AMxb!8|je z5+5Wu5E#l0;esOMR|IFK`YgJ+E?Oq}qlDRK7TFmDND~hLNUKno*_5*I#duC ze1pUH?uya95XS7fK6vqb$6da&5d>$e(Tqr%5FL8(*`2r*YiH`^Z>aE(?ll6i#v)Y{nLNAmS zVZLM|@+Cqff{+L(H9>+X*h#n|&YXJarQWrDsNf1G{?EIRgwekJ?VFu>|Np%Cx20*2 
zVQfXTBHkmmBW^Fl3uEes3ZOb zQk?;C9XJZE1)qXHfzuJ+M)Utfc@-qRPhbpOu_D~R9;Cb#q&h!?W$*w<>pw>P43chB zQ^;m;Gx8RY{OyX^7V!v3^?n6uod9Y7@6r5Kko@OC(isEk+)qHNR|2Y~glBh97KqDQIoyw}Q#g=trLPD*?? zsWX<#ZNn6-BW-Dv{+f1r*{qv#gpj;Xr3E`?xSD%*+R!4+l|=5zREC|jvA~era$Qfb zF2|B6qIlQgPIfxfbA+v2ODm_JowhTUyu=TZx9*JS6Rh#(J>i+Um1$-Lm&OIlG_(Z~ zDK6>Iog$r3#CF65OL>v)Mde-3GzDA=oCH?XIg!%wqv`lr;kc-l;Guq;^y3ZX*zu#s zI~M-$+r2jyH8$SeZ;J%iFtEqY8kTJ5sf^*cLMP93cgEWm);WfBdxaD4kSUe6q?zDt zJ(i9gGoRpuVl zu3Y;tdw+Y?BAIRa)rh7%Q6vqb|O6z_>OFs^gI=uiz7%1XUIrvhVNR?8ES{E)zXk$;;3!9nM?rdo| zg(?BJNt;NGU78fyR1pmy`2barI!U>5;l_yr7d&hG1XmCT{?EI?hS9$L&6|(+{`20i zd%MaE;~=aX_A~4-Y~m68Ft$C)7(TWgzpda-a2vP}+zxhw2f(Mnr@$<@7d!**1_ihW z)CosHiZc!F0%yUU;1}SN;CBgsOxAx%{5nYa{sdFtmM!u9-5~khAjNqGTmxSL>HHrF z{{ks*S695g10>!1LDKhp!dDaaCwv>+k9`-U`l4+X18A zb`E+6_3&{R%F^mDJ28xp>!M(W-N{d0I-s9&TSG1P{ z75aJczeQbi0`6J1V7&!LBlM=*7rJhc_l1yrNEHO@w*t*WuV86W;7Y;|WxmMX@NmGA z+zEnEumRtZ2%`AF$v$>A&gTnH1&&t!FgxoN9eI(TAZ@v#7!s`Y=Y#Q^a>}+{!KHP< zu`TT&i4+%gtWHs$IEm+r3yumC*&X47(6$9s3cLhT)Vm_D)29mQH-#S{T878@)098m z%A7iV>U3{=|JVyJrINy?bHkpi>0X&v z1xMN$etgi;seVh^A?ljp9~9roy_FhpL*H`KeaasSGF*BsH^{!+!{cf8S;>!N`LUxt zDOTIK)fgH3eSM~5R-avJPKqIR9T5`UTRjZlMYk9ZN|&2 zSsBNyvAEKln_#AN!(3g7N>iJPqMOUL)it`->9il( zoLjEXSEEv;IrB~9#-v%f*7$q_8AyL^bp+y>8T%Uf>ocRKJlP7gdS!Fx->lK(l=-+? P`(Y`Xeqh$BP|E%Rvo4ks literal 0 HcmV?d00001 diff --git a/_static/locales/zh_TW/LC_MESSAGES/booktheme.po b/_static/locales/zh_TW/LC_MESSAGES/booktheme.po new file mode 100644 index 000000000..beecb076b --- /dev/null +++ b/_static/locales/zh_TW/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_TW\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "列印成 PDF" + +msgid "Theme by the" +msgstr "佈景主題作者:" + +msgid "Download source file" +msgstr "下載原始檔" + +msgid "open issue" +msgstr "公開的問題" + +msgid "Contents" +msgstr "目錄" + +msgid "previous page" +msgstr "上一頁" + +msgid "Download notebook file" +msgstr "下載 Notebook 檔案" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "下載此頁面" + +msgid "Source repository" +msgstr "來源儲存庫" + +msgid "By" +msgstr "作者:" + +msgid "repository" +msgstr "儲存庫" + +msgid "Last updated on" +msgstr "最後更新時間:" + +msgid "Toggle navigation" +msgstr "顯示或隱藏導覽列" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Book 佈景主題" + +msgid "suggest edit" +msgstr "提出修改建議" + +msgid "Open an issue" +msgstr "開啟議題" + +msgid "Launch" +msgstr "啟動" + +msgid "Fullscreen mode" +msgstr "全螢幕模式" + +msgid "Edit this page" +msgstr "編輯此頁面" + +msgid "By the" +msgstr "作者:" + +msgid "next page" +msgstr "下一頁" diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 0000000000000000000000000000000000000000..d96755fdaf8bb2214971e0db9c1fd3077d7c419d GIT binary patch literal 90 zcmeAS@N?(olHy`uVBq!ia0vp^+#t*WBp7;*Yy1LIik>cxAr*|t7R?Mi>2?kWtu=nj kDsEF_5m^0CR;1wuP-*O&G^0G}KYk!hp00i_>zopr08q^qX#fBK literal 0 HcmV?d00001 diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 0000000000000000000000000000000000000000..7107cec93a979b9a5f64843235a16651d563ce2d GIT binary patch literal 90 zcmeAS@N?(olHy`uVBq!ia0vp^+#t*WBp7;*Yy1LIik>cxAr*|t7R?Mi>2?kWtu>-2 m3q%Vub%g%s<8sJhVPMczOq}xhg9DJoz~JfX=d#Wzp$Pyb1r*Kz literal 0 HcmV?d00001 diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 000000000..012e6a00a --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,152 @@ 
+html[data-theme="light"] .highlight pre { line-height: 125%; } +html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } +html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } +html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ +html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ +html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ +html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ +html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="light"] .highlight .gh { color: #005b82 } /* Generic.Heading */ +html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #912583 } /* Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */ +html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { 
color: #005b82 } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */ +html[data-theme="light"] .highlight .sc { color: #00622f } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si { color: #00622f } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: #00622f } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */ +html[data-theme="dark"] .highlight pre { line-height: 125%; } +html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; 
padding-right: 5px; } +html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e } +html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 } +html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */ +html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */ +html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */ +html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */ +html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */ +html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */ +html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */ +html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */ +html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */ +html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */ +html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */ +html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */ +html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */ +html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */ +html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */ +html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */ +html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */ +html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */ +html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */ +html[data-theme="dark"] .highlight .kt { color: #ffd900 } /* Keyword.Type */ +html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */ +html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */ +html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */ +html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */ +html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */ +html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */ +html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */ +html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */ +html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */ +html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */ +html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */ +html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* Name.Label */ +html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */ +html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */ +html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */ +html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */ +html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */ +html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* 
Punctuation.Marker */ +html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */ +html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */ +html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */ +html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */ +html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */ +html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */ +html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */ +html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */ +html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */ +html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */ +html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */ +html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */ +html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */ +html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */ +html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */ +html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */ +html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */ +html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */ +html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */ +html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */ +html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */ +html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */ +html[data-theme="dark"] .highlight .vi { color: #ffa07a } /* Name.Variable.Instance */ +html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */ +html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/sbt-webpack-macros.html b/_static/sbt-webpack-macros.html new file mode 100644 index 000000000..6cbf559fa --- /dev/null +++ b/_static/sbt-webpack-macros.html @@ -0,0 +1,11 @@ + +{% macro head_pre_bootstrap() %} + +{% endmacro %} + +{% macro body_post() %} + +{% endmacro %} diff --git a/_static/scripts/bootstrap.js b/_static/scripts/bootstrap.js new file mode 100644 index 000000000..c8178debb --- /dev/null +++ b/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! 
For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>E,afterRead:()=>v,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>J,auto:()=>a,basePlacements:()=>l,beforeMain:()=>y,beforeRead:()=>_,beforeWrite:()=>A,bottom:()=>s,clippingParents:()=>d,computeStyles:()=>it,createPopper:()=>Dt,createPopperBase:()=>St,createPopperLite:()=>$t,detectOverflow:()=>_t,end:()=>h,eventListeners:()=>st,flip:()=>bt,hide:()=>wt,left:()=>r,main:()=>w,modifierPhases:()=>O,offset:()=>Et,placements:()=>g,popper:()=>f,popperGenerator:()=>Lt,popperOffsets:()=>At,preventOverflow:()=>Tt,read:()=>b,reference:()=>p,right:()=>o,start:()=>c,top:()=>n,variationPlacements:()=>m,viewport:()=>u,write:()=>T});var i={};t.r(i),t.d(i,{Alert:()=>Oe,Button:()=>ke,Carousel:()=>li,Collapse:()=>Ei,Dropdown:()=>Ki,Modal:()=>Ln,Offcanvas:()=>Kn,Popover:()=>bs,ScrollSpy:()=>Ls,Tab:()=>Js,Toast:()=>po,Tooltip:()=>fs});var n="top",s="bottom",o="right",r="left",a="auto",l=[n,s,o,r],c="start",h="end",d="clippingParents",u="viewport",f="popper",p="reference",m=l.reduce((function(t,e){return t.concat([e+"-"+c,e+"-"+h])}),[]),g=[].concat(l,[a]).reduce((function(t,e){return t.concat([e,e+"-"+c,e+"-"+h])}),[]),_="beforeRead",b="read",v="afterRead",y="beforeMain",w="main",E="afterMain",A="beforeWrite",T="write",C="afterWrite",O=[_,b,v,y,w,E,A,T,C];function x(t){return t?(t.nodeName||"").toLowerCase():null}function k(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function L(t){return t instanceof k(t).Element||t instanceof Element}function S(t){return t instanceof k(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof k(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];S(s)&&x(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});S(n)&&x(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function I(t){return t.split("-")[0]}var N=Math.max,P=Math.min,M=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function F(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var 
n=t.getBoundingClientRect(),s=1,o=1;e&&S(t)&&(s=t.offsetWidth>0&&M(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&M(n.height)/t.offsetHeight||1);var r=(L(t)?k(t):window).visualViewport,a=!F()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function B(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function W(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function z(t){return k(t).getComputedStyle(t)}function R(t){return["table","td","th"].indexOf(x(t))>=0}function q(t){return((L(t)?t.ownerDocument:t.document)||window.document).documentElement}function V(t){return"html"===x(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function Y(t){return S(t)&&"fixed"!==z(t).position?t.offsetParent:null}function K(t){for(var e=k(t),i=Y(t);i&&R(i)&&"static"===z(i).position;)i=Y(i);return i&&("html"===x(i)||"body"===x(i)&&"static"===z(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&S(t)&&"fixed"===z(t).position)return null;var i=V(t);for(D(i)&&(i=i.host);S(i)&&["html","body"].indexOf(x(i))<0;){var n=z(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Q(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function X(t,e,i){return N(t,P(e,i))}function U(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function G(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const J={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,a=t.name,c=t.options,h=i.elements.arrow,d=i.modifiersData.popperOffsets,u=I(i.placement),f=Q(u),p=[r,o].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return U("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:G(t,l))}(c.padding,i),g=B(h),_="y"===f?n:r,b="y"===f?s:o,v=i.rects.reference[p]+i.rects.reference[f]-d[f]-i.rects.popper[p],y=d[f]-i.rects.reference[f],w=K(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=X(T,O,C),k=f;i.modifiersData[a]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&W(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Z(t){return t.split("-")[1]}var tt={top:"auto",right:"auto",bottom:"auto",left:"auto"};function et(t){var e,i=t.popper,a=t.popperRect,l=t.placement,c=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=r,C=n,O=window;if(p){var 
x=K(i),L="clientHeight",S="clientWidth";x===k(i)&&"static"!==z(x=q(i)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===n||(l===r||l===o)&&c===h)&&(C=s,y-=(g&&x===O&&O.visualViewport?O.visualViewport.height:x[L])-a.height,y*=f?1:-1),l!==r&&(l!==n&&l!==s||c!==h)||(T=o,b-=(g&&x===O&&O.visualViewport?O.visualViewport.width:x[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&tt),I=!0===m?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:M(i*s)/s||0,y:M(n*s)/s||0}}({x:b,y},k(i)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const it={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:I(e.placement),variation:Z(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,et(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,et(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var nt={passive:!0};const st={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=k(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,nt)})),a&&l.addEventListener("resize",i.update,nt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,nt)})),a&&l.removeEventListener("resize",i.update,nt)}},data:{}};var ot={left:"right",right:"left",bottom:"top",top:"bottom"};function rt(t){return t.replace(/left|right|bottom|top/g,(function(t){return ot[t]}))}var at={start:"end",end:"start"};function lt(t){return t.replace(/start|end/g,(function(t){return at[t]}))}function ct(t){var e=k(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ht(t){return H(q(t)).left+ct(t).scrollLeft}function dt(t){var e=z(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ut(t){return["html","body","#document"].indexOf(x(t))>=0?t.ownerDocument.body:S(t)&&dt(t)?t:ut(V(t))}function ft(t,e){var i;void 0===e&&(e=[]);var n=ut(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=k(n),r=s?[o].concat(o.visualViewport||[],dt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ft(V(r)))}function pt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function mt(t,e,i){return e===u?pt(function(t,e){var i=k(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=F();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ht(t),y:l}}(t,i)):L(e)?function(t,e){var i=H(t,!1,"fixed"===e);return 
i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):pt(function(t){var e,i=q(t),n=ct(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=N(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=N(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ht(t),l=-n.scrollTop;return"rtl"===z(s||i).direction&&(a+=N(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function gt(t){var e,i=t.reference,a=t.element,l=t.placement,d=l?I(l):null,u=l?Z(l):null,f=i.x+i.width/2-a.width/2,p=i.y+i.height/2-a.height/2;switch(d){case n:e={x:f,y:i.y-a.height};break;case s:e={x:f,y:i.y+i.height};break;case o:e={x:i.x+i.width,y:p};break;case r:e={x:i.x-a.width,y:p};break;default:e={x:i.x,y:i.y}}var m=d?Q(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case c:e[m]=e[m]-(i[g]/2-a[g]/2);break;case h:e[m]=e[m]+(i[g]/2-a[g]/2)}}return e}function _t(t,e){void 0===e&&(e={});var i=e,r=i.placement,a=void 0===r?t.placement:r,c=i.strategy,h=void 0===c?t.strategy:c,m=i.boundary,g=void 0===m?d:m,_=i.rootBoundary,b=void 0===_?u:_,v=i.elementContext,y=void 0===v?f:v,w=i.altBoundary,E=void 0!==w&&w,A=i.padding,T=void 0===A?0:A,C=U("number"!=typeof T?T:G(T,l)),O=y===f?p:f,k=t.rects.popper,D=t.elements[E?O:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ft(V(t)),i=["absolute","fixed"].indexOf(z(t).position)>=0&&S(t)?K(t):t;return L(i)?e.filter((function(t){return L(t)&&W(t,i)&&"body"!==x(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=mt(t,i,n);return e.top=N(s.top,e.top),e.right=P(s.right,e.right),e.bottom=P(s.bottom,e.bottom),e.left=N(s.left,e.left),e}),mt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(L(D)?D:D.contextElement||q(t.elements.popper),g,b,h),I=H(t.elements.reference),M=gt({reference:I,element:k,strategy:"absolute",placement:a}),j=pt(Object.assign({},k,M)),F=y===f?j:I,B={top:$.top-F.top+C.top,bottom:F.bottom-$.bottom+C.bottom,left:$.left-F.left+C.left,right:F.right-$.right+C.right},R=t.modifiersData.offset;if(y===f&&R){var Y=R[a];Object.keys(B).forEach((function(t){var e=[o,s].indexOf(t)>=0?1:-1,i=[n,s].indexOf(t)>=0?"y":"x";B[t]+=Y[i]*e}))}return B}const bt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=i.mainAxis,u=void 0===d||d,f=i.altAxis,p=void 0===f||f,_=i.fallbackPlacements,b=i.padding,v=i.boundary,y=i.rootBoundary,w=i.altBoundary,E=i.flipVariations,A=void 0===E||E,T=i.allowedAutoPlacements,C=e.options.placement,O=I(C),x=_||(O!==C&&A?function(t){if(I(t)===a)return[];var e=rt(t);return[lt(t),e,lt(e)]}(C):[rt(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat(I(i)===a?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?g:c,d=Z(n),u=d?a?m:m.filter((function(t){return Z(t)===d})):l,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var p=f.reduce((function(e,i){return e[i]=_t(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[I(i)],e}),{});return Object.keys(p).sort((function(t,e){return p[t]-p[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new 
Map,$=!0,N=k[0],P=0;P<k.length;P++){var M=k[P],j=I(M),F=Z(M)===c,H=[n,s].indexOf(j)>=0,B=H?"width":"height",W=_t(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?o:r:F?s:n;L[B]>S[B]&&(z=rt(z));var R=rt(z),q=[];if(u&&q.push(W[j]<=0),p&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,$=!1;break}D.set(M,q)}if($)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function vt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function yt(t){return[n,o,s,r].some((function(e){return t[e]>=0}))}const wt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=_t(e,{elementContext:"reference"}),a=_t(e,{altBoundary:!0}),l=vt(r,n),c=vt(a,s,o),h=yt(l),d=yt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},Et={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,s=t.name,a=i.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,i){return t[i]=function(t,e,i){var s=I(t),a=[r,n].indexOf(s)>=0?-1:1,l="function"==typeof i?i(Object.assign({},e,{placement:t})):i,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[r,o].indexOf(s)>=0?{x:h,y:c}:{x:c,y:h}}(i,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[s]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Tt={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,a=t.name,l=i.mainAxis,h=void 0===l||l,d=i.altAxis,u=void 0!==d&&d,f=i.boundary,p=i.rootBoundary,m=i.altBoundary,g=i.padding,_=i.tether,b=void 0===_||_,v=i.tetherOffset,y=void 0===v?0:v,w=_t(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=I(e.placement),A=Z(e.placement),T=!A,C=Q(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),$=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var j,F="y"===C?n:r,H="y"===C?s:o,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[H],V=b?-L[W]/2:0,Y=A===c?k[W]:L[W],U=A===c?-L[W]:-k[W],G=e.elements.arrow,J=b&&G?B(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[H],nt=X(0,k[W],J[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:Y-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&K(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==$?void 0:$[C])?j:0,ct=z+ot-lt,ht=X(b?P(R,z+st-lt-at):R,z,b?N(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var
dt,ut="x"===C?n:r,ft="x"===C?s:o,pt=x[O],mt="y"===O?"height":"width",gt=pt+w[ut],bt=pt-w[ft],vt=-1!==[n,r].indexOf(E),yt=null!=(dt=null==$?void 0:$[O])?dt:0,wt=vt?gt:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=X(t,e,i);return n>i?i:n}(wt,pt,Et):X(b?wt:gt,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=S(e),r=S(e)&&function(t){var e=t.getBoundingClientRect(),i=M(e.width)/t.offsetWidth||1,n=M(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==x(e)||dt(a))&&(c=(n=e)!==k(n)&&S(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:ct(n)),S(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ht(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ot(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var xt={placement:"bottom",modifiers:[],strategy:"absolute"};function kt(){for(var t=arguments.length,e=new Array(t),i=0;iIt.has(t)&&It.get(t).get(e)||null,remove(t,e){if(!It.has(t))return;const i=It.get(t);i.delete(e),0===i.size&&It.delete(t)}},Pt="transitionend",Mt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),jt=t=>{t.dispatchEvent(new Event(Pt))},Ft=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Ft(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Mt(t)):null,Bt=t=>{if(!Ft(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Wt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),zt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?zt(t.parentNode):null},Rt=()=>{},qt=t=>{t.offsetHeight},Vt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Yt=[],Kt=()=>"rtl"===document.documentElement.dir,Qt=t=>{var e;e=()=>{const e=Vt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Yt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Yt)t()})),Yt.push(e)):e()},Xt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Ut=(t,e,i=!0)=>{if(!i)return void Xt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Pt,o),Xt(t))};e.addEventListener(Pt,o),setTimeout((()=>{s||jt(e)}),n)},Gt=(t,e,i,n)=>{const s=t.length;let 
o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Jt=/[^.]*(?=\..*)\.|.*/,Zt=/\..*/,te=/::\d+$/,ee={};let ie=1;const ne={mouseenter:"mouseover",mouseleave:"mouseout"},se=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function oe(t,e){return e&&`${e}::${ie++}`||t.uidEvent||ie++}function re(t){const e=oe(t);return t.uidEvent=e,ee[e]=ee[e]||{},ee[e]}function ae(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function le(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=ue(t);return se.has(o)||(o=t),[n,s,o]}function ce(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=le(e,i,n);if(e in ne){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=re(t),c=l[a]||(l[a]={}),h=ae(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=oe(r,e.replace(Jt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return pe(s,{delegateTarget:r}),n.oneOff&&fe.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return pe(n,{delegateTarget:t}),i.oneOff&&fe.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function he(t,e,i,n,s){const o=ae(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function de(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&he(t,e,i,r.callable,r.delegationSelector)}function ue(t){return t=t.replace(Zt,""),ne[t]||t}const fe={on(t,e,i,n){ce(t,e,i,n,!1)},one(t,e,i,n){ce(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=le(e,i,n),a=r!==e,l=re(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))de(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(te,"");a&&!e.includes(s)||he(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;he(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Vt();let s=null,o=!0,r=!0,a=!1;e!==ue(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=pe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function pe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function ge(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const 
_e={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${ge(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${ge(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${ge(e)}`))};class be{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Ft(e)?_e.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Ft(e)?_e.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=Ft(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class ve extends be{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),Nt.set(this._element,this.constructor.DATA_KEY,this))}dispose(){Nt.remove(this._element,this.constructor.DATA_KEY),fe.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Ut(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return Nt.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.3"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ye=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e?e.split(",").map((t=>Mt(t))).join(","):null},we={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Wt(t)&&Bt(t)))},getSelectorFromElement(t){const e=ye(t);return e&&we.findOne(e)?e:null},getElementFromSelector(t){const e=ye(t);return e?we.findOne(e):null},getMultipleElementsFromSelector(t){const e=ye(t);return e?we.find(e):[]}},Ee=(t,e="hide")=>{const 
i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;fe.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Wt(this))return;const s=we.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Te=`close${Ae}`,Ce=`closed${Ae}`;class Oe extends ve{static get NAME(){return"alert"}close(){if(fe.trigger(this._element,Te).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),fe.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Oe.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ee(Oe,"close"),Qt(Oe);const xe='[data-bs-toggle="button"]';class ke extends ve{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=ke.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}fe.on(document,"click.bs.button.data-api",xe,(t=>{t.preventDefault();const e=t.target.closest(xe);ke.getOrCreateInstance(e).toggle()})),Qt(ke);const Le=".bs.swipe",Se=`touchstart${Le}`,De=`touchmove${Le}`,$e=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},Me={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class je extends be{constructor(t,e){super(),this._element=t,t&&je.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return Me}static get NAME(){return"swipe"}dispose(){fe.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Xt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Xt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(fe.on(this._element,Ie,(t=>this._start(t))),fe.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(fe.on(this._element,Se,(t=>this._start(t))),fe.on(this._element,De,(t=>this._move(t))),fe.on(this._element,$e,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const 
Fe=".bs.carousel",He=".data-api",Be="ArrowLeft",We="ArrowRight",ze="next",Re="prev",qe="left",Ve="right",Ye=`slide${Fe}`,Ke=`slid${Fe}`,Qe=`keydown${Fe}`,Xe=`mouseenter${Fe}`,Ue=`mouseleave${Fe}`,Ge=`dragstart${Fe}`,Je=`load${Fe}${He}`,Ze=`click${Fe}${He}`,ti="carousel",ei="active",ii=".active",ni=".carousel-item",si=ii+ni,oi={[Be]:Ve,[We]:qe},ri={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},ai={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class li extends ve{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=we.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===ti&&this.cycle()}static get Default(){return ri}static get DefaultType(){return ai}static get NAME(){return"carousel"}next(){this._slide(ze)}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide(Re)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?fe.one(this._element,Ke,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void fe.one(this._element,Ke,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?ze:Re;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&fe.on(this._element,Qe,(t=>this._keydown(t))),"hover"===this._config.pause&&(fe.on(this._element,Xe,(()=>this.pause())),fe.on(this._element,Ue,(()=>this._maybeEnableCycle()))),this._config.touch&&je.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of we.find(".carousel-item img",this._element))fe.on(t,Ge,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(qe)),rightCallback:()=>this._slide(this._directionToOrder(Ve)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new je(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=oi[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=we.findOne(ii,this._indicatorsElement);e.classList.remove(ei),e.removeAttribute("aria-current");const i=we.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(ei),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===ze,s=e||Gt(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>fe.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Ye).defaultPrevented)return;if(!i||!s)return;const 
a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(ei),i.classList.remove(ei,c,l),this._isSliding=!1,r(Ke)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return we.findOne(si,this._element)}_getItems(){return we.find(ni,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Kt()?t===qe?Re:ze:t===qe?ze:Re}_orderToDirection(t){return Kt()?t===Re?qe:Ve:t===Re?Ve:qe}static jQueryInterface(t){return this.each((function(){const e=li.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}fe.on(document,Ze,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=we.getElementFromSelector(this);if(!e||!e.classList.contains(ti))return;t.preventDefault();const i=li.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===_e.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),fe.on(window,Je,(()=>{const t=we.find('[data-bs-ride="carousel"]');for(const e of t)li.getOrCreateInstance(e)})),Qt(li);const ci=".bs.collapse",hi=`show${ci}`,di=`shown${ci}`,ui=`hide${ci}`,fi=`hidden${ci}`,pi=`click${ci}.data-api`,mi="show",gi="collapse",_i="collapsing",bi=`:scope .${gi} .${gi}`,vi='[data-bs-toggle="collapse"]',yi={parent:null,toggle:!0},wi={parent:"(null|element)",toggle:"boolean"};class Ei extends ve{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=we.find(vi);for(const t of i){const e=we.getSelectorFromElement(t),i=we.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return yi}static get DefaultType(){return wi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>Ei.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(fe.trigger(this._element,hi).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(gi),this._element.classList.add(_i),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi,mi),this._element.style[e]="",fe.trigger(this._element,di)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(fe.trigger(this._element,ui).defaultPrevented)return;const 
t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(_i),this._element.classList.remove(gi,mi);for(const t of this._triggerArray){const e=we.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi),fe.trigger(this._element,fi)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(mi)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(vi);for(const e of t){const t=we.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=we.find(bi,this._config.parent);return we.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Ei.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}fe.on(document,pi,vi,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of we.getMultipleElementsFromSelector(this))Ei.getOrCreateInstance(t,{toggle:!1}).toggle()})),Qt(Ei);const Ai="dropdown",Ti=".bs.dropdown",Ci=".data-api",Oi="ArrowUp",xi="ArrowDown",ki=`hide${Ti}`,Li=`hidden${Ti}`,Si=`show${Ti}`,Di=`shown${Ti}`,$i=`click${Ti}${Ci}`,Ii=`keydown${Ti}${Ci}`,Ni=`keyup${Ti}${Ci}`,Pi="show",Mi='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',ji=`${Mi}.${Pi}`,Fi=".dropdown-menu",Hi=Kt()?"top-end":"top-start",Bi=Kt()?"top-start":"top-end",Wi=Kt()?"bottom-end":"bottom-start",zi=Kt()?"bottom-start":"bottom-end",Ri=Kt()?"left-start":"right-start",qi=Kt()?"right-start":"left-start",Vi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Yi={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Ki extends ve{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=we.next(this._element,Fi)[0]||we.prev(this._element,Fi)[0]||we.findOne(Fi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return Vi}static get DefaultType(){return Yi}static get NAME(){return Ai}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Wt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!fe.trigger(this._element,Si,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Pi),this._element.classList.add(Pi),fe.trigger(this._element,Di,t)}}hide(){if(Wt(this._element)||!this._isShown())return;const 
t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!fe.trigger(this._element,ki,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Pi),this._element.classList.remove(Pi),this._element.setAttribute("aria-expanded","false"),_e.removeDataAttribute(this._menu,"popper"),fe.trigger(this._element,Li,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Ft(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${Ai.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Ft(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Pi)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Ri;if(t.classList.contains("dropstart"))return qi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Bi:Hi:e?zi:Wi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(_e.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Xt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=we.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Bt(t)));i.length&&Gt(i,e,t===xi,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Ki.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=we.find(ji);for(const i of e){const e=Ki.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Oi,xi].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const 
s=this.matches(Mi)?this:we.prev(this,Mi)[0]||we.next(this,Mi)[0]||we.findOne(Mi,t.delegateTarget.parentNode),o=Ki.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}fe.on(document,Ii,Mi,Ki.dataApiKeydownHandler),fe.on(document,Ii,Fi,Ki.dataApiKeydownHandler),fe.on(document,$i,Ki.clearMenus),fe.on(document,Ni,Ki.clearMenus),fe.on(document,$i,Mi,(function(t){t.preventDefault(),Ki.getOrCreateInstance(this).toggle()})),Qt(Ki);const Qi="backdrop",Xi="show",Ui=`mousedown.bs.${Qi}`,Gi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Ji={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Zi extends be{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Gi}static get DefaultType(){return Ji}static get NAME(){return Qi}show(t){if(!this._config.isVisible)return void Xt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Xi),this._emulateAnimation((()=>{Xt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Xi),this._emulateAnimation((()=>{this.dispose(),Xt(t)}))):Xt(t)}dispose(){this._isAppended&&(fe.off(this._element,Ui),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),fe.on(t,Ui,(()=>{Xt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Ut(t,this._getElement(),this._config.isAnimated)}}const tn=".bs.focustrap",en=`focusin${tn}`,nn=`keydown.tab${tn}`,sn="backward",on={autofocus:!0,trapElement:null},rn={autofocus:"boolean",trapElement:"element"};class an extends be{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return on}static get DefaultType(){return rn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),fe.off(document,tn),fe.on(document,en,(t=>this._handleFocusin(t))),fe.on(document,nn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,fe.off(document,tn))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=we.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===sn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?sn:"forward")}}const ln=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",cn=".sticky-top",hn="padding-right",dn="margin-right";class un{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const 
t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,hn,(e=>e+t)),this._setElementAttributes(ln,hn,(e=>e+t)),this._setElementAttributes(cn,dn,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,hn),this._resetElementAttributes(ln,hn),this._resetElementAttributes(cn,dn)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&_e.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=_e.getDataAttribute(t,e);null!==i?(_e.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Ft(t))e(t);else for(const i of we.find(t,this._element))e(i)}}const fn=".bs.modal",pn=`hide${fn}`,mn=`hidePrevented${fn}`,gn=`hidden${fn}`,_n=`show${fn}`,bn=`shown${fn}`,vn=`resize${fn}`,yn=`click.dismiss${fn}`,wn=`mousedown.dismiss${fn}`,En=`keydown.dismiss${fn}`,An=`click${fn}.data-api`,Tn="modal-open",Cn="show",On="modal-static",xn={backdrop:!0,focus:!0,keyboard:!0},kn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class Ln extends ve{constructor(t,e){super(t,e),this._dialog=we.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new un,this._addEventListeners()}static get Default(){return xn}static get DefaultType(){return kn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||fe.trigger(this._element,_n,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(Tn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(fe.trigger(this._element,pn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(Cn),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){fe.off(window,fn),fe.off(this._dialog,fn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Zi({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new an({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const 
e=we.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(Cn),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,fe.trigger(this._element,bn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){fe.on(this._element,En,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),fe.on(window,vn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),fe.on(this._element,wn,(t=>{fe.one(this._element,yn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(Tn),this._resetAdjustments(),this._scrollBar.reset(),fe.trigger(this._element,gn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(fe.trigger(this._element,mn).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(On)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(On),this._queueCallback((()=>{this._element.classList.remove(On),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Kt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Kt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Ln.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}fe.on(document,An,'[data-bs-toggle="modal"]',(function(t){const e=we.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),fe.one(e,_n,(t=>{t.defaultPrevented||fe.one(e,gn,(()=>{Bt(this)&&this.focus()}))}));const i=we.findOne(".modal.show");i&&Ln.getInstance(i).hide(),Ln.getOrCreateInstance(e).toggle(this)})),Ee(Ln),Qt(Ln);const Sn=".bs.offcanvas",Dn=".data-api",$n=`load${Sn}${Dn}`,In="show",Nn="showing",Pn="hiding",Mn=".offcanvas.show",jn=`show${Sn}`,Fn=`shown${Sn}`,Hn=`hide${Sn}`,Bn=`hidePrevented${Sn}`,Wn=`hidden${Sn}`,zn=`resize${Sn}`,Rn=`click${Sn}${Dn}`,qn=`keydown.dismiss${Sn}`,Vn={backdrop:!0,keyboard:!0,scroll:!1},Yn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Kn extends ve{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return Vn}static get DefaultType(){return Yn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||fe.trigger(this._element,jn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
un).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Nn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(In),this._element.classList.remove(Nn),fe.trigger(this._element,Fn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(fe.trigger(this._element,Hn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(Pn),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(In,Pn),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new un).reset(),fe.trigger(this._element,Wn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Zi({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():fe.trigger(this._element,Bn)}:null})}_initializeFocusTrap(){return new an({trapElement:this._element})}_addEventListeners(){fe.on(this._element,qn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():fe.trigger(this._element,Bn))}))}static jQueryInterface(t){return this.each((function(){const e=Kn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}fe.on(document,Rn,'[data-bs-toggle="offcanvas"]',(function(t){const e=we.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this))return;fe.one(e,Wn,(()=>{Bt(this)&&this.focus()}));const i=we.findOne(Mn);i&&i!==e&&Kn.getInstance(i).hide(),Kn.getOrCreateInstance(e).toggle(this)})),fe.on(window,$n,(()=>{for(const t of we.find(Mn))Kn.getOrCreateInstance(t).show()})),fe.on(window,zn,(()=>{for(const t of we.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Kn.getOrCreateInstance(t).hide()})),Ee(Kn),Qt(Kn);const Qn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],dd:[],div:[],dl:[],dt:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Xn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Un=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Gn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Xn.has(i)||Boolean(Un.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Jn={allowList:Qn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
<div></div>"},Zn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},ts={entry:"(string|element|function|null)",selector:"(string|element)"};class es extends be{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Jn}static get DefaultType(){return Zn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},ts)}_setContent(t,e,i){const n=we.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Ft(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Gn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return Xt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const is=new Set(["sanitize","allowList","sanitizeFn"]),ns="fade",ss="show",os=".tooltip-inner",rs=".modal",as="hide.bs.modal",ls="hover",cs="focus",hs={AUTO:"auto",TOP:"top",RIGHT:Kt()?"left":"right",BOTTOM:"bottom",LEFT:Kt()?"right":"left"},ds={allowList:Qn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',title:"",trigger:"hover focus"},us={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class fs extends ve{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return ds}static get DefaultType(){return us}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),fe.off(this._element.closest(rs),as,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=fe.trigger(this._element,this.constructor.eventName("show")),e=(zt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),fe.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._queueCallback((()=>{fe.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!fe.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._activeTrigger.click=!1,this._activeTrigger[cs]=!1,this._activeTrigger[ls]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),fe.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ns,ss),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ns),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new es({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{[os]:this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ns)}_isShown(){return this.tip&&this.tip.classList.contains(ss)}_createPopper(t){const 
e=Xt(this._config.placement,[this,t,this._element]),i=hs[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Xt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Xt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)fe.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ls?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ls?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");fe.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?cs:ls]=!0,e._enter()})),fe.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?cs:ls]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},fe.on(this._element.closest(rs),as,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=_e.getDataAttributes(this._element);for(const t of Object.keys(e))is.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=fs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(fs);const 
ps=".popover-header",ms=".popover-body",gs={...fs.Default,content:"",offset:[0,8],placement:"right",template:'<div class="popover" role="tooltip"><div class="popover-arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>',trigger:"click"},_s={...fs.DefaultType,content:"(null|string|element|function)"};class bs extends fs{static get Default(){return gs}static get DefaultType(){return _s}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{[ps]:this._getTitle(),[ms]:this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=bs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(bs);const vs=".bs.scrollspy",ys=`activate${vs}`,ws=`click${vs}`,Es=`load${vs}.data-api`,As="active",Ts="[href]",Cs=".nav-link",Os=`${Cs}, .nav-item > ${Cs}, .list-group-item`,xs={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},ks={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ls extends ve{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return xs}static get DefaultType(){return ks}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(fe.off(this._config.target,ws),fe.on(this._config.target,ws,Ts,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=we.find(Ts,this._config.target);for(const e of t){if(!e.hash||Wt(e))continue;const 
t=we.findOne(decodeURI(e.hash),this._element);Bt(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(As),this._activateParents(t),fe.trigger(this._element,ys,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))we.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(As);else for(const e of we.parents(t,".nav, .list-group"))for(const t of we.prev(e,Os))t.classList.add(As)}_clearActiveClass(t){t.classList.remove(As);const e=we.find(`${Ts}.${As}`,t);for(const t of e)t.classList.remove(As)}static jQueryInterface(t){return this.each((function(){const e=Ls.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(window,Es,(()=>{for(const t of we.find('[data-bs-spy="scroll"]'))Ls.getOrCreateInstance(t)})),Qt(Ls);const Ss=".bs.tab",Ds=`hide${Ss}`,$s=`hidden${Ss}`,Is=`show${Ss}`,Ns=`shown${Ss}`,Ps=`click${Ss}`,Ms=`keydown${Ss}`,js=`load${Ss}`,Fs="ArrowLeft",Hs="ArrowRight",Bs="ArrowUp",Ws="ArrowDown",zs="Home",Rs="End",qs="active",Vs="fade",Ys="show",Ks=".dropdown-toggle",Qs=`:not(${Ks})`,Xs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',Us=`.nav-link${Qs}, .list-group-item${Qs}, [role="tab"]${Qs}, ${Xs}`,Gs=`.${qs}[data-bs-toggle="tab"], .${qs}[data-bs-toggle="pill"], .${qs}[data-bs-toggle="list"]`;class Js extends ve{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),fe.on(this._element,Ms,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?fe.trigger(e,Ds,{relatedTarget:t}):null;fe.trigger(t,Is,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(qs),this._activate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),fe.trigger(t,Ns,{relatedTarget:e})):t.classList.add(Ys)}),t,t.classList.contains(Vs)))}_deactivate(t,e){t&&(t.classList.remove(qs),t.blur(),this._deactivate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),fe.trigger(t,$s,{relatedTarget:e})):t.classList.remove(Ys)}),t,t.classList.contains(Vs)))}_keydown(t){if(![Fs,Hs,Bs,Ws,zs,Rs].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Wt(t)));let i;if([zs,Rs].includes(t.key))i=e[t.key===zs?0:e.length-1];else{const n=[Hs,Ws].includes(t.key);i=Gt(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Js.getOrCreateInstance(i).show())}_getChildren(){return we.find(Us,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=we.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=we.findOne(t,i);s&&s.classList.toggle(n,e)};n(Ks,qs),n(".dropdown-menu",Ys),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(qs)}_getInnerElement(t){return t.matches(Us)?t:we.findOne(Us,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Js.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(document,Ps,Xs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this)||Js.getOrCreateInstance(this).show()})),fe.on(window,js,(()=>{for(const t of we.find(Gs))Js.getOrCreateInstance(t)})),Qt(Js);const Zs=".bs.toast",to=`mouseover${Zs}`,eo=`mouseout${Zs}`,io=`focusin${Zs}`,no=`focusout${Zs}`,so=`hide${Zs}`,oo=`hidden${Zs}`,ro=`show${Zs}`,ao=`shown${Zs}`,lo="hide",co="show",ho="showing",uo={animation:"boolean",autohide:"boolean",delay:"number"},fo={animation:!0,autohide:!0,delay:5e3};class po extends ve{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return fo}static get DefaultType(){return uo}static get NAME(){return"toast"}show(){fe.trigger(this._element,ro).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(lo),qt(this._element),this._element.classList.add(co,ho),this._queueCallback((()=>{this._element.classList.remove(ho),fe.trigger(this._element,ao),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(fe.trigger(this._element,so).defaultPrevented||(this._element.classList.add(ho),this._queueCallback((()=>{this._element.classList.add(lo),this._element.classList.remove(ho,co),fe.trigger(this._element,oo)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(co),super.dispose()}isShown(){return this._element.classList.contains(co)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){fe.on(this._element,to,(t=>this._onInteraction(t,!0))),fe.on(this._element,eo,(t=>this._onInteraction(t,!1))),fe.on(this._element,io,(t=>this._onInteraction(t,!0))),fe.on(this._element,no,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
this.each((function(){const e=po.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function mo(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}Ee(po),Qt(po),mo((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new fs(t,{delay:{show:500,hide:100}})}))})),mo((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),mo((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))})),window.bootstrap=i})();
+//# sourceMappingURL=bootstrap.js.map
\ No newline at end of file
diff --git a/_static/scripts/bootstrap.js.LICENSE.txt b/_static/scripts/bootstrap.js.LICENSE.txt
new file mode 100644
index 000000000..28755c2c5
--- /dev/null
+++ b/_static/scripts/bootstrap.js.LICENSE.txt
@@ -0,0 +1,5 @@
+/*!
+ * Bootstrap v5.3.3 (https://getbootstrap.com/)
+ * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
+ */
diff --git a/_static/scripts/bootstrap.js.map b/_static/scripts/bootstrap.js.map
new file mode 100644
index 000000000..4a3502aeb
--- /dev/null
+++ b/_static/scripts/bootstrap.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,01BCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd
,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAA
O,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,GAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,IAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAA
SF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/
K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,S
AAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,
EAAG,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAA
O,EACPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO
,CAAC,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IElE4B+X,EAC9B4B,EFiEMN,EDhCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CCuB+ByX,EElEK7B,EFkEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WEjE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B,OAJA0X,EAAOE,EAAQ5X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MF4DM,OAJA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IA+FFI,EAAM+W,iBAAiB5W,SAAQ,SAAUJ,GACvC,IAAIJ,EAAOI,EAAKJ,KACZ+X,EAAe3X,EAAKe,QACpBA,OAA2B,IAAjB4W,EAA0B,CAAC,EAAIA,EACzChX,EAASX,EAAKW,OAElB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IA/GS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CAKAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAEA,IAAK,IAAIoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IACzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAzBb,CATA,CAqDF,EAGA1N,QC1I2BtK,ED0IV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,EC7IG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GDmIIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAC/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGzLnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAAiB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCatE,MAAMC,GAAa,IAAIlI,IACjBmI,GAAO,CACX,GAAAtH,CAAIxS,EAASzC,EAAKyN,GACX6O,GAAWzC,IAAIpX,IAClB6Z,GAAWrH,IAAIxS,EAAS,IAAI2R,KAE9B,MAAMoI,EAAcF,GAAWjc,IAAIoC,GAI9B+Z,EAAY3C,IAAI7Z,IAA6B,IAArBwc,EAAYC,KAKzCD,EAAYvH,IAAIjV,EAAKyN,GAHnBiP,QAAQC,MAAM,+EAA+E7W,MAAM8W,KAAKJ,EAAY1Y,QAAQ,MAIhI,EACAzD,IAAG,CAACoC,EAASzC,IACPsc,GAAWzC,IAAIpX,IACV6Z,GAAWjc,IAAIoC,GAASpC,IAAIL,IAE9B,KAET,MAAA6c,CAAOpa,EAASzC,GACd,IAAKsc,GAAWzC,IAAIpX,GAClB,OAEF,MAAM+Z,EAAcF,GAAWjc,IAAIoC,GACnC+Z,EAAYM,OAAO9c,GAGM,IAArBwc,EAAYC,MACdH,GAAWQ,OAAOra,EAEtB,GAYIsa,GAAiB,gBAOjBC,GAAgBC,IAChBA,GAAYna,OAAOoa,KAAOpa,OAAOoa,IAAIC,SAEvCF,EAAWA,EAAS5O,QAAQ,iBAAiB,CAAC+O,EAAOC,IAAO,IAAIH,IAAIC,OAAOE,QAEtEJ,GA4CHK,GAAuB7a,IAC3BA,EAAQ8a,cAAc,IAAIC,MAAMT,IAAgB,E
AE5C,GAAYU,MACXA,GAA4B,iBAAXA,UAGO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAEgB,IAApBA,EAAOE,UAEjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAEf,iBAAXA,GAAuBA,EAAO7J,OAAS,EACzCrL,SAAS+C,cAAc0R,GAAcS,IAEvC,KAEHI,GAAYpb,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQqb,iBAAiBlK,OAClD,OAAO,EAET,MAAMmK,EAAgF,YAA7D5V,iBAAiB1F,GAASub,iBAAiB,cAE9DC,EAAgBxb,EAAQyb,QAAQ,uBACtC,IAAKD,EACH,OAAOF,EAET,GAAIE,IAAkBxb,EAAS,CAC7B,MAAM0b,EAAU1b,EAAQyb,QAAQ,WAChC,GAAIC,GAAWA,EAAQlW,aAAegW,EACpC,OAAO,EAET,GAAgB,OAAZE,EACF,OAAO,CAEX,CACA,OAAOJ,CAAgB,EAEnBK,GAAa3b,IACZA,GAAWA,EAAQkb,WAAaU,KAAKC,gBAGtC7b,EAAQ8b,UAAU7W,SAAS,mBAGC,IAArBjF,EAAQ+b,SACV/b,EAAQ+b,SAEV/b,EAAQgc,aAAa,aAAoD,UAArChc,EAAQic,aAAa,aAE5DC,GAAiBlc,IACrB,IAAK8F,SAASC,gBAAgBoW,aAC5B,OAAO,KAIT,GAAmC,mBAAxBnc,EAAQqF,YAA4B,CAC7C,MAAM+W,EAAOpc,EAAQqF,cACrB,OAAO+W,aAAgBtb,WAAasb,EAAO,IAC7C,CACA,OAAIpc,aAAmBc,WACdd,EAIJA,EAAQwF,WAGN0W,GAAelc,EAAQwF,YAFrB,IAEgC,EAErC6W,GAAO,OAUPC,GAAStc,IACbA,EAAQuE,YAAY,EAEhBgY,GAAY,IACZlc,OAAOmc,SAAW1W,SAAS6G,KAAKqP,aAAa,qBACxC3b,OAAOmc,OAET,KAEHC,GAA4B,GAgB5BC,GAAQ,IAAuC,QAAjC5W,SAASC,gBAAgB4W,IACvCC,GAAqBC,IAhBAC,QAiBN,KACjB,MAAMC,EAAIR,KAEV,GAAIQ,EAAG,CACL,MAAMhc,EAAO8b,EAAOG,KACdC,EAAqBF,EAAE7b,GAAGH,GAChCgc,EAAE7b,GAAGH,GAAQ8b,EAAOK,gBACpBH,EAAE7b,GAAGH,GAAMoc,YAAcN,EACzBE,EAAE7b,GAAGH,GAAMqc,WAAa,KACtBL,EAAE7b,GAAGH,GAAQkc,EACNJ,EAAOK,gBAElB,GA5B0B,YAAxBpX,SAASuX,YAENZ,GAA0BtL,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMuR,KAAYL,GACrBK,GACF,IAGJL,GAA0BpK,KAAKyK,IAE/BA,GAkBA,EAEEQ,GAAU,CAACC,EAAkB9F,EAAO,GAAI+F,EAAeD,IACxB,mBAArBA,EAAkCA,KAAoB9F,GAAQ+F,EAExEC,GAAyB,CAACX,EAAUY,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAL,GAAQR,GAGV,MACMc,EA/JiC5d,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACF6d,EAAkB,gBAClBC,GACEzd,OAAOqF,iBAAiB1F,GAC5B,MAAM+d,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAG/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBlb,MAAM,KAAK,GACnDmb,EAAkBA,EAAgBnb,MAAM,KAAK,GAtDf,KAuDtBqb,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA0IpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EACb,MAAMC,EAAU,EACdrR,aAEIA,IAAW0Q,IAGfU,GAAS,EACTV,EAAkBjS,oBAAoB6O,GAAgB+D,GACtDf,GAAQR,GAAS,EAEnBY,EAAkBnS,iBAAiB+O,GAAgB+D,GACnDC,YAAW,KACJF,GACHvD,GAAqB6C,EACvB,GACCE,EAAiB,EAYhBW,GAAuB,CAAC1R,EAAM2R,EAAeC,EAAeC,KAChE,MAAMC,EAAa9R,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQ4Y,GAIzB,OAAe,IAAXtF,GACMuF,GAAiBC,EAAiB7R,EAAK8R,EAAa,GAAK9R,EAAK,IAExEqM,GAASuF,EAAgB,GAAK,EAC1BC,IACFxF,GAASA,EAAQyF,GAAcA,GAE1B9R,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOyF,EAAa,KAAI,EAerDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EACvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAIrI,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAM/lB,SAASsI,GAAarf,EAASsf,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBhf,EAAQgf,UAAYA,IAC/D,CACA,SAASO,GAAiBvf,GACxB,MAAMsf,EAAMD,GAAarf,GAGzB,OAFAA,EAAQgf,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CAiCA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOliB,OAAOmiB,OAAOH,GAAQ7M,MAAKiN,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CACA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAAW2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAI7B,OAHKX,GAAahI,IAAI8I,KACpBA,EAAYH,GAEP,CAACE,EAAaP,EAAUQ,EACjC,CACA,SAASE,GAAWpgB,EAAS+f,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmC/f,EAC5C,OAEF,IAAKigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAIzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EAAepf,GACZ,SAAU2e,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,e
AAevb,SAAS4a,EAAMU,eAC/G,OAAOrf,EAAGjD,KAAKwiB,KAAMZ,EAEzB,EAEFH,EAAWY,EAAaZ,EAC1B,CACA,MAAMD,EAASF,GAAiBvf,GAC1B0gB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MACjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAGvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkBnU,QAAQgT,GAAgB,KACvE1d,EAAK+e,EA5Db,SAAoCjgB,EAASwa,EAAUtZ,GACrD,OAAO,SAASmd,EAAQwB,GACtB,MAAMe,EAAc5gB,EAAQ6gB,iBAAiBrG,GAC7C,IAAK,IAAI,OACPxN,GACE6S,EAAO7S,GAAUA,IAAWyT,KAAMzT,EAASA,EAAOxH,WACpD,IAAK,MAAMsb,KAAcF,EACvB,GAAIE,IAAe9T,EASnB,OANA+T,GAAWlB,EAAO,CAChBW,eAAgBxT,IAEdqR,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAM1G,EAAUtZ,GAE3CA,EAAGigB,MAAMnU,EAAQ,CAAC6S,GAG/B,CACF,CAwC2BuB,CAA2BphB,EAASqe,EAASqB,GAvExE,SAA0B1f,EAASkB,GACjC,OAAO,SAASmd,EAAQwB,GAOtB,OANAkB,GAAWlB,EAAO,CAChBW,eAAgBxgB,IAEdqe,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAMhgB,GAEjCA,EAAGigB,MAAMnhB,EAAS,CAAC6f,GAC5B,CACF,CA6DoFwB,CAAiBrhB,EAAS0f,GAC5Gxe,EAAGye,mBAAqBM,EAAc5B,EAAU,KAChDnd,EAAGwe,SAAWA,EACdxe,EAAGmf,OAASA,EACZnf,EAAG8d,SAAWM,EACdoB,EAASpB,GAAOpe,EAChBlB,EAAQuL,iBAAiB2U,EAAWhf,EAAI+e,EAC1C,CACA,SAASqB,GAActhB,EAASyf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMze,EAAKse,GAAYC,EAAOS,GAAY7B,EAASsB,GAC9Cze,IAGLlB,EAAQyL,oBAAoByU,EAAWhf,EAAIqgB,QAAQ5B,WAC5CF,EAAOS,GAAWhf,EAAG8d,UAC9B,CACA,SAASwC,GAAyBxhB,EAASyf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAChD,IAAK,MAAOyB,EAAY9B,KAAUpiB,OAAOmkB,QAAQF,GAC3CC,EAAWE,SAASJ,IACtBH,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAGtE,CACA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMjU,QAAQiT,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CACA,MAAMmB,GAAe,CACnB,EAAAc,CAAG9hB,EAAS6f,EAAOxB,EAAS2B,GAC1BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAA+B,CAAI/hB,EAAS6f,EAAOxB,EAAS2B,GAC3BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAAiB,CAAIjhB,EAAS+f,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmC/f,EAC5C,OAEF,MAAOigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrFgC,EAAc9B,IAAcH,EAC5BN,EAASF,GAAiBvf,GAC1B0hB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C+B,EAAclC,EAAkBmC,WAAW,KACjD,QAAwB,IAAbxC,EAAX,CAQA,GAAIuC,EACF,IAAK,MAAME,KAAgB1kB,OAAO4D,KAAKoe,GACrC+B,GAAyBxhB,EAASyf,EAAQ0C,EAAcpC,EAAkBlN,MAAM,IAGpF,IAAK,MAAOuP,EAAavC,KAAUpiB,OAAOmkB,QAAQF,GAAoB,CACpE,MAAMC,EAAaS,EAAYxW,QAAQkT,GAAe,IACjDkD,IAAejC,EAAkB8B,SAASF,IAC7CL,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAEpE,CAXA,KAPA,CAEE,IAAKliB,OAAO4D,KAAKqgB,GAAmBvQ,OAClC,OAEFmQ,GAActhB,EAASyf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAYF,EACA,OAAAgE,CAAQriB,EAAS6f,EAAOpI,GACtB,GAAqB,iBAAVoI,IAAuB7f,EAChC,OAAO,KAET,MAAM+c,EAAIR,KAGV,IAAI+F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJH5C,IADFM,GAAaN,IAMZ9C,IACjBuF,EAAcvF,EAAEhC,MAAM8E,EAAOpI,GAC7BsF,EAAE/c,GAASqiB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAEjC,MAAMC,EAAM9B,GAAW,IAAIhG,MAAM8E,EAAO,CACtC0C,UACAO,YAAY,IACVrL,GAUJ,OATIgL,GACFI,EAAIE,iBAEFP,GACFxiB,EAAQ8a,cAAc+H,GAEpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAEPF,CACT,GAEF,SAAS9B,GAAWljB,EAAKmlB,EAAO,CAAC,GAC/B,IAAK,MAAOzlB,EAAKa,KAAUX,OAAOmkB,QAAQoB,GACxC,IACEnlB,EAAIN,GAAOa,CACb,CAAE,MAAO6kB,GACPxlB,OAAOC,eAAeG,EAAKN,EAAK,CAC9B2lB,cAAc,EACdtlB,IAAG,IACMQ,GAGb,CAEF,OAAOP,CACT,CASA,SAASslB,GAAc/kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAET,GAAc,UAAVA,EACF,OAAO,EAET,GAAIA,IAAU4f,OAAO5f,GAAOkC,WAC1B,OAAO0d,OAAO5f,GAEhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAET,GAAqB,iBAAVA,EACT,OAAOA,EAET,IACE,OAAOglB,KAAKC,MAAMC,mBAAmBllB,GACvC,CAAE,MAAO6kB,GACP,OAAO7kB,CACT,CACF,CACA,SAASmlB,GAAiBhmB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU4X,GAAO,IAAIA,EAAItjB,iBAC9C,CACA,MAAMujB,GAAc,CAClB,gBAAAC,CAAiB1jB,EAASzC,EAAKa,GAC7B4B,EAAQ6B,aAAa,WAAW0hB,GAAiBhmB,KAAQa,EAC3D,EACA,mBAAAulB,CAAoB3jB,EAASzC,GAC3ByC,
EAAQ4B,gBAAgB,WAAW2hB,GAAiBhmB,KACtD,EACA,iBAAAqmB,CAAkB5jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAEV,MAAM0B,EAAa,CAAC,EACdmiB,EAASpmB,OAAO4D,KAAKrB,EAAQ8jB,SAASld,QAAOrJ,GAAOA,EAAI2kB,WAAW,QAAU3kB,EAAI2kB,WAAW,cAClG,IAAK,MAAM3kB,KAAOsmB,EAAQ,CACxB,IAAIE,EAAUxmB,EAAIqO,QAAQ,MAAO,IACjCmY,EAAUA,EAAQC,OAAO,GAAG9jB,cAAgB6jB,EAAQlR,MAAM,EAAGkR,EAAQ5S,QACrEzP,EAAWqiB,GAAWZ,GAAcnjB,EAAQ8jB,QAAQvmB,GACtD,CACA,OAAOmE,CACT,EACAuiB,iBAAgB,CAACjkB,EAASzC,IACjB4lB,GAAcnjB,EAAQic,aAAa,WAAWsH,GAAiBhmB,QAgB1E,MAAM2mB,GAEJ,kBAAWC,GACT,MAAO,CAAC,CACV,CACA,sBAAWC,GACT,MAAO,CAAC,CACV,CACA,eAAWpH,GACT,MAAM,IAAIqH,MAAM,sEAClB,CACA,UAAAC,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAChB,OAAOA,CACT,CACA,eAAAC,CAAgBD,EAAQvkB,GACtB,MAAM2kB,EAAa,GAAU3kB,GAAWyjB,GAAYQ,iBAAiBjkB,EAAS,UAAY,CAAC,EAE3F,MAAO,IACFygB,KAAKmE,YAAYT,WACM,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAU3kB,GAAWyjB,GAAYG,kBAAkB5jB,GAAW,CAAC,KAC7C,iBAAXukB,EAAsBA,EAAS,CAAC,EAE/C,CACA,gBAAAG,CAAiBH,EAAQM,EAAcpE,KAAKmE,YAAYR,aACtD,IAAK,MAAO7hB,EAAUuiB,KAAkBrnB,OAAOmkB,QAAQiD,GAAc,CACnE,MAAMzmB,EAAQmmB,EAAOhiB,GACfwiB,EAAY,GAAU3mB,GAAS,UAhiBrC4c,OADSA,EAiiB+C5c,GA/hBnD,GAAG4c,IAELvd,OAAOM,UAAUuC,SAASrC,KAAK+c,GAAQL,MAAM,eAAe,GAAGza,cA8hBlE,IAAK,IAAI8kB,OAAOF,GAAehhB,KAAKihB,GAClC,MAAM,IAAIE,UAAU,GAAGxE,KAAKmE,YAAY5H,KAAKkI,0BAA0B3iB,qBAA4BwiB,yBAAiCD,MAExI,CAriBW9J,KAsiBb,EAqBF,MAAMmK,WAAsBjB,GAC1B,WAAAU,CAAY5kB,EAASukB,GACnBa,SACAplB,EAAUmb,GAAWnb,MAIrBygB,KAAK4E,SAAWrlB,EAChBygB,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/BzK,GAAKtH,IAAIiO,KAAK4E,SAAU5E,KAAKmE,YAAYW,SAAU9E,MACrD,CAGA,OAAA+E,GACE1L,GAAKM,OAAOqG,KAAK4E,SAAU5E,KAAKmE,YAAYW,UAC5CvE,GAAaC,IAAIR,KAAK4E,SAAU5E,KAAKmE,YAAYa,WACjD,IAAK,MAAMC,KAAgBjoB,OAAOkoB,oBAAoBlF,MACpDA,KAAKiF,GAAgB,IAEzB,CACA,cAAAE,CAAe9I,EAAU9c,EAAS6lB,GAAa,GAC7CpI,GAAuBX,EAAU9c,EAAS6lB,EAC5C,CACA,UAAAvB,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,EAAQ9D,KAAK4E,UAC3Cd,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CAGA,kBAAOuB,CAAY9lB,GACjB,OAAO8Z,GAAKlc,IAAIud,GAAWnb,GAAUygB,KAAK8E,SAC5C,CACA,0BAAOQ,CAAoB/lB,EAASukB,EAAS,CAAC,GAC5C,OAAO9D,KAAKqF,YAAY9lB,IAAY,IAAIygB,KAAKzgB,EAA2B,iBAAXukB,EAAsBA,EAAS,KAC9F,CACA,kBAAWyB,GACT,MA5CY,OA6Cd,CACA,mBAAWT,GACT,MAAO,MAAM9E,KAAKzD,MACpB,CACA,oBAAWyI,GACT,MAAO,IAAIhF,KAAK8E,UAClB,CACA,gBAAOU,CAAUllB,GACf,MAAO,GAAGA,IAAO0f,KAAKgF,WACxB,EAUF,MAAMS,GAAclmB,IAClB,IAAIwa,EAAWxa,EAAQic,aAAa,kBACpC,IAAKzB,GAAyB,MAAbA,EAAkB,CACjC,IAAI2L,EAAgBnmB,EAAQic,aAAa,QAMzC,IAAKkK,IAAkBA,EAActE,SAAS,OAASsE,EAAcjE,WAAW,KAC9E,OAAO,KAILiE,EAActE,SAAS,OAASsE,EAAcjE,WAAW,OAC3DiE,EAAgB,IAAIA,EAAcxjB,MAAM,KAAK,MAE/C6X,EAAW2L,GAAmC,MAAlBA,EAAwBA,EAAcC,OAAS,IAC7E,CACA,OAAO5L,EAAWA,EAAS7X,MAAM,KAAKY,KAAI8iB,GAAO9L,GAAc8L,KAAM1iB,KAAK,KAAO,IAAI,EAEjF2iB,GAAiB,CACrB1T,KAAI,CAAC4H,EAAUxa,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAU8iB,iBAAiB5iB,KAAK+B,EAASwa,IAEvE+L,QAAO,CAAC/L,EAAUxa,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAASwa,GAEvDgM,SAAQ,CAACxmB,EAASwa,IACT,GAAGpb,UAAUY,EAAQwmB,UAAU5f,QAAOzB,GAASA,EAAMshB,QAAQjM,KAEtE,OAAAkM,CAAQ1mB,EAASwa,GACf,MAAMkM,EAAU,GAChB,IAAIC,EAAW3mB,EAAQwF,WAAWiW,QAAQjB,GAC1C,KAAOmM,GACLD,EAAQrU,KAAKsU,GACbA,EAAWA,EAASnhB,WAAWiW,QAAQjB,GAEzC,OAAOkM,CACT,EACA,IAAAE,CAAK5mB,EAASwa,GACZ,IAAIqM,EAAW7mB,EAAQ8mB,uBACvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQjM,GACnB,MAAO,CAACqM,GAEVA,EAAWA,EAASC,sBACtB,CACA,MAAO,EACT,EAEA,IAAAxhB,CAAKtF,EAASwa,GACZ,IAAIlV,EAAOtF,EAAQ+mB,mBACnB,KAAOzhB,GAAM,CACX,GAAIA,EAAKmhB,QAAQjM,GACf,MAAO,CAAClV,GAEVA,EAAOA,EAAKyhB,kBACd,CACA,MAAO,EACT,EACA,iBAAAC,CAAkBhnB,GAChB,MAAMinB,EAAa,CAAC,IAAK,SAAU,QAAS,WAA
Y,SAAU,UAAW,aAAc,4BAA4B1jB,KAAIiX,GAAY,GAAGA,2BAAiC7W,KAAK,KAChL,OAAO8c,KAAK7N,KAAKqU,EAAYjnB,GAAS4G,QAAOsgB,IAAOvL,GAAWuL,IAAO9L,GAAU8L,IAClF,EACA,sBAAAC,CAAuBnnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAIwa,GACK8L,GAAeC,QAAQ/L,GAAYA,EAErC,IACT,EACA,sBAAA4M,CAAuBpnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAeC,QAAQ/L,GAAY,IACvD,EACA,+BAAA6M,CAAgCrnB,GAC9B,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAe1T,KAAK4H,GAAY,EACpD,GAUI8M,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAU9B,YACvC1kB,EAAOwmB,EAAUvK,KACvBgE,GAAac,GAAGhc,SAAU2hB,EAAY,qBAAqB1mB,OAAU,SAAU8e,GAI7E,GAHI,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEF,MAAMzT,EAASsZ,GAAec,uBAAuB3G,OAASA,KAAKhF,QAAQ,IAAI1a,KAC9DwmB,EAAUxB,oBAAoB/Y,GAGtCwa,IACX,GAAE,EAiBEG,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAQ9B,MAAMG,WAAc3C,GAElB,eAAWnI,GACT,MAfW,OAgBb,CAGA,KAAA+K,GAEE,GADmB/G,GAAaqB,QAAQ5B,KAAK4E,SAAUuC,IACxCnF,iBACb,OAEFhC,KAAK4E,SAASvJ,UAAU1B,OAlBF,QAmBtB,MAAMyL,EAAapF,KAAK4E,SAASvJ,UAAU7W,SApBrB,QAqBtBwb,KAAKmF,gBAAe,IAAMnF,KAAKuH,mBAAmBvH,KAAK4E,SAAUQ,EACnE,CAGA,eAAAmC,GACEvH,KAAK4E,SAASjL,SACd4G,GAAaqB,QAAQ5B,KAAK4E,SAAUwC,IACpCpH,KAAK+E,SACP,CAGA,sBAAOtI,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOgd,GAAM/B,oBAAoBtF,MACvC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOF6G,GAAqBQ,GAAO,SAM5BlL,GAAmBkL,IAcnB,MAKMI,GAAyB,4BAO/B,MAAMC,WAAehD,GAEnB,eAAWnI,GACT,MAfW,QAgBb,CAGA,MAAAoL,GAEE3H,KAAK4E,SAASxjB,aAAa,eAAgB4e,KAAK4E,SAASvJ,UAAUsM,OAjB3C,UAkB1B,CAGA,sBAAOlL,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOqd,GAAOpC,oBAAoBtF,MACzB,WAAX8D,GACFzZ,EAAKyZ,IAET,GACF,EAOFvD,GAAac,GAAGhc,SAjCe,2BAiCmBoiB,IAAwBrI,IACxEA,EAAMkD,iBACN,MAAMsF,EAASxI,EAAM7S,OAAOyO,QAAQyM,IACvBC,GAAOpC,oBAAoBsC,GACnCD,QAAQ,IAOfxL,GAAmBuL,IAcnB,MACMG,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAOjB,MAAME,WAAc/E,GAClB,WAAAU,CAAY5kB,EAASukB,GACnBa,QACA3E,KAAK4E,SAAWrlB,EACXA,GAAYipB,GAAMC,gBAGvBzI,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAK0I,QAAU,EACf1I,KAAK2I,sBAAwB7H,QAAQlhB,OAAOgpB,cAC5C5I,KAAK6I,cACP,CAGA,kBAAWnF,GACT,OAAOyE,EACT,CACA,sBAAWxE,GACT,OAAO4E,EACT,CACA,eAAWhM,GACT,MA/CW,OAgDb,CAGA,OAAAwI,GACExE,GAAaC,IAAIR,KAAK4E,SAAUiD,GAClC,CAGA,MAAAiB,CAAO1J,GACAY,KAAK2I,sBAIN3I,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,SAJrBhJ,KAAK0I,QAAUtJ,EAAM6J,QAAQ,GAAGD,OAMpC,CACA,IAAAE,CAAK9J,GACCY,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,QAAUhJ,KAAK0I,SAEtC1I,KAAKmJ,eACLtM,GAAQmD,KAAK6E,QAAQuD,YACvB,CACA,KAAAgB,CAAMhK,GACJY,KAAK0I,QAAUtJ,EAAM6J,SAAW7J,EAAM6J,QAAQvY,OAAS,EAAI,EAAI0O,EAAM6J,QAAQ,GAAGD,QAAUhJ,KAAK0I,OACjG,CACA,YAAAS,GACE,MAAME,EAAYlnB,KAAKoC,IAAIyb,KAAK0I,SAChC,GAAIW,GAnEgB,GAoElB,OAEF,MAAM/b,EAAY+b,EAAYrJ,KAAK0I,QACnC1I,KAAK0I,QAAU,EACVpb,GAGLuP,GAAQvP,EAAY,EAAI0S,KAAK6E,QAAQyD,cAAgBtI,KAAK6E,QAAQwD,aACpE,CACA,WAAAQ,GACM7I,KAAK2I,uBACPpI,GAAac,GAAGrB,KAAK4E,SAAUqD,IAAmB7I,GAASY,KAAK8I,OAAO1J,KACvEmB,GAAac,GAAGrB,KAAK4E,SAAUsD,IAAiB9I,GAASY,KAAKkJ,KAAK9J,KACnEY,KAAK4E,SAASvJ,UAAU5E,IAlFG,mBAoF3B8J,GAAac,GAAGrB,KAAK4E,SAAUkD,IAAkB1I,GAASY,KAAK8I,OAAO1J,KACtEmB,GAAac,GAAGrB,KAAK4E,SAAUmD,IAAiB3I,GAASY,KAAKoJ,MAAMhK,KACpEmB,GAAac,GAAGrB,KAAK4E,SAAUoD,IAAgB5I,GAASY,KAAKkJ,KAAK9J,KAEtE,CACA,uBAAA2J,CAAwB3J,GACtB,OAAOY,KAAK2I,wBA3FS,QA2FiBvJ,EAAMkK,aA5FrB,UA4FyDlK,EAAMkK,YACxF,CAGA,kBAAOb,GACL,MAAO,iBAAkBpjB,SAASC,iBAAmB7C,UAAU8mB,eAAiB,CAClF,EAeF,MAEMC,GAAc,eACdC,GAAiB,YACjBC
,GAAmB,YACnBC,GAAoB,aAGpBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQR,KACtBS,GAAa,OAAOT,KACpBU,GAAkB,UAAUV,KAC5BW,GAAqB,aAAaX,KAClCY,GAAqB,aAAaZ,KAClCa,GAAmB,YAAYb,KAC/Bc,GAAwB,OAAOd,KAAcC,KAC7Cc,GAAyB,QAAQf,KAAcC,KAC/Ce,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,CAACnB,IAAmBK,GACpB,CAACJ,IAAoBG,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAOR,MAAME,WAAiB5G,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKuL,UAAY,KACjBvL,KAAKwL,eAAiB,KACtBxL,KAAKyL,YAAa,EAClBzL,KAAK0L,aAAe,KACpB1L,KAAK2L,aAAe,KACpB3L,KAAK4L,mBAAqB/F,GAAeC,QArCjB,uBAqC8C9F,KAAK4E,UAC3E5E,KAAK6L,qBACD7L,KAAK6E,QAAQqG,OAASV,IACxBxK,KAAK8L,OAET,CAGA,kBAAWpI,GACT,OAAOoH,EACT,CACA,sBAAWnH,GACT,OAAO0H,EACT,CACA,eAAW9O,GACT,MAnFW,UAoFb,CAGA,IAAA1X,GACEmb,KAAK+L,OAAOnC,GACd,CACA,eAAAoC,IAIO3mB,SAAS4mB,QAAUtR,GAAUqF,KAAK4E,WACrC5E,KAAKnb,MAET,CACA,IAAAshB,GACEnG,KAAK+L,OAAOlC,GACd,CACA,KAAAoB,GACMjL,KAAKyL,YACPrR,GAAqB4F,KAAK4E,UAE5B5E,KAAKkM,gBACP,CACA,KAAAJ,GACE9L,KAAKkM,iBACLlM,KAAKmM,kBACLnM,KAAKuL,UAAYa,aAAY,IAAMpM,KAAKgM,mBAAmBhM,KAAK6E,QAAQkG,SAC1E,CACA,iBAAAsB,GACOrM,KAAK6E,QAAQqG,OAGdlL,KAAKyL,WACPlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAK8L,UAGzD9L,KAAK8L,QACP,CACA,EAAAQ,CAAG7T,GACD,MAAM8T,EAAQvM,KAAKwM,YACnB,GAAI/T,EAAQ8T,EAAM7b,OAAS,GAAK+H,EAAQ,EACtC,OAEF,GAAIuH,KAAKyL,WAEP,YADAlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAKsM,GAAG7T,KAG5D,MAAMgU,EAAczM,KAAK0M,cAAc1M,KAAK2M,cAC5C,GAAIF,IAAgBhU,EAClB,OAEF,MAAMtC,EAAQsC,EAAQgU,EAAc7C,GAAaC,GACjD7J,KAAK+L,OAAO5V,EAAOoW,EAAM9T,GAC3B,CACA,OAAAsM,GACM/E,KAAK2L,cACP3L,KAAK2L,aAAa5G,UAEpBJ,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAEhB,OADAA,EAAO8I,gBAAkB9I,EAAOiH,SACzBjH,CACT,CACA,kBAAA+H,GACM7L,KAAK6E,QAAQmG,UACfzK,GAAac,GAAGrB,KAAK4E,SAAUsF,IAAiB9K,GAASY,KAAK6M,SAASzN,KAE9C,UAAvBY,KAAK6E,QAAQoG,QACf1K,GAAac,GAAGrB,KAAK4E,SAAUuF,IAAoB,IAAMnK,KAAKiL,UAC9D1K,GAAac,GAAGrB,KAAK4E,SAAUwF,IAAoB,IAAMpK,KAAKqM,uBAE5DrM,KAAK6E,QAAQsG,OAAS3C,GAAMC,eAC9BzI,KAAK8M,yBAET,CACA,uBAAAA,GACE,IAAK,MAAMC,KAAOlH,GAAe1T,KArIX,qBAqImC6N,KAAK4E,UAC5DrE,GAAac,GAAG0L,EAAK1C,IAAkBjL,GAASA,EAAMkD,mBAExD,MAmBM0K,EAAc,CAClB3E,aAAc,IAAMrI,KAAK+L,OAAO/L,KAAKiN,kBAAkBnD,KACvDxB,cAAe,IAAMtI,KAAK+L,OAAO/L,KAAKiN,kBAAkBlD,KACxD3B,YAtBkB,KACS,UAAvBpI,KAAK6E,QAAQoG,QAYjBjL,KAAKiL,QACDjL,KAAK0L,cACPwB,aAAalN,KAAK0L,cAEpB1L,KAAK0L,aAAe7N,YAAW,IAAMmC,KAAKqM,qBAjLjB,IAiL+DrM,KAAK6E,QAAQkG,UAAS,GAOhH/K,KAAK2L,aAAe,IAAInD,GAAMxI,KAAK4E,SAAUoI,EAC/C,CACA,QAAAH,CAASzN,GACP,GAAI,kBAAkB/b,KAAK+b,EAAM7S,OAAO0a,SACtC,OAEF,MAAM3Z,EAAYud,GAAiBzL,EAAMtiB,KACrCwQ,IACF8R,EAAMkD,iBACNtC,KAAK+L,OAAO/L,KAAKiN,kBAAkB3f,IAEvC,CACA,aAAAof,CAAcntB,GACZ,OAAOygB,KAAKwM,YAAYrnB,QAAQ5F,EAClC,CACA,0BAAA4tB,CAA2B1U,GACzB,IAAKuH,KAAK4L,mBACR,OAEF,MAAMwB,EAAkBvH,GAAeC,QAAQ4E,GAAiB1K,KAAK4L,oBACrEwB,EAAgB/R,UAAU1B,OAAO8Q,IACjC2C,EAAgBjsB,gBAAgB,gBAChC,MAAMksB,EAAqBxH,GAAeC,QAAQ,sBAAsBrN,MAAWuH,KAAK4L,oBACpFyB,IACFA,EAAmBhS,UAAU5E,IAAIgU,IACjC4C,EAAmBjsB,aAAa,eAAgB,QAEpD,CACA,eAAA+qB,GACE,MAAM5sB,EAAUygB,KAAKwL,gBAAkBxL,KAAK2M,aAC5C,IAAKptB,EACH,OAEF,MAAM+tB,EAAkB/P,OAAOgQ,SAAShuB,EAAQic,aAAa,oBAAqB,IAClFwE,KAAK6E,QAAQkG,SAAWuC,GAAmBtN,KAAK6E,QAAQ+H,eAC1D,CACA,MAAAb,CAAO5V,EAAO5W,EAAU,MACtB,GAAIygB,KAAKyL,WACP,OAEF,MAAM1N,EAAgBiC,KAAK2M,aACrBa,EAASrX,IAAUyT,GACnB6D,EAAcluB,GAAWue,GAAqBkC,KAAKwM,YAAazO,EAAeyP,EAAQxN,KAAK6E,QAAQuG,MAC1G,GAAIqC,IAAgB1P,EAClB,OAEF,MAAM2P,EAAmB1N,KAAK0M,cAAce,GACtCE,EAAenI,GACZjF,GAAaqB,QAAQ5B,KAAK4E,SAAUY,EAAW,CACpD1F,cAAe2N,EACfngB,UAAW0S
,KAAK4N,kBAAkBzX,GAClCuD,KAAMsG,KAAK0M,cAAc3O,GACzBuO,GAAIoB,IAIR,GADmBC,EAAa3D,IACjBhI,iBACb,OAEF,IAAKjE,IAAkB0P,EAGrB,OAEF,MAAMI,EAAY/M,QAAQd,KAAKuL,WAC/BvL,KAAKiL,QACLjL,KAAKyL,YAAa,EAClBzL,KAAKmN,2BAA2BO,GAChC1N,KAAKwL,eAAiBiC,EACtB,MAAMK,EAAuBN,EA3OR,sBADF,oBA6ObO,EAAiBP,EA3OH,qBACA,qBA2OpBC,EAAYpS,UAAU5E,IAAIsX,GAC1BlS,GAAO4R,GACP1P,EAAc1C,UAAU5E,IAAIqX,GAC5BL,EAAYpS,UAAU5E,IAAIqX,GAQ1B9N,KAAKmF,gBAPoB,KACvBsI,EAAYpS,UAAU1B,OAAOmU,EAAsBC,GACnDN,EAAYpS,UAAU5E,IAAIgU,IAC1B1M,EAAc1C,UAAU1B,OAAO8Q,GAAqBsD,EAAgBD,GACpE9N,KAAKyL,YAAa,EAClBkC,EAAa1D,GAAW,GAEYlM,EAAeiC,KAAKgO,eACtDH,GACF7N,KAAK8L,OAET,CACA,WAAAkC,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAhQV,QAiQvB,CACA,UAAAmoB,GACE,OAAO9G,GAAeC,QAAQ8E,GAAsB5K,KAAK4E,SAC3D,CACA,SAAA4H,GACE,OAAO3G,GAAe1T,KAAKwY,GAAe3K,KAAK4E,SACjD,CACA,cAAAsH,GACMlM,KAAKuL,YACP0C,cAAcjO,KAAKuL,WACnBvL,KAAKuL,UAAY,KAErB,CACA,iBAAA0B,CAAkB3f,GAChB,OAAI2O,KACK3O,IAAcwc,GAAiBD,GAAaD,GAE9Ctc,IAAcwc,GAAiBF,GAAaC,EACrD,CACA,iBAAA+D,CAAkBzX,GAChB,OAAI8F,KACK9F,IAAU0T,GAAaC,GAAiBC,GAE1C5T,IAAU0T,GAAaE,GAAkBD,EAClD,CAGA,sBAAOrN,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOihB,GAAShG,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,GAIX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,OAREzZ,EAAKiiB,GAAGxI,EASZ,GACF,EAOFvD,GAAac,GAAGhc,SAAUklB,GAvSE,uCAuS2C,SAAUnL,GAC/E,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACrD,IAAKzT,IAAWA,EAAO8O,UAAU7W,SAASgmB,IACxC,OAEFpL,EAAMkD,iBACN,MAAM4L,EAAW5C,GAAShG,oBAAoB/Y,GACxC4hB,EAAanO,KAAKxE,aAAa,oBACrC,OAAI2S,GACFD,EAAS5B,GAAG6B,QACZD,EAAS7B,qBAGyC,SAAhDrJ,GAAYQ,iBAAiBxD,KAAM,UACrCkO,EAASrpB,YACTqpB,EAAS7B,sBAGX6B,EAAS/H,YACT+H,EAAS7B,oBACX,IACA9L,GAAac,GAAGzhB,OAAQ0qB,IAAuB,KAC7C,MAAM8D,EAAYvI,GAAe1T,KA5TR,6BA6TzB,IAAK,MAAM+b,KAAYE,EACrB9C,GAAShG,oBAAoB4I,EAC/B,IAOF/R,GAAmBmP,IAcnB,MAEM+C,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChBvqB,OAAQ,KACRkjB,QAAQ,GAEJsH,GAAgB,CACpBxqB,OAAQ,iBACRkjB,OAAQ,WAOV,MAAMuH,WAAiBxK,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmP,kBAAmB,EACxBnP,KAAKoP,cAAgB,GACrB,MAAMC,EAAaxJ,GAAe1T,KAAK4c,IACvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMtV,EAAW8L,GAAea,uBAAuB4I,GACjDC,EAAgB1J,GAAe1T,KAAK4H,GAAU5T,QAAOqpB,GAAgBA,IAAiBxP,KAAK4E,WAChF,OAAb7K,GAAqBwV,EAAc7e,QACrCsP,KAAKoP,cAAcxd,KAAK0d,EAE5B,CACAtP,KAAKyP,sBACAzP,KAAK6E,QAAQpgB,QAChBub,KAAK0P,0BAA0B1P,KAAKoP,cAAepP,KAAK2P,YAEtD3P,KAAK6E,QAAQ8C,QACf3H,KAAK2H,QAET,CAGA,kBAAWjE,GACT,OAAOsL,EACT,CACA,sBAAWrL,GACT,OAAOsL,EACT,CACA,eAAW1S,GACT,MA9DW,UA+Db,CAGA,MAAAoL,GACM3H,KAAK2P,WACP3P,KAAK4P,OAEL5P,KAAK6P,MAET,CACA,IAAAA,GACE,GAAI7P,KAAKmP,kBAAoBnP,KAAK2P,WAChC,OAEF,IAAIG,EAAiB,GAQrB,GALI9P,KAAK6E,QAAQpgB,SACfqrB,EAAiB9P,KAAK+P,uBAhEH,wCAgE4C5pB,QAAO5G,GAAWA,IAAYygB,KAAK4E,WAAU9hB,KAAIvD,GAAW2vB,GAAS5J,oBAAoB/lB,EAAS,CAC/JooB,QAAQ,OAGRmI,EAAepf,QAAUof,EAAe,GAAGX,iBAC7C,OAGF,GADmB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU0J,IACxCtM,iBACb,OAEF,IAAK,MAAMgO,KAAkBF,EAC3BE,EAAeJ,OAEjB,MAAMK,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAASvJ,UAAU1B,OAAOiV,IAC/B5O,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,EACjCjQ,KAAK0P,0BAA0B1P,KAAKoP,eAAe,GACnDpP,KAAKmP,kBAAmB,EACxB,MAQMgB,EAAa,SADUF,EAAU,GAAGxL,cAAgBwL,EAAU7d,MAAM,KAE1E4N,KAAKmF,gBATY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,GAAqBD,IACjD3O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjC1P,GAAaqB,QAAQ5B,KAAK4E,SAAU2J,GAAc,GAItBvO,KAAK4E,UAAU,GAC7C5E,KAAK4E,SAAS7jB,MAAMkvB,GAAa,G
AAGjQ,KAAK4E,SAASuL,MACpD,CACA,IAAAP,GACE,GAAI5P,KAAKmP,mBAAqBnP,KAAK2P,WACjC,OAGF,GADmBpP,GAAaqB,QAAQ5B,KAAK4E,SAAU4J,IACxCxM,iBACb,OAEF,MAAMiO,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GAAGjQ,KAAK4E,SAASthB,wBAAwB2sB,OAC1EpU,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAASvJ,UAAU1B,OAAOiV,GAAqBD,IACpD,IAAK,MAAM/M,KAAW5B,KAAKoP,cAAe,CACxC,MAAM7vB,EAAUsmB,GAAec,uBAAuB/E,GAClDriB,IAAYygB,KAAK2P,SAASpwB,IAC5BygB,KAAK0P,0BAA0B,CAAC9N,IAAU,EAE9C,CACA5B,KAAKmP,kBAAmB,EAOxBnP,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjCjQ,KAAKmF,gBAPY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,IAC5BrO,GAAaqB,QAAQ5B,KAAK4E,SAAU6J,GAAe,GAGvBzO,KAAK4E,UAAU,EAC/C,CACA,QAAA+K,CAASpwB,EAAUygB,KAAK4E,UACtB,OAAOrlB,EAAQ8b,UAAU7W,SAASmqB,GACpC,CAGA,iBAAA3K,CAAkBF,GAGhB,OAFAA,EAAO6D,OAAS7G,QAAQgD,EAAO6D,QAC/B7D,EAAOrf,OAASiW,GAAWoJ,EAAOrf,QAC3Bqf,CACT,CACA,aAAAoM,GACE,OAAOlQ,KAAK4E,SAASvJ,UAAU7W,SA3IL,uBAChB,QACC,QA0Ib,CACA,mBAAAirB,GACE,IAAKzP,KAAK6E,QAAQpgB,OAChB,OAEF,MAAMshB,EAAW/F,KAAK+P,uBAAuBhB,IAC7C,IAAK,MAAMxvB,KAAWwmB,EAAU,CAC9B,MAAMqK,EAAWvK,GAAec,uBAAuBpnB,GACnD6wB,GACFpQ,KAAK0P,0BAA0B,CAACnwB,GAAUygB,KAAK2P,SAASS,GAE5D,CACF,CACA,sBAAAL,CAAuBhW,GACrB,MAAMgM,EAAWF,GAAe1T,KAAK2c,GAA4B9O,KAAK6E,QAAQpgB,QAE9E,OAAOohB,GAAe1T,KAAK4H,EAAUiG,KAAK6E,QAAQpgB,QAAQ0B,QAAO5G,IAAYwmB,EAAS3E,SAAS7hB,IACjG,CACA,yBAAAmwB,CAA0BW,EAAcC,GACtC,GAAKD,EAAa3f,OAGlB,IAAK,MAAMnR,KAAW8wB,EACpB9wB,EAAQ8b,UAAUsM,OArKK,aAqKyB2I,GAChD/wB,EAAQ6B,aAAa,gBAAiBkvB,EAE1C,CAGA,sBAAO7T,CAAgBqH,GACrB,MAAMe,EAAU,CAAC,EAIjB,MAHsB,iBAAXf,GAAuB,YAAYzgB,KAAKygB,KACjDe,EAAQ8C,QAAS,GAEZ3H,KAAKwH,MAAK,WACf,MAAMnd,EAAO6kB,GAAS5J,oBAAoBtF,KAAM6E,GAChD,GAAsB,iBAAXf,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,CACF,GACF,EAOFvD,GAAac,GAAGhc,SAAUqpB,GAAwBK,IAAwB,SAAU3P,IAErD,MAAzBA,EAAM7S,OAAO0a,SAAmB7H,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAekH,UAC/E7H,EAAMkD,iBAER,IAAK,MAAM/iB,KAAWsmB,GAAee,gCAAgC5G,MACnEkP,GAAS5J,oBAAoB/lB,EAAS,CACpCooB,QAAQ,IACPA,QAEP,IAMAxL,GAAmB+S,IAcnB,MAAMqB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBtV,KAAU,UAAY,YACtCuV,GAAmBvV,KAAU,YAAc,UAC3CwV,GAAmBxV,KAAU,aAAe,eAC5CyV,GAAsBzV,KAAU,eAAiB,aACjD0V,GAAkB1V,KAAU,aAAe,cAC3C2V,GAAiB3V,KAAU,cAAgB,aAG3C4V,GAAY,CAChBC,WAAW,EACX7jB,SAAU,kBACV8jB,QAAS,UACT/pB,OAAQ,CAAC,EAAG,GACZgqB,aAAc,KACd1zB,UAAW,UAEP2zB,GAAgB,CACpBH,UAAW,mBACX7jB,SAAU,mBACV8jB,QAAS,SACT/pB,OAAQ,0BACRgqB,aAAc,yBACd1zB,UAAW,2BAOb,MAAM4zB,WAAiBxN,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmS,QAAU,KACfnS,KAAKoS,QAAUpS,KAAK4E,SAAS7f,WAE7Bib,KAAKqS,MAAQxM,GAAehhB,KAAKmb,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeM,KAAKnG,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeC,QAAQwL,GAAetR,KAAKoS,SACxKpS,KAAKsS,UAAYtS,KAAKuS,eACxB,CAGA,kBAAW7O,GACT,OAAOmO,EACT,CACA,sBAAWlO,GACT,OAAOsO,EACT,CACA,eAAW1V,GACT,OAAOgU,EACT,CAGA,MAAA5I,GACE,OAAO3H,KAAK2P,WAAa3P,KAAK4P,OAAS5P,KAAK6P,MAC9C,CACA,IAAAA,GACE,GAAI3U,GAAW8E,KAAK4E,WAAa5E,KAAK2P,WACpC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAGtB,IADkBrE,GAAaqB,QAAQ5B,KAAK4E,SAAUkM,GAAchR,GACtDkC,iBAAd,CASA,GANAhC,KAAKwS,gBAMD,iBAAkBntB,SAASC,kBAAoB0a,KAAKoS,QAAQpX,QAzExC,eA0EtB,IAAK,MAAMzb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAG1CoE,KAAK4E,SAAS6N,QACdzS,KAAK4E,SAASxjB,aAAa,iBAAiB,GAC5C4e,KAAKqS,MAAMhX,UAAU5E,IAAI0a,IACzBnR,KAAK4E,SAASvJ,UAAU5E,IAAI0a,IAC5B5Q,
GAAaqB,QAAQ5B,KAAK4E,SAAUmM,GAAejR,EAhBnD,CAiBF,CACA,IAAA8P,GACE,GAAI1U,GAAW8E,KAAK4E,YAAc5E,KAAK2P,WACrC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAEtB5E,KAAK0S,cAAc5S,EACrB,CACA,OAAAiF,GACM/E,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEf2L,MAAMI,SACR,CACA,MAAAha,GACEiV,KAAKsS,UAAYtS,KAAKuS,gBAClBvS,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,aAAA2nB,CAAc5S,GAEZ,IADkBS,GAAaqB,QAAQ5B,KAAK4E,SAAUgM,GAAc9Q,GACtDkC,iBAAd,CAMA,GAAI,iBAAkB3c,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAGvCoE,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEfgH,KAAKqS,MAAMhX,UAAU1B,OAAOwX,IAC5BnR,KAAK4E,SAASvJ,UAAU1B,OAAOwX,IAC/BnR,KAAK4E,SAASxjB,aAAa,gBAAiB,SAC5C4hB,GAAYE,oBAAoBlD,KAAKqS,MAAO,UAC5C9R,GAAaqB,QAAQ5B,KAAK4E,SAAUiM,GAAgB/Q,EAhBpD,CAiBF,CACA,UAAA+D,CAAWC,GAET,GAAgC,iBADhCA,EAASa,MAAMd,WAAWC,IACRxlB,YAA2B,GAAUwlB,EAAOxlB,YAAgE,mBAA3CwlB,EAAOxlB,UAAUgF,sBAElG,MAAM,IAAIkhB,UAAU,GAAG+L,GAAO9L,+GAEhC,OAAOX,CACT,CACA,aAAA0O,GACE,QAAsB,IAAX,EACT,MAAM,IAAIhO,UAAU,gEAEtB,IAAImO,EAAmB3S,KAAK4E,SACG,WAA3B5E,KAAK6E,QAAQvmB,UACfq0B,EAAmB3S,KAAKoS,QACf,GAAUpS,KAAK6E,QAAQvmB,WAChCq0B,EAAmBjY,GAAWsF,KAAK6E,QAAQvmB,WACA,iBAA3B0hB,KAAK6E,QAAQvmB,YAC7Bq0B,EAAmB3S,KAAK6E,QAAQvmB,WAElC,MAAM0zB,EAAehS,KAAK4S,mBAC1B5S,KAAKmS,QAAU,GAAoBQ,EAAkB3S,KAAKqS,MAAOL,EACnE,CACA,QAAArC,GACE,OAAO3P,KAAKqS,MAAMhX,UAAU7W,SAAS2sB,GACvC,CACA,aAAA0B,GACE,MAAMC,EAAiB9S,KAAKoS,QAC5B,GAAIU,EAAezX,UAAU7W,SArKN,WAsKrB,OAAOmtB,GAET,GAAImB,EAAezX,UAAU7W,SAvKJ,aAwKvB,OAAOotB,GAET,GAAIkB,EAAezX,UAAU7W,SAzKA,iBA0K3B,MA5JsB,MA8JxB,GAAIsuB,EAAezX,UAAU7W,SA3KE,mBA4K7B,MA9JyB,SAkK3B,MAAMuuB,EAAkF,QAA1E9tB,iBAAiB+a,KAAKqS,OAAOvX,iBAAiB,iBAAiB6K,OAC7E,OAAImN,EAAezX,UAAU7W,SArLP,UAsLbuuB,EAAQvB,GAAmBD,GAE7BwB,EAAQrB,GAAsBD,EACvC,CACA,aAAAc,GACE,OAAkD,OAA3CvS,KAAK4E,SAAS5J,QAnLD,UAoLtB,CACA,UAAAgY,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,gBAAA4qB,GACE,MAAMM,EAAwB,CAC5Bx0B,UAAWshB,KAAK6S,gBAChBzc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,iBAanB,OAPIhT,KAAKsS,WAAsC,WAAzBtS,KAAK6E,QAAQkN,WACjC/O,GAAYC,iBAAiBjD,KAAKqS,MAAO,SAAU,UACnDa,EAAsB9c,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAGN,IACF2yB,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,eAAAC,EAAgB,IACdr2B,EAAG,OACHyP,IAEA,MAAMggB,EAAQ1G,GAAe1T,KAhOF,8DAgO+B6N,KAAKqS,OAAOlsB,QAAO5G,GAAWob,GAAUpb,KAC7FgtB,EAAM7b,QAMXoN,GAAqByO,EAAOhgB,EAAQzP,IAAQ6zB,IAAmBpE,EAAMnL,SAAS7U,IAASkmB,OACzF,CAGA,sBAAOhW,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6nB,GAAS5M,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,CACA,iBAAOsP,CAAWhU,GAChB,GA5QuB,IA4QnBA,EAAMwI,QAAgD,UAAfxI,EAAMqB,MA/QnC,QA+QuDrB,EAAMtiB,IACzE,OAEF,MAAMu2B,EAAcxN,GAAe1T,KAAKkf,IACxC,IAAK,MAAM1J,KAAU0L,EAAa,CAChC,MAAMC,EAAUpB,GAAS7M,YAAYsC,GACrC,IAAK2L,IAAyC,IAA9BA,EAAQzO,QAAQiN,UAC9B,SAEF,MAAMyB,EAAenU,EAAMmU,eACrBC,EAAeD,EAAanS,SAASkS,EAAQjB,OACnD,GAAIkB,EAAanS,SAASkS,EAAQ1O,WAA2C,WAA9B0O,EAAQzO,QAAQiN,YAA2B0B,GAA8C,YAA9BF,EAAQzO,QAAQiN,WAA2B0B,EACnJ,SAIF,GAAIF,EAAQjB,MAAM7tB,SAAS4a,EAAM7S,UAA2B,UAAf6S,EAAMqB,MA/RvC,QA+R2DrB,EAAMtiB,KAAqB,qCAAqCuG,KAAK+b,EAAM7S,OAAO0a,UACvJ,SAEF,MAAMnH,EAAgB,CACpBA,cAAewT,EAAQ1O,UAEN,UAAfxF,EAAMqB,OACRX,EAAckH,WAAa5H,GAE7BkU,EAAQZ,cAAc5S,EACxB,CACF,CACA,4BAAO2T,CAAsBrU,GAI3B,MAAMsU,EAAU,kBAAkBrwB,KAAK+b,EAAM7S,OAAO0a,SAC9C0M,EAjTW,WAiTKvU,EAAMtiB,IACtB82B,EAAkB,CAAClD,GAAgBC,IAAkBvP,SAAShC,EAAMtiB,KAC1E,IAAK82B,IAAoBD,EACvB,OAEF
,GAAID,IAAYC,EACd,OAEFvU,EAAMkD,iBAGN,MAAMuR,EAAkB7T,KAAKgG,QAAQoL,IAA0BpR,KAAO6F,GAAeM,KAAKnG,KAAMoR,IAAwB,IAAMvL,GAAehhB,KAAKmb,KAAMoR,IAAwB,IAAMvL,GAAeC,QAAQsL,GAAwBhS,EAAMW,eAAehb,YACpPwF,EAAW2nB,GAAS5M,oBAAoBuO,GAC9C,GAAID,EAIF,OAHAxU,EAAM0U,kBACNvpB,EAASslB,YACTtlB,EAAS4oB,gBAAgB/T,GAGvB7U,EAASolB,aAEXvQ,EAAM0U,kBACNvpB,EAASqlB,OACTiE,EAAgBpB,QAEpB,EAOFlS,GAAac,GAAGhc,SAAU4rB,GAAwBG,GAAwBc,GAASuB,uBACnFlT,GAAac,GAAGhc,SAAU4rB,GAAwBK,GAAeY,GAASuB,uBAC1ElT,GAAac,GAAGhc,SAAU2rB,GAAwBkB,GAASkB,YAC3D7S,GAAac,GAAGhc,SAAU6rB,GAAsBgB,GAASkB,YACzD7S,GAAac,GAAGhc,SAAU2rB,GAAwBI,IAAwB,SAAUhS,GAClFA,EAAMkD,iBACN4P,GAAS5M,oBAAoBtF,MAAM2H,QACrC,IAMAxL,GAAmB+V,IAcnB,MAAM6B,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACfhP,YAAY,EACZzK,WAAW,EAEX0Z,YAAa,QAETC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACfhP,WAAY,UACZzK,UAAW,UACX0Z,YAAa,oBAOf,MAAME,WAAiB9Q,GACrB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwU,aAAc,EACnBxU,KAAK4E,SAAW,IAClB,CAGA,kBAAWlB,GACT,OAAOwQ,EACT,CACA,sBAAWvQ,GACT,OAAO2Q,EACT,CACA,eAAW/X,GACT,OAAOwX,EACT,CAGA,IAAAlE,CAAKxT,GACH,IAAK2D,KAAK6E,QAAQlK,UAEhB,YADAkC,GAAQR,GAGV2D,KAAKyU,UACL,MAAMl1B,EAAUygB,KAAK0U,cACjB1U,KAAK6E,QAAQO,YACfvJ,GAAOtc,GAETA,EAAQ8b,UAAU5E,IAAIud,IACtBhU,KAAK2U,mBAAkB,KACrB9X,GAAQR,EAAS,GAErB,CACA,IAAAuT,CAAKvT,GACE2D,KAAK6E,QAAQlK,WAIlBqF,KAAK0U,cAAcrZ,UAAU1B,OAAOqa,IACpChU,KAAK2U,mBAAkB,KACrB3U,KAAK+E,UACLlI,GAAQR,EAAS,KANjBQ,GAAQR,EAQZ,CACA,OAAA0I,GACO/E,KAAKwU,cAGVjU,GAAaC,IAAIR,KAAK4E,SAAUqP,IAChCjU,KAAK4E,SAASjL,SACdqG,KAAKwU,aAAc,EACrB,CAGA,WAAAE,GACE,IAAK1U,KAAK4E,SAAU,CAClB,MAAMgQ,EAAWvvB,SAASwvB,cAAc,OACxCD,EAAST,UAAYnU,KAAK6E,QAAQsP,UAC9BnU,KAAK6E,QAAQO,YACfwP,EAASvZ,UAAU5E,IApFD,QAsFpBuJ,KAAK4E,SAAWgQ,CAClB,CACA,OAAO5U,KAAK4E,QACd,CACA,iBAAAZ,CAAkBF,GAGhB,OADAA,EAAOuQ,YAAc3Z,GAAWoJ,EAAOuQ,aAChCvQ,CACT,CACA,OAAA2Q,GACE,GAAIzU,KAAKwU,YACP,OAEF,MAAMj1B,EAAUygB,KAAK0U,cACrB1U,KAAK6E,QAAQwP,YAAYS,OAAOv1B,GAChCghB,GAAac,GAAG9hB,EAAS00B,IAAiB,KACxCpX,GAAQmD,KAAK6E,QAAQuP,cAAc,IAErCpU,KAAKwU,aAAc,CACrB,CACA,iBAAAG,CAAkBtY,GAChBW,GAAuBX,EAAU2D,KAAK0U,cAAe1U,KAAK6E,QAAQO,WACpE,EAeF,MAEM2P,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAETC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAOf,MAAME,WAAkB9R,GACtB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwV,WAAY,EACjBxV,KAAKyV,qBAAuB,IAC9B,CAGA,kBAAW/R,GACT,OAAOyR,EACT,CACA,sBAAWxR,GACT,OAAO2R,EACT,CACA,eAAW/Y,GACT,MArCW,WAsCb,CAGA,QAAAmZ,GACM1V,KAAKwV,YAGLxV,KAAK6E,QAAQuQ,WACfpV,KAAK6E,QAAQwQ,YAAY5C,QAE3BlS,GAAaC,IAAInb,SAAU0vB,IAC3BxU,GAAac,GAAGhc,SAAU2vB,IAAiB5V,GAASY,KAAK2V,eAAevW,KACxEmB,GAAac,GAAGhc,SAAU4vB,IAAmB7V,GAASY,KAAK4V,eAAexW,KAC1EY,KAAKwV,WAAY,EACnB,CACA,UAAAK,GACO7V,KAAKwV,YAGVxV,KAAKwV,WAAY,EACjBjV,GAAaC,IAAInb,SAAU0vB,IAC7B,CAGA,cAAAY,CAAevW,GACb,MAAM,YACJiW,GACErV,KAAK6E,QACT,GAAIzF,EAAM7S,SAAWlH,UAAY+Z,EAAM7S,SAAW8oB,GAAeA,EAAY7wB,SAAS4a,EAAM7S,QAC1F,OAEF,MAAM1L,EAAWglB,GAAeU,kBAAkB8O,GAC1B,IAApBx0B,EAAS6P,OACX2kB,EAAY5C,QACHzS,KAAKyV,uBAAyBP,GACvCr0B,EAASA,EAAS6P,OAAS,GAAG+hB,QAE9B5xB,EAAS,GAAG4xB,OAEhB,CACA,cAAAmD,CAAexW,GAzED,QA0ERA,EAAMtiB,MAGVkjB,KAAKyV,qBAAuBrW,EAAM0W,SAAWZ,GA5EzB,UA6EtB,EAeF,MAAMa,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAMxB,MAAMC,GACJ,WAAAhS,GACEnE,KAAK4E,SAAWvf,SAAS6G,IAC3B,CAGA,QAAAkqB,GAEE,MAAMC,EAAgBhxB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAO02B,WAAaD,EACtC,CACA,IAAAzG,GACE,MAAM/rB,EAAQmc,KAAKoW,WACnBpW,KAAKuW,mBAELvW,KAAKwW,sBAAsBxW,KAAK4E,SAAUqR,IAAkBQ,GAAmBA,EAAkB5yB,IAEjGmc,KAAKwW,sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkB5yB,IAC1Gmc,KAAKwW,sBAAsBR,GAAyBE,IAAiBO,G
AAmBA,EAAkB5yB,GAC5G,CACA,KAAAwO,GACE2N,KAAK0W,wBAAwB1W,KAAK4E,SAAU,YAC5C5E,KAAK0W,wBAAwB1W,KAAK4E,SAAUqR,IAC5CjW,KAAK0W,wBAAwBX,GAAwBE,IACrDjW,KAAK0W,wBAAwBV,GAAyBE,GACxD,CACA,aAAAS,GACE,OAAO3W,KAAKoW,WAAa,CAC3B,CAGA,gBAAAG,GACEvW,KAAK4W,sBAAsB5W,KAAK4E,SAAU,YAC1C5E,KAAK4E,SAAS7jB,MAAM+K,SAAW,QACjC,CACA,qBAAA0qB,CAAsBzc,EAAU8c,EAAexa,GAC7C,MAAMya,EAAiB9W,KAAKoW,WAS5BpW,KAAK+W,2BAA2Bhd,GARHxa,IAC3B,GAAIA,IAAYygB,KAAK4E,UAAYhlB,OAAO02B,WAAa/2B,EAAQsI,YAAcivB,EACzE,OAEF9W,KAAK4W,sBAAsBr3B,EAASs3B,GACpC,MAAMJ,EAAkB72B,OAAOqF,iBAAiB1F,GAASub,iBAAiB+b,GAC1Et3B,EAAQwB,MAAMi2B,YAAYH,EAAe,GAAGxa,EAASkB,OAAOC,WAAWiZ,QAAsB,GAGjG,CACA,qBAAAG,CAAsBr3B,EAASs3B,GAC7B,MAAMI,EAAc13B,EAAQwB,MAAM+Z,iBAAiB+b,GAC/CI,GACFjU,GAAYC,iBAAiB1jB,EAASs3B,EAAeI,EAEzD,CACA,uBAAAP,CAAwB3c,EAAU8c,GAWhC7W,KAAK+W,2BAA2Bhd,GAVHxa,IAC3B,MAAM5B,EAAQqlB,GAAYQ,iBAAiBjkB,EAASs3B,GAEtC,OAAVl5B,GAIJqlB,GAAYE,oBAAoB3jB,EAASs3B,GACzCt3B,EAAQwB,MAAMi2B,YAAYH,EAAel5B,IAJvC4B,EAAQwB,MAAMm2B,eAAeL,EAIgB,GAGnD,CACA,0BAAAE,CAA2Bhd,EAAUod,GACnC,GAAI,GAAUpd,GACZod,EAASpd,QAGX,IAAK,MAAM6L,KAAOC,GAAe1T,KAAK4H,EAAUiG,KAAK4E,UACnDuS,EAASvR,EAEb,EAeF,MAEMwR,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,QAAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBtD,UAAU,EACVnC,OAAO,EACPzH,UAAU,GAENmN,GAAgB,CACpBvD,SAAU,mBACVnC,MAAO,UACPzH,SAAU,WAOZ,MAAMoN,WAAc1T,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKqY,QAAUxS,GAAeC,QArBV,gBAqBmC9F,KAAK4E,UAC5D5E,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAa,IAAIvC,GACtBnW,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAOwU,EACT,CACA,sBAAWvU,GACT,OAAOwU,EACT,CACA,eAAW5b,GACT,MA1DW,OA2Db,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAAY3P,KAAKmP,kBAGR5O,GAAaqB,QAAQ5B,KAAK4E,SAAU4S,GAAc,CAClE1X,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAW9I,OAChBvqB,SAAS6G,KAAKmP,UAAU5E,IAAIshB,IAC5B/X,KAAK2Y,gBACL3Y,KAAKsY,UAAUzI,MAAK,IAAM7P,KAAK4Y,aAAa9Y,KAC9C,CACA,IAAA8P,GACO5P,KAAK2P,WAAY3P,KAAKmP,mBAGT5O,GAAaqB,QAAQ5B,KAAK4E,SAAUyS,IACxCrV,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASvJ,UAAU1B,OAAOqe,IAC/BhY,KAAKmF,gBAAe,IAAMnF,KAAK6Y,cAAc7Y,KAAK4E,SAAU5E,KAAKgO,gBACnE,CACA,OAAAjJ,GACExE,GAAaC,IAAI5gB,OAAQw3B,IACzB7W,GAAaC,IAAIR,KAAKqY,QAASjB,IAC/BpX,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CACA,YAAA+T,GACE9Y,KAAK2Y,eACP,CAGA,mBAAAJ,GACE,OAAO,IAAIhE,GAAS,CAClB5Z,UAAWmG,QAAQd,KAAK6E,QAAQ+P,UAEhCxP,WAAYpF,KAAKgO,eAErB,CACA,oBAAAyK,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,YAAAgU,CAAa9Y,GAENza,SAAS6G,KAAK1H,SAASwb,KAAK4E,WAC/Bvf,SAAS6G,KAAK4oB,OAAO9U,KAAK4E,UAE5B5E,KAAK4E,SAAS7jB,MAAMgxB,QAAU,QAC9B/R,KAAK4E,SAASzjB,gBAAgB,eAC9B6e,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASnZ,UAAY,EAC1B,MAAMstB,EAAYlT,GAAeC,QA7GT,cA6GsC9F,KAAKqY,SAC/DU,IACFA,EAAUttB,UAAY,GAExBoQ,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIuhB,IAU5BhY,KAAKmF,gBATsB,KACrBnF,KAAK6E,QAAQ4N,OACfzS,KAAKwY,WAAW9C,WAElB1V,KAAKmP,kBAAmB,EACxB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU6S,GAAe,CACjD3X,iBACA,GAEoCE,KAAKqY,QAASrY,KAAKgO,cAC7D,CACA,kBAAAnC,GACEtL,GAAac,GAAGrB,KAAK4E,SAAUiT,IAAyBzY,IAhJvC,WAiJXA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGP5P,KAAKgZ,6BAA4B,IAEnCzY,GAAac,GAAGzhB,OAAQ83B,IAAgB,KAClC1X,KAAK2P,WAAa3P,KAAKmP,kBACzBnP,KAAK2Y,eACP,IAEFpY,GAAac,GAAGrB,KAAK4E
,SAAUgT,IAAyBxY,IAEtDmB,GAAae,IAAItB,KAAK4E,SAAU+S,IAAqBsB,IAC/CjZ,KAAK4E,WAAaxF,EAAM7S,QAAUyT,KAAK4E,WAAaqU,EAAO1sB,SAGjC,WAA1ByT,KAAK6E,QAAQ+P,SAIb5U,KAAK6E,QAAQ+P,UACf5U,KAAK4P,OAJL5P,KAAKgZ,6BAKP,GACA,GAEN,CACA,UAAAH,GACE7Y,KAAK4E,SAAS7jB,MAAMgxB,QAAU,OAC9B/R,KAAK4E,SAASxjB,aAAa,eAAe,GAC1C4e,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QAC9B6e,KAAKmP,kBAAmB,EACxBnP,KAAKsY,UAAU1I,MAAK,KAClBvqB,SAAS6G,KAAKmP,UAAU1B,OAAOoe,IAC/B/X,KAAKkZ,oBACLlZ,KAAK0Y,WAAWrmB,QAChBkO,GAAaqB,QAAQ5B,KAAK4E,SAAU2S,GAAe,GAEvD,CACA,WAAAvJ,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAjLT,OAkLxB,CACA,0BAAAw0B,GAEE,GADkBzY,GAAaqB,QAAQ5B,KAAK4E,SAAU0S,IACxCtV,iBACZ,OAEF,MAAMmX,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EwxB,EAAmBpZ,KAAK4E,SAAS7jB,MAAMiL,UAEpB,WAArBotB,GAAiCpZ,KAAK4E,SAASvJ,UAAU7W,SAASyzB,MAGjEkB,IACHnZ,KAAK4E,SAAS7jB,MAAMiL,UAAY,UAElCgU,KAAK4E,SAASvJ,UAAU5E,IAAIwhB,IAC5BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAASvJ,UAAU1B,OAAOse,IAC/BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAAS7jB,MAAMiL,UAAYotB,CAAgB,GAC/CpZ,KAAKqY,QAAQ,GACfrY,KAAKqY,SACRrY,KAAK4E,SAAS6N,QAChB,CAMA,aAAAkG,GACE,MAAMQ,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EkvB,EAAiB9W,KAAK0Y,WAAWtC,WACjCiD,EAAoBvC,EAAiB,EAC3C,GAAIuC,IAAsBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,cAAgB,eAC3C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACA,IAAKuC,GAAqBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,eAAiB,cAC5C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACF,CACA,iBAAAoC,GACElZ,KAAK4E,SAAS7jB,MAAMu4B,YAAc,GAClCtZ,KAAK4E,SAAS7jB,MAAMw4B,aAAe,EACrC,CAGA,sBAAO9c,CAAgBqH,EAAQhE,GAC7B,OAAOE,KAAKwH,MAAK,WACf,MAAMnd,EAAO+tB,GAAM9S,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQhE,EAJb,CAKF,GACF,EAOFS,GAAac,GAAGhc,SAAUyyB,GA9OK,4BA8O2C,SAAU1Y,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACjD,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAER/B,GAAae,IAAI/U,EAAQirB,IAAcgC,IACjCA,EAAUxX,kBAIdzB,GAAae,IAAI/U,EAAQgrB,IAAgB,KACnC5c,GAAUqF,OACZA,KAAKyS,OACP,GACA,IAIJ,MAAMgH,EAAc5T,GAAeC,QAnQb,eAoQlB2T,GACFrB,GAAM/S,YAAYoU,GAAa7J,OAEpBwI,GAAM9S,oBAAoB/Y,GAClCob,OAAO3H,KACd,IACA6G,GAAqBuR,IAMrBjc,GAAmBic,IAcnB,MAEMsB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChB7F,UAAU,EACV5J,UAAU,EACVvgB,QAAQ,GAEJiwB,GAAgB,CACpB9F,SAAU,mBACV5J,SAAU,UACVvgB,OAAQ,WAOV,MAAMkwB,WAAkBjW,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAO+W,EACT,CACA,sBAAW9W,GACT,OAAO+W,EACT,CACA,eAAWne,GACT,MApDW,WAqDb,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAGSpP,GAAaqB,QAAQ5B,KAAK4E,SAAUqV,GAAc,CAClEna,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAUzI,OACV7P,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkBvG,OAExB5P,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASvJ,UAAU5E,IAAIqjB,IAW5B9Z,KAAKmF,gBAVoB,KAClBnF,KAAK6E,QAAQpa,SAAUuV,KAAK6E,QAAQ+P,UACvC5U,KAAKwY,WAAW9C,WAElB1V,KAAK4E,SAASvJ,UAAU5E,IAAIojB,IAC5B7Z,KAAK4E,SAASvJ,UAAU1B,OAAOmgB,IAC/BvZ,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,GAAe,CACjDpa,iBACA,GAEkCE,KAAK4E,UAAU,GACvD,CACA,IAAAgL,GACO5P,KAAK2P,WAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAUuV,IACxCnY,mBAGdhC,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASgW,OACd5a,KAAK2P,UAAW,EAChB3P,KAAK4E,SAASvJ,UAAU5E,IAAIsjB,IAC5B/Z,KAAKsY,UAAU1I,OAUf5P,KAAKmF,gBAToB,KACvBnF,KAAK4E,SAASvJ,
UAAU1B,OAAOkgB,GAAmBE,IAClD/Z,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QACzB6e,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkB9jB,QAExBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUyV,GAAe,GAEfra,KAAK4E,UAAU,IACvD,CACA,OAAAG,GACE/E,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CAGA,mBAAAwT,GACE,MASM5d,EAAYmG,QAAQd,KAAK6E,QAAQ+P,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA3HsB,qBA4HtBxZ,YACAyK,YAAY,EACZiP,YAAarU,KAAK4E,SAAS7f,WAC3BqvB,cAAezZ,EAfK,KACU,WAA1BqF,KAAK6E,QAAQ+P,SAIjB5U,KAAK4P,OAHHrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,GAG3B,EAUgC,MAE/C,CACA,oBAAA3B,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,kBAAAiH,GACEtL,GAAac,GAAGrB,KAAK4E,SAAU4V,IAAuBpb,IA5IvC,WA6ITA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGPrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,IAAqB,GAE7D,CAGA,sBAAO3d,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOswB,GAAUrV,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOFO,GAAac,GAAGhc,SAAUk1B,GA7JK,gCA6J2C,SAAUnb,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MAIrD,GAHI,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEFO,GAAae,IAAI/U,EAAQ8tB,IAAgB,KAEnC1f,GAAUqF,OACZA,KAAKyS,OACP,IAIF,MAAMgH,EAAc5T,GAAeC,QAAQkU,IACvCP,GAAeA,IAAgBltB,GACjCouB,GAAUtV,YAAYoU,GAAa7J,OAExB+K,GAAUrV,oBAAoB/Y,GACtCob,OAAO3H,KACd,IACAO,GAAac,GAAGzhB,OAAQg6B,IAAuB,KAC7C,IAAK,MAAM7f,KAAY8L,GAAe1T,KAAK6nB,IACzCW,GAAUrV,oBAAoBvL,GAAU8V,MAC1C,IAEFtP,GAAac,GAAGzhB,OAAQ06B,IAAc,KACpC,IAAK,MAAM/6B,KAAWsmB,GAAe1T,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5Bm5B,GAAUrV,oBAAoB/lB,GAASqwB,MAE3C,IAEF/I,GAAqB8T,IAMrBxe,GAAmBwe,IAUnB,MACME,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAHP,kBAI7BhqB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/BiqB,KAAM,GACNhqB,EAAG,GACHiqB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,GAAI,GACJC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJxqB,EAAG,GACH0b,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChD+O,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IAIAC,GAAgB,IAAIpmB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAShGqmB,GAAmB,0DACnBC,GAAmB,CAAC76B,EAAW86B,KACnC,MAAMC,EAAgB/6B,EAAUvC,SAASC,cACzC,OAAIo9B,EAAqBzb,SAAS0b,IAC5BJ,GAAc/lB,IAAImmB,IACbhc,QAAQ6b,GAAiBt5B,KAAKtB,EAAUg7B,YAM5CF,EAAqB12B,QAAO62B,GAAkBA,aAA0BzY,SAAQ9R,MAAKwqB,GAASA,EAAM55B,KAAKy5B,IAAe,EA0C3HI,GAAY,CAChBC,UAAWtC,GACXuC,QAAS,CAAC,EAEVC,WAAY,GACZxwB,MAAM,EACNywB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZxwB,KAAM,UACNywB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACP5jB,SAAU,oBAOZ,MAAM6jB,WAAwBna,GAC5B,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,EACjC,CAGA,kBAAWJ,GACT,OAAOwZ,EACT,CACA,sBAAWvZ,GACT,OAAO8Z,EACT,CACA,eAAWlhB,GACT,MA3CW,iBA4Cb,CAGA,UAAAshB,GACE,OAAO7gC,OAAOmiB,OAAOa,KAAK6E,QAAQuY,SAASt6B,KAAIghB,GAAU9D,KAAK8d,yBAAyBha,KAAS3d,OAAO2a,QACzG,CACA,UAAAid,GACE,OAAO/d,KAAK6d,aAAantB,OAAS,CACpC,CACA,aAAAstB,CAAcZ,GAMZ,OALApd,KAAKie,cAAcb,GACnBpd,KAAK6E,QAAQuY,QAAU,IAClBpd,KAAK6E,QAAQuY,WACbA,GAEEpd,IACT,CACA,MAAAke,GACE,MAAMC,EAAkB94B,SAASwvB,cAAc,OAC/CsJ,EAAgBC,UAAYpe,KAAKqe,eAAere,KAAK6E,QAAQ2Y,UAC7D,IAAK,MAAOzjB,EAAUukB,KAASthC,OAAOmkB,QAAQnB,KAAK6E,QAAQuY,SACzDpd,KAAKue,YAAYJ,EAAiBG,EAAMvkB,GAE1C,MAAMyjB,EAAWW,EAAgBpY,SAAS,GACpCsX,EAAard,KAAK8d,yBAAyB9d,KAAK6E,QAAQwY,YAI9D,OAHIA,GACFG,EAASniB,UAAU5E,OAAO4mB,EAAWn7B,MAAM,MAEtCs7B,CACT,CAGA,gBAAAvZ,CAAiBH,GACfa,MAAMV,iBAAiBH,GACvB9D,KAAKie,cAAcna,EAAOsZ,QAC5B,CACA,aAAAa,CAAcO,GACZ,IAAK,MAAOzkB,EAAUq
jB,KAAYpgC,OAAOmkB,QAAQqd,GAC/C7Z,MAAMV,iBAAiB,CACrBlK,WACA4jB,MAAOP,GACNM,GAEP,CACA,WAAAa,CAAYf,EAAUJ,EAASrjB,GAC7B,MAAM0kB,EAAkB5Y,GAAeC,QAAQ/L,EAAUyjB,GACpDiB,KAGLrB,EAAUpd,KAAK8d,yBAAyBV,IAKpC,GAAUA,GACZpd,KAAK0e,sBAAsBhkB,GAAW0iB,GAAUqB,GAG9Cze,KAAK6E,QAAQhY,KACf4xB,EAAgBL,UAAYpe,KAAKqe,eAAejB,GAGlDqB,EAAgBE,YAAcvB,EAX5BqB,EAAgB9kB,SAYpB,CACA,cAAA0kB,CAAeG,GACb,OAAOxe,KAAK6E,QAAQyY,SApJxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAWluB,OACd,OAAOkuB,EAET,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAE1B,MACME,GADY,IAAIl/B,OAAOm/B,WACKC,gBAAgBJ,EAAY,aACxD/9B,EAAW,GAAGlC,UAAUmgC,EAAgB5yB,KAAKkU,iBAAiB,MACpE,IAAK,MAAM7gB,KAAWsB,EAAU,CAC9B,MAAMo+B,EAAc1/B,EAAQC,SAASC,cACrC,IAAKzC,OAAO4D,KAAKu8B,GAAW/b,SAAS6d,GAAc,CACjD1/B,EAAQoa,SACR,QACF,CACA,MAAMulB,EAAgB,GAAGvgC,UAAUY,EAAQ0B,YACrCk+B,EAAoB,GAAGxgC,OAAOw+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IACpF,IAAK,MAAMl9B,KAAam9B,EACjBtC,GAAiB76B,EAAWo9B,IAC/B5/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CACA,OAAOs/B,EAAgB5yB,KAAKkyB,SAC9B,CA2HmCgB,CAAaZ,EAAKxe,KAAK6E,QAAQsY,UAAWnd,KAAK6E,QAAQ0Y,YAAciB,CACtG,CACA,wBAAAV,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,MACvB,CACA,qBAAA0e,CAAsBn/B,EAASk/B,GAC7B,GAAIze,KAAK6E,QAAQhY,KAGf,OAFA4xB,EAAgBL,UAAY,QAC5BK,EAAgB3J,OAAOv1B,GAGzBk/B,EAAgBE,YAAcp/B,EAAQo/B,WACxC,EAeF,MACMU,GAAwB,IAAI/oB,IAAI,CAAC,WAAY,YAAa,eAC1DgpB,GAAoB,OAEpBC,GAAoB,OACpBC,GAAyB,iBACzBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAO/jB,KAAU,OAAS,QAC1BgkB,OAAQ,SACRC,KAAMjkB,KAAU,QAAU,QAEtBkkB,GAAY,CAChBhD,UAAWtC,GACXuF,WAAW,EACXnyB,SAAU,kBACVoyB,WAAW,EACXC,YAAa,GACbC,MAAO,EACPvwB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACXszB,aAAc,KACdsL,UAAU,EACVC,WAAY,KACZxjB,UAAU,EACVyjB,SAAU,+GACVgD,MAAO,GACP5e,QAAS,eAEL6e,GAAgB,CACpBtD,UAAW,SACXiD,UAAW,UACXnyB,SAAU,mBACVoyB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACPvwB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACXszB,aAAc,yBACdsL,SAAU,UACVC,WAAY,kBACZxjB,SAAU,mBACVyjB,SAAU,SACVgD,MAAO,4BACP5e,QAAS,UAOX,MAAM8e,WAAgBhc,GACpB,WAAAP,CAAY5kB,EAASukB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIU,UAAU,+DAEtBG,MAAMplB,EAASukB,GAGf9D,KAAK2gB,YAAa,EAClB3gB,KAAK4gB,SAAW,EAChB5gB,KAAK6gB,WAAa,KAClB7gB,KAAK8gB,eAAiB,CAAC,EACvB9gB,KAAKmS,QAAU,KACfnS,KAAK+gB,iBAAmB,KACxB/gB,KAAKghB,YAAc,KAGnBhhB,KAAKihB,IAAM,KACXjhB,KAAKkhB,gBACAlhB,KAAK6E,QAAQ9K,UAChBiG,KAAKmhB,WAET,CAGA,kBAAWzd,GACT,OAAOyc,EACT,CACA,sBAAWxc,GACT,OAAO8c,EACT,CACA,eAAWlkB,GACT,MAxGW,SAyGb,CAGA,MAAA6kB,GACEphB,KAAK2gB,YAAa,CACpB,CACA,OAAAU,GACErhB,KAAK2gB,YAAa,CACpB,CACA,aAAAW,GACEthB,KAAK2gB,YAAc3gB,KAAK2gB,UAC1B,CACA,MAAAhZ,GACO3H,KAAK2gB,aAGV3gB,KAAK8gB,eAAeS,OAASvhB,KAAK8gB,eAAeS,MAC7CvhB,KAAK2P,WACP3P,KAAKwhB,SAGPxhB,KAAKyhB,SACP,CACA,OAAA1c,GACEmI,aAAalN,KAAK4gB,UAClBrgB,GAAaC,IAAIR,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,mBAC3E1hB,KAAK4E,SAASpJ,aAAa,2BAC7BwE,KAAK4E,SAASxjB,aAAa,QAAS4e,KAAK4E,SAASpJ,aAAa,2BAEjEwE,KAAK2hB,iBACLhd,MAAMI,SACR,CACA,IAAA8K,GACE,GAAoC,SAAhC7P,KAAK4E,SAAS7jB,MAAMgxB,QACtB,MAAM,IAAInO,MAAM,uCAElB,IAAM5D,KAAK4hB,mBAAoB5hB,KAAK2gB,WAClC,OAEF,MAAMnH,EAAYjZ,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAlItD,SAoIXqc,GADapmB,GAAeuE,KAAK4E,WACL5E,KAAK4E,SAAS9kB,cAAcwF,iBAAiBd,SAASwb,KAAK4E,UAC7F,GAAI4U,EAAUxX,mBAAqB6f,EACjC,OAIF7hB,KAAK2hB,iBACL,MAAMV,EAAMjhB,KAAK8hB,iBACjB9hB,KAAK4E,SAASxjB,aAAa,mBAAoB6/B,EAAIzlB,aAAa,OAChE,MAAM,UACJ6kB,GACErgB,KAAK6E,QAYT,GAXK7E,KAAK4E,SAAS9kB,cAAcwF,gBAAgBd,SAASwb,KAAKihB,OAC7DZ,EAAUvL,OAAOmM,GACjB1gB,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhJpC,cAkJnBxF,KAAKmS,QAAUnS,KAAKwS,cAAcyO,GAClCA,EAAI5lB,UAAU5E,IAAI8oB,IAMd,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F
,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAU1CoE,KAAKmF,gBAPY,KACf5E,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhKrC,WAiKQ,IAApBxF,KAAK6gB,YACP7gB,KAAKwhB,SAEPxhB,KAAK6gB,YAAa,CAAK,GAEK7gB,KAAKihB,IAAKjhB,KAAKgO,cAC/C,CACA,IAAA4B,GACE,GAAK5P,KAAK2P,aAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UA/KtD,SAgLHxD,iBAAd,CAQA,GALYhC,KAAK8hB,iBACbzmB,UAAU1B,OAAO4lB,IAIjB,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAG3CoE,KAAK8gB,eAA4B,OAAI,EACrC9gB,KAAK8gB,eAAelB,KAAiB,EACrC5f,KAAK8gB,eAAenB,KAAiB,EACrC3f,KAAK6gB,WAAa,KAYlB7gB,KAAKmF,gBAVY,KACXnF,KAAK+hB,yBAGJ/hB,KAAK6gB,YACR7gB,KAAK2hB,iBAEP3hB,KAAK4E,SAASzjB,gBAAgB,oBAC9Bof,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAzMpC,WAyM8D,GAEnDxF,KAAKihB,IAAKjhB,KAAKgO,cA1B7C,CA2BF,CACA,MAAAjjB,GACMiV,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,cAAA62B,GACE,OAAO9gB,QAAQd,KAAKgiB,YACtB,CACA,cAAAF,GAIE,OAHK9hB,KAAKihB,MACRjhB,KAAKihB,IAAMjhB,KAAKiiB,kBAAkBjiB,KAAKghB,aAAehhB,KAAKkiB,2BAEtDliB,KAAKihB,GACd,CACA,iBAAAgB,CAAkB7E,GAChB,MAAM6D,EAAMjhB,KAAKmiB,oBAAoB/E,GAASc,SAG9C,IAAK+C,EACH,OAAO,KAETA,EAAI5lB,UAAU1B,OAAO2lB,GAAmBC,IAExC0B,EAAI5lB,UAAU5E,IAAI,MAAMuJ,KAAKmE,YAAY5H,aACzC,MAAM6lB,EAvuGKC,KACb,GACEA,GAAUlgC,KAAKmgC,MA/BH,IA+BSngC,KAAKogC,gBACnBl9B,SAASm9B,eAAeH,IACjC,OAAOA,CAAM,EAmuGGI,CAAOziB,KAAKmE,YAAY5H,MAAM1c,WAK5C,OAJAohC,EAAI7/B,aAAa,KAAMghC,GACnBpiB,KAAKgO,eACPiT,EAAI5lB,UAAU5E,IAAI6oB,IAEb2B,CACT,CACA,UAAAyB,CAAWtF,GACTpd,KAAKghB,YAAc5D,EACfpd,KAAK2P,aACP3P,KAAK2hB,iBACL3hB,KAAK6P,OAET,CACA,mBAAAsS,CAAoB/E,GAYlB,OAXIpd,KAAK+gB,iBACP/gB,KAAK+gB,iBAAiB/C,cAAcZ,GAEpCpd,KAAK+gB,iBAAmB,IAAInD,GAAgB,IACvC5d,KAAK6E,QAGRuY,UACAC,WAAYrd,KAAK8d,yBAAyB9d,KAAK6E,QAAQyb,eAGpDtgB,KAAK+gB,gBACd,CACA,sBAAAmB,GACE,MAAO,CACL,CAAC1C,IAAyBxf,KAAKgiB,YAEnC,CACA,SAAAA,GACE,OAAOhiB,KAAK8d,yBAAyB9d,KAAK6E,QAAQ2b,QAAUxgB,KAAK4E,SAASpJ,aAAa,yBACzF,CAGA,4BAAAmnB,CAA6BvjB,GAC3B,OAAOY,KAAKmE,YAAYmB,oBAAoBlG,EAAMW,eAAgBC,KAAK4iB,qBACzE,CACA,WAAA5U,GACE,OAAOhO,KAAK6E,QAAQub,WAAapgB,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS86B,GAC3E,CACA,QAAA3P,GACE,OAAO3P,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS+6B,GACjD,CACA,aAAA/M,CAAcyO,GACZ,MAAMviC,EAAYme,GAAQmD,KAAK6E,QAAQnmB,UAAW,CAACshB,KAAMihB,EAAKjhB,KAAK4E,WAC7Die,EAAahD,GAAcnhC,EAAU+lB,eAC3C,OAAO,GAAoBzE,KAAK4E,SAAUqc,EAAKjhB,KAAK4S,iBAAiBiQ,GACvE,CACA,UAAA7P,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,wBAAA81B,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,KAAK4E,UAC5B,CACA,gBAAAgO,CAAiBiQ,GACf,MAAM3P,EAAwB,CAC5Bx0B,UAAWmkC,EACXzsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBgQ,KAAK6E,QAAQ7U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,eAEd,CACD1yB,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIygB,KAAKmE,YAAY5H,eAE/B,CACDjc,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGF2V,KAAK8hB,iBAAiB1gC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IACFw0B,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,aAAAgO,GACE,MAAM4B,EAAW9iB,KAAK6E,QAAQjD,QAAQ1f,MAAM,KAC5C,IAAK,MAAM0f,KAAWkhB,EACpB,GAAgB,UAAZlhB,EACFrB,GAAac,GAAGrB,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAjVlC,SAiV4DxF,KAAK6E,QAAQ9K,UAAUqF,IAC/EY,KAAK2iB,6BAA6BvjB,GAC1CuI,QAAQ,SAEb,GA3VU,WA2VN/F,EAA4B,CACrC,MAAMmhB,EAAUnhB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV5C,cAmV0ExF,KAAKmE,YAAYqB,UArV5F,WAsVVwd,EAAWphB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV7C,cAmV2ExF,KAAKmE,YAAYqB,UArV5F,YAsVjBjF,GAAac,GAAGrB,KAAK4E,SAAUme,EAAS/iB,KAAK6E,QAAQ9K,UA
AUqF,IAC7D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,YAAf1hB,EAAMqB,KAAqBmf,GAAgBD,KAAiB,EACnFrM,EAAQmO,QAAQ,IAElBlhB,GAAac,GAAGrB,KAAK4E,SAAUoe,EAAUhjB,KAAK6E,QAAQ9K,UAAUqF,IAC9D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,aAAf1hB,EAAMqB,KAAsBmf,GAAgBD,IAAiBrM,EAAQ1O,SAASpgB,SAAS4a,EAAMU,eACpHwT,EAAQkO,QAAQ,GAEpB,CAEFxhB,KAAK0hB,kBAAoB,KACnB1hB,KAAK4E,UACP5E,KAAK4P,MACP,EAEFrP,GAAac,GAAGrB,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,kBAChF,CACA,SAAAP,GACE,MAAMX,EAAQxgB,KAAK4E,SAASpJ,aAAa,SACpCglB,IAGAxgB,KAAK4E,SAASpJ,aAAa,eAAkBwE,KAAK4E,SAAS+Z,YAAYhZ,QAC1E3F,KAAK4E,SAASxjB,aAAa,aAAco/B,GAE3CxgB,KAAK4E,SAASxjB,aAAa,yBAA0Bo/B,GACrDxgB,KAAK4E,SAASzjB,gBAAgB,SAChC,CACA,MAAAsgC,GACMzhB,KAAK2P,YAAc3P,KAAK6gB,WAC1B7gB,KAAK6gB,YAAa,GAGpB7gB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACXjjB,KAAK6gB,YACP7gB,KAAK6P,MACP,GACC7P,KAAK6E,QAAQ0b,MAAM1Q,MACxB,CACA,MAAA2R,GACMxhB,KAAK+hB,yBAGT/hB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACVjjB,KAAK6gB,YACR7gB,KAAK4P,MACP,GACC5P,KAAK6E,QAAQ0b,MAAM3Q,MACxB,CACA,WAAAqT,CAAYrlB,EAASslB,GACnBhW,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW/iB,WAAWD,EAASslB,EACtC,CACA,oBAAAnB,GACE,OAAO/kC,OAAOmiB,OAAOa,KAAK8gB,gBAAgB1f,UAAS,EACrD,CACA,UAAAyC,CAAWC,GACT,MAAMqf,EAAiBngB,GAAYG,kBAAkBnD,KAAK4E,UAC1D,IAAK,MAAMwe,KAAiBpmC,OAAO4D,KAAKuiC,GAClC9D,GAAsB1oB,IAAIysB,WACrBD,EAAeC,GAU1B,OAPAtf,EAAS,IACJqf,KACmB,iBAAXrf,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAchB,OAbAA,EAAOuc,WAAiC,IAArBvc,EAAOuc,UAAsBh7B,SAAS6G,KAAOwO,GAAWoJ,EAAOuc,WACtD,iBAAjBvc,EAAOyc,QAChBzc,EAAOyc,MAAQ,CACb1Q,KAAM/L,EAAOyc,MACb3Q,KAAM9L,EAAOyc,QAGW,iBAAjBzc,EAAO0c,QAChB1c,EAAO0c,MAAQ1c,EAAO0c,MAAM3gC,YAEA,iBAAnBikB,EAAOsZ,UAChBtZ,EAAOsZ,QAAUtZ,EAAOsZ,QAAQv9B,YAE3BikB,CACT,CACA,kBAAA8e,GACE,MAAM9e,EAAS,CAAC,EAChB,IAAK,MAAOhnB,EAAKa,KAAUX,OAAOmkB,QAAQnB,KAAK6E,SACzC7E,KAAKmE,YAAYT,QAAQ5mB,KAASa,IACpCmmB,EAAOhnB,GAAOa,GASlB,OANAmmB,EAAO/J,UAAW,EAClB+J,EAAOlC,QAAU,SAKVkC,CACT,CACA,cAAA6d,GACM3hB,KAAKmS,UACPnS,KAAKmS,QAAQnZ,UACbgH,KAAKmS,QAAU,MAEbnS,KAAKihB,MACPjhB,KAAKihB,IAAItnB,SACTqG,KAAKihB,IAAM,KAEf,CAGA,sBAAOxkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOq2B,GAAQpb,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBukB,IAcnB,MACM2C,GAAiB,kBACjBC,GAAmB,gBACnBC,GAAY,IACb7C,GAAQhd,QACX0Z,QAAS,GACTp1B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACX8+B,SAAU,8IACV5b,QAAS,SAEL4hB,GAAgB,IACjB9C,GAAQ/c,YACXyZ,QAAS,kCAOX,MAAMqG,WAAgB/C,GAEpB,kBAAWhd,GACT,OAAO6f,EACT,CACA,sBAAW5f,GACT,OAAO6f,EACT,CACA,eAAWjnB,GACT,MA7BW,SA8Bb,CAGA,cAAAqlB,GACE,OAAO5hB,KAAKgiB,aAAehiB,KAAK0jB,aAClC,CAGA,sBAAAxB,GACE,MAAO,CACL,CAACmB,IAAiBrjB,KAAKgiB,YACvB,CAACsB,IAAmBtjB,KAAK0jB,cAE7B,CACA,WAAAA,GACE,OAAO1jB,KAAK8d,yBAAyB9d,KAAK6E,QAAQuY,QACpD,CAGA,sBAAO3gB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOo5B,GAAQne,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBsnB,IAcnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChBn8B,OAAQ,KAERo8B,WAAY,eACZC,cAAc,EACd93B,OAAQ,KACR+3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpBv8B,OAAQ,gBAERo8B,WAAY,SACZC,aAAc,UACd93B,OAAQ,UACR+3B,UAAW,SAOb,MAAME,WAAkB9f,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GAGf9D,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC/B8O,KAAK2kB,aAA6D,YAA9C1/B,iBAAiB+a,KAAK4E,UAAU5Y,UAA0B,KAAOgU,KAAK4E,SAC1F
5E,KAAK4kB,cAAgB,KACrB5kB,KAAK6kB,UAAY,KACjB7kB,KAAK8kB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnBhlB,KAAKilB,SACP,CAGA,kBAAWvhB,GACT,OAAOygB,EACT,CACA,sBAAWxgB,GACT,OAAO4gB,EACT,CACA,eAAWhoB,GACT,MAhEW,WAiEb,CAGA,OAAA0oB,GACEjlB,KAAKklB,mCACLllB,KAAKmlB,2BACDnlB,KAAK6kB,UACP7kB,KAAK6kB,UAAUO,aAEfplB,KAAK6kB,UAAY7kB,KAAKqlB,kBAExB,IAAK,MAAMC,KAAWtlB,KAAK0kB,oBAAoBvlB,SAC7Ca,KAAK6kB,UAAUU,QAAQD,EAE3B,CACA,OAAAvgB,GACE/E,KAAK6kB,UAAUO,aACfzgB,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAShB,OAPAA,EAAOvX,OAASmO,GAAWoJ,EAAOvX,SAAWlH,SAAS6G,KAGtD4X,EAAOsgB,WAAatgB,EAAO9b,OAAS,GAAG8b,EAAO9b,oBAAsB8b,EAAOsgB,WAC3C,iBAArBtgB,EAAOwgB,YAChBxgB,EAAOwgB,UAAYxgB,EAAOwgB,UAAUpiC,MAAM,KAAKY,KAAInF,GAAS4f,OAAOC,WAAW7f,MAEzEmmB,CACT,CACA,wBAAAqhB,GACOnlB,KAAK6E,QAAQwf,eAKlB9jB,GAAaC,IAAIR,KAAK6E,QAAQtY,OAAQs3B,IACtCtjB,GAAac,GAAGrB,KAAK6E,QAAQtY,OAAQs3B,GAAaG,IAAuB5kB,IACvE,MAAMomB,EAAoBxlB,KAAK0kB,oBAAoBvnC,IAAIiiB,EAAM7S,OAAOtB,MACpE,GAAIu6B,EAAmB,CACrBpmB,EAAMkD,iBACN,MAAM3G,EAAOqE,KAAK2kB,cAAgB/kC,OAC5BmE,EAASyhC,EAAkBnhC,UAAY2b,KAAK4E,SAASvgB,UAC3D,GAAIsX,EAAK8pB,SAKP,YAJA9pB,EAAK8pB,SAAS,CACZ9jC,IAAKoC,EACL2hC,SAAU,WAMd/pB,EAAKlQ,UAAY1H,CACnB,KAEJ,CACA,eAAAshC,GACE,MAAM5jC,EAAU,CACdka,KAAMqE,KAAK2kB,aACXL,UAAWtkB,KAAK6E,QAAQyf,UACxBF,WAAYpkB,KAAK6E,QAAQuf,YAE3B,OAAO,IAAIuB,sBAAqBxkB,GAAWnB,KAAK4lB,kBAAkBzkB,IAAU1f,EAC9E,CAGA,iBAAAmkC,CAAkBzkB,GAChB,MAAM0kB,EAAgBlI,GAAS3d,KAAKykB,aAAatnC,IAAI,IAAIwgC,EAAMpxB,OAAO4N,MAChEub,EAAWiI,IACf3d,KAAK8kB,oBAAoBC,gBAAkBpH,EAAMpxB,OAAOlI,UACxD2b,KAAK8lB,SAASD,EAAclI,GAAO,EAE/BqH,GAAmBhlB,KAAK2kB,cAAgBt/B,SAASC,iBAAiBmG,UAClEs6B,EAAkBf,GAAmBhlB,KAAK8kB,oBAAoBE,gBACpEhlB,KAAK8kB,oBAAoBE,gBAAkBA,EAC3C,IAAK,MAAMrH,KAASxc,EAAS,CAC3B,IAAKwc,EAAMqI,eAAgB,CACzBhmB,KAAK4kB,cAAgB,KACrB5kB,KAAKimB,kBAAkBJ,EAAclI,IACrC,QACF,CACA,MAAMuI,EAA2BvI,EAAMpxB,OAAOlI,WAAa2b,KAAK8kB,oBAAoBC,gBAEpF,GAAIgB,GAAmBG,GAGrB,GAFAxQ,EAASiI,IAEJqH,EACH,YAMCe,GAAoBG,GACvBxQ,EAASiI,EAEb,CACF,CACA,gCAAAuH,GACEllB,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC/B,MAAMi1B,EAActgB,GAAe1T,KAAK6xB,GAAuBhkB,KAAK6E,QAAQtY,QAC5E,IAAK,MAAM65B,KAAUD,EAAa,CAEhC,IAAKC,EAAOn7B,MAAQiQ,GAAWkrB,GAC7B,SAEF,MAAMZ,EAAoB3f,GAAeC,QAAQugB,UAAUD,EAAOn7B,MAAO+U,KAAK4E,UAG1EjK,GAAU6qB,KACZxlB,KAAKykB,aAAa1yB,IAAIs0B,UAAUD,EAAOn7B,MAAOm7B,GAC9CpmB,KAAK0kB,oBAAoB3yB,IAAIq0B,EAAOn7B,KAAMu6B,GAE9C,CACF,CACA,QAAAM,CAASv5B,GACHyT,KAAK4kB,gBAAkBr4B,IAG3ByT,KAAKimB,kBAAkBjmB,KAAK6E,QAAQtY,QACpCyT,KAAK4kB,cAAgBr4B,EACrBA,EAAO8O,UAAU5E,IAAIstB,IACrB/jB,KAAKsmB,iBAAiB/5B,GACtBgU,GAAaqB,QAAQ5B,KAAK4E,SAAUgf,GAAgB,CAClD9jB,cAAevT,IAEnB,CACA,gBAAA+5B,CAAiB/5B,GAEf,GAAIA,EAAO8O,UAAU7W,SA9LQ,iBA+L3BqhB,GAAeC,QArLc,mBAqLsBvZ,EAAOyO,QAtLtC,cAsLkEK,UAAU5E,IAAIstB,SAGtG,IAAK,MAAMwC,KAAa1gB,GAAeI,QAAQ1Z,EA9LnB,qBAiM1B,IAAK,MAAMxJ,KAAQ8iB,GAAeM,KAAKogB,EAAWrC,IAChDnhC,EAAKsY,UAAU5E,IAAIstB,GAGzB,CACA,iBAAAkC,CAAkBxhC,GAChBA,EAAO4W,UAAU1B,OAAOoqB,IACxB,MAAMyC,EAAc3gB,GAAe1T,KAAK,GAAG6xB,MAAyBD,KAAuBt/B,GAC3F,IAAK,MAAM9E,KAAQ6mC,EACjB7mC,EAAK0b,UAAU1B,OAAOoqB,GAE1B,CAGA,sBAAOtnB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOm6B,GAAUlf,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGzhB,OAAQkkC,IAAuB,KAC7C,IAAK,MAAM2C,KAAO5gB,GAAe1T,KApOT,0BAqOtBqyB,GAAUlf,oBAAoBmhB,EAChC,IAOFtqB,GAAmBqoB,IAcnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAW,OACXC,GAAU,MACVC,GAAoB,SACpBC
,GAAoB,OACpBC,GAAoB,OAEpBC,GAA2B,mBAE3BC,GAA+B,QAAQD,MAIvCE,GAAuB,2EACvBC,GAAsB,YAFOF,uBAAiDA,mBAA6CA,OAE/EC,KAC5CE,GAA8B,IAAIP,8BAA6CA,+BAA8CA,4BAMnI,MAAMQ,WAAYtjB,GAChB,WAAAP,CAAY5kB,GACVolB,MAAMplB,GACNygB,KAAKoS,QAAUpS,KAAK4E,SAAS5J,QAdN,uCAelBgF,KAAKoS,UAOVpS,KAAKioB,sBAAsBjoB,KAAKoS,QAASpS,KAAKkoB,gBAC9C3nB,GAAac,GAAGrB,KAAK4E,SAAUoiB,IAAe5nB,GAASY,KAAK6M,SAASzN,KACvE,CAGA,eAAW7C,GACT,MAnDW,KAoDb,CAGA,IAAAsT,GAEE,MAAMsY,EAAYnoB,KAAK4E,SACvB,GAAI5E,KAAKooB,cAAcD,GACrB,OAIF,MAAME,EAASroB,KAAKsoB,iBACdC,EAAYF,EAAS9nB,GAAaqB,QAAQymB,EAAQ1B,GAAc,CACpE7mB,cAAeqoB,IACZ,KACa5nB,GAAaqB,QAAQumB,EAAWtB,GAAc,CAC9D/mB,cAAeuoB,IAEHrmB,kBAAoBumB,GAAaA,EAAUvmB,mBAGzDhC,KAAKwoB,YAAYH,EAAQF,GACzBnoB,KAAKyoB,UAAUN,EAAWE,GAC5B,CAGA,SAAAI,CAAUlpC,EAASmpC,GACZnpC,IAGLA,EAAQ8b,UAAU5E,IAAI+wB,IACtBxnB,KAAKyoB,UAAU5iB,GAAec,uBAAuBpnB,IAcrDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GACtC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASunC,GAAe,CAC3ChnB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU5E,IAAIixB,GAQtB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,WAAAe,CAAYjpC,EAASmpC,GACdnpC,IAGLA,EAAQ8b,UAAU1B,OAAO6tB,IACzBjoC,EAAQq7B,OACR5a,KAAKwoB,YAAY3iB,GAAec,uBAAuBpnB,IAcvDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MACjC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASqnC,GAAgB,CAC5C9mB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU1B,OAAO+tB,GAQzB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,QAAA5a,CAASzN,GACP,IAAK,CAAC8nB,GAAgBC,GAAiBC,GAAcC,GAAgBC,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrG,OAEFsiB,EAAM0U,kBACN1U,EAAMkD,iBACN,MAAMyD,EAAW/F,KAAKkoB,eAAe/hC,QAAO5G,IAAY2b,GAAW3b,KACnE,IAAIqpC,EACJ,GAAI,CAACtB,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrC8rC,EAAoB7iB,EAAS3G,EAAMtiB,MAAQwqC,GAAW,EAAIvhB,EAASrV,OAAS,OACvE,CACL,MAAM8c,EAAS,CAAC2Z,GAAiBE,IAAgBjmB,SAAShC,EAAMtiB,KAChE8rC,EAAoB9qB,GAAqBiI,EAAU3G,EAAM7S,OAAQihB,GAAQ,EAC3E,CACIob,IACFA,EAAkBnW,MAAM,CACtBoW,eAAe,IAEjBb,GAAI1iB,oBAAoBsjB,GAAmB/Y,OAE/C,CACA,YAAAqY,GAEE,OAAOriB,GAAe1T,KAAK21B,GAAqB9nB,KAAKoS,QACvD,CACA,cAAAkW,GACE,OAAOtoB,KAAKkoB,eAAe/1B,MAAKzN,GAASsb,KAAKooB,cAAc1jC,MAAW,IACzE,CACA,qBAAAujC,CAAsBxjC,EAAQshB,GAC5B/F,KAAK8oB,yBAAyBrkC,EAAQ,OAAQ,WAC9C,IAAK,MAAMC,KAASqhB,EAClB/F,KAAK+oB,6BAA6BrkC,EAEtC,CACA,4BAAAqkC,CAA6BrkC,GAC3BA,EAAQsb,KAAKgpB,iBAAiBtkC,GAC9B,MAAMukC,EAAWjpB,KAAKooB,cAAc1jC,GAC9BwkC,EAAYlpB,KAAKmpB,iBAAiBzkC,GACxCA,EAAMtD,aAAa,gBAAiB6nC,GAChCC,IAAcxkC,GAChBsb,KAAK8oB,yBAAyBI,EAAW,OAAQ,gBAE9CD,GACHvkC,EAAMtD,aAAa,WAAY,MAEjC4e,KAAK8oB,yBAAyBpkC,EAAO,OAAQ,OAG7Csb,KAAKopB,mCAAmC1kC,EAC1C,CACA,kCAAA0kC,CAAmC1kC,GACjC,MAAM6H,EAASsZ,GAAec,uBAAuBjiB,GAChD6H,IAGLyT,KAAK8oB,yBAAyBv8B,EAAQ,OAAQ,YAC1C7H,EAAMyV,IACR6F,KAAK8oB,yBAAyBv8B,EAAQ,kBAAmB,GAAG7H,EAAMyV,MAEtE,CACA,eAAAwuB,CAAgBppC,EAAS8pC,GACvB,MAAMH,EAAYlpB,KAAKmpB,iBAAiB5pC,GACxC,IAAK2pC,EAAU7tB,UAAU7W,SApKN,YAqKjB,OAEF,MAAMmjB,EAAS,CAAC5N,EAAUoa,KACxB,MAAM50B,EAAUsmB,GAAeC,QAAQ/L,EAAUmvB,GAC7C3pC,GACFA,EAAQ8b,UAAUsM,OAAOwM,EAAWkV,EACtC,EAEF1hB,EAAOggB,GAA0BH,IACjC7f,EA5K2B,iBA4KI+f,IAC/BwB,EAAU9nC,aAAa,gBAAiBioC,EAC1C,CACA,wBAAAP,CAAyBvpC,EAASwC,EAAWpE,GACtC4B,EAAQgc,aAAaxZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CACA,aAAAyqC,CAAc9Y,GACZ,OAAOA,EAAKjU,UAAU7W,SAASgjC,GACjC,CAGA,gBAAAwB,CAAiB1Z,GACf,OAAOA,EAAKtJ,QAAQ8hB,IAAuBxY,EAAOzJ,GAAeC,QAAQgiB,GAAqBxY,EAChG,CAGA,gBAAA6Z,CAAiB7Z,GACf,OAAOA,EAAKtU,QA5LO,gCA4LoBsU,CACzC,CAGA,sBAAO7S,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO29B,GAAI1iB,oBAAoBtF,MACrC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1
CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGhc,SAAU0hC,GAAsBc,IAAsB,SAAUzoB,GAC1E,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,OAGfgoB,GAAI1iB,oBAAoBtF,MAAM6P,MAChC,IAKAtP,GAAac,GAAGzhB,OAAQqnC,IAAqB,KAC3C,IAAK,MAAM1nC,KAAWsmB,GAAe1T,KAAK41B,IACxCC,GAAI1iB,oBAAoB/lB,EAC1B,IAMF4c,GAAmB6rB,IAcnB,MAEMhjB,GAAY,YACZskB,GAAkB,YAAYtkB,KAC9BukB,GAAiB,WAAWvkB,KAC5BwkB,GAAgB,UAAUxkB,KAC1BykB,GAAiB,WAAWzkB,KAC5B0kB,GAAa,OAAO1kB,KACpB2kB,GAAe,SAAS3kB,KACxB4kB,GAAa,OAAO5kB,KACpB6kB,GAAc,QAAQ7kB,KAEtB8kB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrBrmB,GAAc,CAClByc,UAAW,UACX6J,SAAU,UACV1J,MAAO,UAEH7c,GAAU,CACd0c,WAAW,EACX6J,UAAU,EACV1J,MAAO,KAOT,MAAM2J,WAAcxlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK4gB,SAAW,KAChB5gB,KAAKmqB,sBAAuB,EAC5BnqB,KAAKoqB,yBAA0B,EAC/BpqB,KAAKkhB,eACP,CAGA,kBAAWxd,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAsT,GACoBtP,GAAaqB,QAAQ5B,KAAK4E,SAAUglB,IACxC5nB,mBAGdhC,KAAKqqB,gBACDrqB,KAAK6E,QAAQub,WACfpgB,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAOmwB,IAC/BjuB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIszB,GAAiBC,IAC7ChqB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,IAC/BzpB,GAAaqB,QAAQ5B,KAAK4E,SAAUilB,IACpC7pB,KAAKsqB,oBAAoB,GAKGtqB,KAAK4E,SAAU5E,KAAK6E,QAAQub,WAC5D,CACA,IAAAxQ,GACO5P,KAAKuqB,YAGQhqB,GAAaqB,QAAQ5B,KAAK4E,SAAU8kB,IACxC1nB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAIuzB,IAC5BhqB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAIqzB,IAC5B9pB,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,GAAoBD,IACnDxpB,GAAaqB,QAAQ5B,KAAK4E,SAAU+kB,GAAa,GAGrB3pB,KAAK4E,SAAU5E,KAAK6E,QAAQub,YAC5D,CACA,OAAArb,GACE/E,KAAKqqB,gBACDrqB,KAAKuqB,WACPvqB,KAAK4E,SAASvJ,UAAU1B,OAAOowB,IAEjCplB,MAAMI,SACR,CACA,OAAAwlB,GACE,OAAOvqB,KAAK4E,SAASvJ,UAAU7W,SAASulC,GAC1C,CAIA,kBAAAO,GACOtqB,KAAK6E,QAAQolB,WAGdjqB,KAAKmqB,sBAAwBnqB,KAAKoqB,0BAGtCpqB,KAAK4gB,SAAW/iB,YAAW,KACzBmC,KAAK4P,MAAM,GACV5P,KAAK6E,QAAQ0b,QAClB,CACA,cAAAiK,CAAeprB,EAAOqrB,GACpB,OAAQrrB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAKmqB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDzqB,KAAKoqB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAzqB,KAAKqqB,gBAGP,MAAM5c,EAAcrO,EAAMU,cACtBE,KAAK4E,WAAa6I,GAAezN,KAAK4E,SAASpgB,SAASipB,IAG5DzN,KAAKsqB,oBACP,CACA,aAAApJ,GACE3gB,GAAac,GAAGrB,KAAK4E,SAAU0kB,IAAiBlqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAU2kB,IAAgBnqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAU4kB,IAAepqB,GAASY,KAAKwqB,eAAeprB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAU6kB,IAAgBrqB,GAASY,KAAKwqB,eAAeprB,GAAO,IACrF,CACA,aAAAirB,GACEnd,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW,IAClB,CAGA,sBAAOnkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6/B,GAAM5kB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAAS0qB,GAAcruB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAwK,GAAqBqjB,IAMrB/tB,GAAmB+tB,IEpyInBQ,IAzCA,WAC2B,GAAGt4B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAU6nC,GAC/B,OAAO,IAAI,GAAkBA,EAAkB,CAC7CpK,MAAO,CAAE1Q,KAAM,IAAKD,KAAM,MAE9B,GACF,IAiCA8a,IA5BA,WACYrlC,SAASm9B,eAAe,mBAC9B13B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAi/B,IArBA,WACE,IAAIE,EAAMvlC,SAASm9B,eAAe,mBAC9BqI,EAASxlC,SACVylC,uBAAuB,aAAa,GACpCxnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAK+qB,UAAY/qB,KAAKgrB,SAAWhrB,KAAKgrB,QAAUH,EAAOjtC,OACzDgtC,EAAI7pC,MAAMgxB,QAAU,QAEpB6Y,EAAI7pC,MAAMgxB,QAAU,OAEtB/R,KAAK+qB,UAAY/qB,KAAKgrB,OACxB,GACF,IAUAprC,OAAOqrC,UAAY","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runt
ime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@poppe
rjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 
'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 
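  // keep the sign for transform-based translation; invert when the value is applied via the 'bottom' CSS property, which measures from the opposite edge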
1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If <html> has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on <html>\n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
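  // stop once the document body is reached; otherwise keep collecting scroll parents up the tree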
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
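  // 'clippingParents' resolves to the element's ancestor clip chain; any other boundary value is used as given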
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `<html>` and `<body>` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
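  // virtual elements expose a contextElement; fall back to the popper's document element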
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
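  // a bare base placement (or flipVariations: false) tries only the direct opposite; otherwise the flipped variations are tried as well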
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? 
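  // left/right placements: distance runs along x and skidding along y; top/bottom swaps the axes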
{\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
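  // tethering loosens the clamp so the popper may overflow the boundary up to the tether limits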
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
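  // rect is viewport-relative; add the offset parent's scroll and subtract its origin to get offsetParent-relative coordinates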
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? {} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
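  // modifiers sharing a name collapse into one: the later entry wins, with options and data shallow-merged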
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.3 (https://getbootstrap.com/)\n * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
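  // unwrap a jQuery wrapper to its first DOM node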
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && 
target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * 
Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.3';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;\n }\n return selector ? selector.split(',').map(sel => parseSelector(sel)).join(',') : null;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n 
this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? 
this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n 
this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n 
this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return 
Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = 
'.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of 
[].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? 
PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change 
markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n 
return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * 
--------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n 
this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst 
EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their value\n const isVisible = Boolean(this._config.backdrop);\n return 
new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n dd: [],\n div: [],\n dl: [],\n dt: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
<div></div>
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if (!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? 
sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class="tooltip" role="tooltip">' + '<div class="tooltip-arrow"></div>' + '<div class="tooltip-inner"></div>' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, 
this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n 
const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = 
SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both