From cb8a768b5004edd3ffa54cb64f54a39a3b66b943 Mon Sep 17 00:00:00 2001 From: DanSava Date: Thu, 31 Oct 2024 16:26:48 +0200 Subject: [PATCH] Add forward model output functionality --- docs/contribute/fm_ops_remover/parser.py | 8 +++-- src/everest_models/everest_hooks.py | 21 +++++++++++- .../jobs/fm_add_templates/parser.py | 7 ++-- .../jobs/fm_compute_economics/parser.py | 7 ++-- .../jobs/fm_drill_date_planner/parser.py | 6 ++-- .../jobs/fm_drill_planner/parser.py | 8 +++-- .../jobs/fm_extract_summary_data/parser.py | 5 +-- .../jobs/fm_interpret_well_drill/parser.py | 4 ++- src/everest_models/jobs/fm_npv/parser.py | 8 +++-- src/everest_models/jobs/fm_rf/parser.py | 11 +++++-- src/everest_models/jobs/fm_schmerge/parser.py | 6 ++-- .../jobs/fm_select_wells/parser.py | 6 ++-- src/everest_models/jobs/fm_stea/parser.py | 4 +-- .../jobs/fm_strip_dates/parser.py | 4 +-- .../jobs/fm_well_constraints/parser.py | 9 ++++-- .../jobs/fm_well_filter/parser.py | 8 +++-- .../jobs/fm_well_swapping/parser.py | 32 ++++++++++++------- .../jobs/fm_well_trajectory/parser.py | 6 ++-- src/everest_models/jobs/shared/arguments.py | 16 +++++++--- 19 files changed, 120 insertions(+), 56 deletions(-) diff --git a/docs/contribute/fm_ops_remover/parser.py b/docs/contribute/fm_ops_remover/parser.py index fed3670d..71150a0f 100644 --- a/docs/contribute/fm_ops_remover/parser.py +++ b/docs/contribute/fm_ops_remover/parser.py @@ -7,13 +7,15 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): parser, required_group = get_parser( description="Given everest generated wells.json file" "and a list of well names. remove the intersecting names' operations." ) - add_wells_input_argument(required_group, help="Everest generated wells.json file") - add_output_argument(required_group, help="Output File") + add_wells_input_argument( + required_group, help="Everest generated wells.json file", skip_type=skip_type + ) + add_output_argument(required_group, help="Output File", skip_type=skip_type) required_group.add_argument( "-w", "--wells", required=True, help="wells to modified.", nargs="+", type=str ) diff --git a/src/everest_models/everest_hooks.py b/src/everest_models/everest_hooks.py index c2d04a9e..9bffd79a 100644 --- a/src/everest_models/everest_hooks.py +++ b/src/everest_models/everest_hooks.py @@ -9,7 +9,7 @@ import pathlib import sys from importlib import import_module, resources -from typing import Any, Dict, List, Sequence, Type +from typing import Any, Dict, List, Sequence, Set, Type from pydantic import BaseModel @@ -129,3 +129,22 @@ def get_forward_model_documentations() -> Dict[str, Any]: "full_job_name": full_job_name, } return docs + + +@hookimpl +def custom_forward_model_outputs(forward_model_steps: List[str]) -> Set[str]: + forward_models = [step["name"] for step in get_forward_models()] + outputs = set() + for step in forward_model_steps: + step_name, *args = step.split() + if step_name in forward_models: + try: + parser = import_module( + f"{JOBS}.fm_{step_name}.parser" + ).build_argument_parser(skip_type=True) + options = parser.parse_args(args) + if options.output: + outputs.add(options.output) + except SystemExit: + pass + return outputs diff --git a/src/everest_models/jobs/fm_add_templates/parser.py b/src/everest_models/jobs/fm_add_templates/parser.py index c5668342..e45a1a21 100644 --- a/src/everest_models/jobs/fm_add_templates/parser.py +++ b/src/everest_models/jobs/fm_add_templates/parser.py @@ -16,7 +16,7 @@ @bootstrap_parser -def build_argument_parser(): +def 
build_argument_parser(skip_type=False): SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser( description="Inserts template file paths for all well operations in the " @@ -29,11 +29,12 @@ def build_argument_parser(): required_group, schema=Wells, help="Input file that requires template paths. Json file expected ex: wells.json", + skip_type=skip_type, ) - add_output_argument(required_group, help="Output file") + add_output_argument(required_group, help="Output file", skip_type=skip_type) required_group.add_argument( *_CONFIG_ARGUMENT.split("/"), - type=partial(parse_file, schema=TemplateConfig), + type=partial(parse_file, schema=TemplateConfig) if not skip_type else str, required=True, help="Config file containing list of template file paths to be injected.", ) diff --git a/src/everest_models/jobs/fm_compute_economics/parser.py b/src/everest_models/jobs/fm_compute_economics/parser.py index 28aff29c..195818a8 100644 --- a/src/everest_models/jobs/fm_compute_economics/parser.py +++ b/src/everest_models/jobs/fm_compute_economics/parser.py @@ -18,7 +18,7 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser( description="Module to calculate economical indicators based on an eclipse simulation. " @@ -33,7 +33,9 @@ def build_argument_parser(): required_group.add_argument( *CONFIG_ARGUMENT.split("/"), required=True, - type=partial(parse_file, schema=EconomicIndicatorConfig), + type=partial(parse_file, schema=EconomicIndicatorConfig) + if not skip_type + else str, help="Path to config file containing at least prices", ) add_output_argument( @@ -41,6 +43,7 @@ def build_argument_parser(): required=False, default=None, help="Path to output-file where the economical indicators result is written to.", + skip_type=skip_type, ) parser.add_argument( "--output-currency", diff --git a/src/everest_models/jobs/fm_drill_date_planner/parser.py b/src/everest_models/jobs/fm_drill_date_planner/parser.py index 1abc6134..4f27ad74 100644 --- a/src/everest_models/jobs/fm_drill_date_planner/parser.py +++ b/src/everest_models/jobs/fm_drill_date_planner/parser.py @@ -10,23 +10,25 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): parser, required_group = get_parser( description="Calculate and write drill times from scaled controls.", ) add_wells_input_argument( required_group, help="Wells file generated by Everest (wells.json).", + skip_type=skip_type, ) add_output_argument( required_group, help="Output file: input for drill planner job.", + skip_type=skip_type, ) required_group.add_argument( "-opt", "--optimizer", required=True, - type=valid_input_file, + type=valid_input_file if not skip_type else str, help="File containing information related to wells. 
The format is " "consistent with the wells.json file when running everest and can " "be used directly.", diff --git a/src/everest_models/jobs/fm_drill_planner/parser.py b/src/everest_models/jobs/fm_drill_planner/parser.py index 777aa103..793e634a 100644 --- a/src/everest_models/jobs/fm_drill_planner/parser.py +++ b/src/everest_models/jobs/fm_drill_planner/parser.py @@ -18,7 +18,7 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser( description="A module that given a well priority list and a set of " @@ -35,6 +35,7 @@ def build_argument_parser(): "consistent with the wells.json file when running everest and can " "be used directly.", schema=Wells, + skip_type=skip_type, ) add_output_argument( required_group, @@ -43,11 +44,12 @@ def build_argument_parser(): "drill_planner. Please note that it is highly recommended to not use the " "same filename as the input-file. In cases where the same workflow is run " "twice, it is generally advised that the input-file for each job is consistent", + skip_type=skip_type, ) required_group.add_argument( *_CONFIG_ARGUMENT.split("/"), required=True, - type=partial(parse_file, schema=DrillPlanConfig), + type=partial(parse_file, schema=DrillPlanConfig) if not skip_type else str, help="Configuration file in yaml format describing the constraints of the " "field development. The file must contain information about rigs and slots " "that the wells can be drilled through. Additional information, such as " @@ -56,7 +58,7 @@ def build_argument_parser(): required_group.add_argument( *_OPTIMIZER_ARGUMENT.split("/"), required=True, - type=valid_input_file, + type=valid_input_file if not skip_type else str, help="The optimizer file is generated from everest it " "contains the well priority values - a float for each well.", ) diff --git a/src/everest_models/jobs/fm_extract_summary_data/parser.py b/src/everest_models/jobs/fm_extract_summary_data/parser.py index e73db8b3..bb59393b 100644 --- a/src/everest_models/jobs/fm_extract_summary_data/parser.py +++ b/src/everest_models/jobs/fm_extract_summary_data/parser.py @@ -8,14 +8,15 @@ from everest_models.jobs.shared.validators import valid_iso_date -def build_argument_parser(): +def build_argument_parser(skip_type=False): description = "Module to extract Eclipse Summary keyword data for single date or date interval" parser, requird_group = get_parser(description=description) - add_summary_argument(requird_group) + add_summary_argument(requird_group, skip_type=skip_type) add_output_argument( requird_group, help="Output file", + skip_type=skip_type, ) add_lint_argument(parser) parser.add_argument( diff --git a/src/everest_models/jobs/fm_interpret_well_drill/parser.py b/src/everest_models/jobs/fm_interpret_well_drill/parser.py index c9d9f508..bf2b1421 100644 --- a/src/everest_models/jobs/fm_interpret_well_drill/parser.py +++ b/src/everest_models/jobs/fm_interpret_well_drill/parser.py @@ -6,7 +6,7 @@ ) -def build_argument_parser(): +def build_argument_parser(skip_type=False): description = ( "This module transforms dakota well_drill output to a json object." "This object contains a list of well names to keep." 
@@ -19,11 +19,13 @@ def build_argument_parser(): "Yaml file that contains optimizer output, this should consist " "of a list of well names, with their associated value between 0 and 1" ), + skip_type=skip_type, ) add_lint_argument(parser) add_output_argument( required_group, help="File path to write the resulting json file to.", + skip_type=skip_type, ) return parser diff --git a/src/everest_models/jobs/fm_npv/parser.py b/src/everest_models/jobs/fm_npv/parser.py index 9a097a3b..0deb8103 100644 --- a/src/everest_models/jobs/fm_npv/parser.py +++ b/src/everest_models/jobs/fm_npv/parser.py @@ -18,13 +18,13 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser( description="Module to calculate the NPV based on an eclipse simulation. " "All optional args, except: lint, schemas, input and output, is also configurable through the config file." ) - add_summary_argument(required_group) + add_summary_argument(required_group, skip_type=skip_type) add_wells_input_argument( parser, required=False, @@ -32,17 +32,19 @@ def build_argument_parser(): "The format is consistent with the wells.json file when running " "everest. It must contain a 'readydate' key for each well for when " "it is considered completed and ready for production.", + skip_type=skip_type, ) add_output_argument( parser, required=False, default="npv", help="Path to output-file where the NPV result is written to.", + skip_type=skip_type, ) required_group.add_argument( *CONFIG_ARGUMENT.split("/"), required=True, - type=partial(parse_file, schema=NPVConfig), + type=partial(parse_file, schema=NPVConfig) if not skip_type else str, help="Path to config file containing at least prices", ) parser.add_argument( diff --git a/src/everest_models/jobs/fm_rf/parser.py b/src/everest_models/jobs/fm_rf/parser.py index 3d88720c..b10f079c 100644 --- a/src/everest_models/jobs/fm_rf/parser.py +++ b/src/everest_models/jobs/fm_rf/parser.py @@ -7,7 +7,7 @@ from everest_models.jobs.shared.validators import valid_iso_date -def build_argument_parser(): +def build_argument_parser(skip_type=False): parser, required_group = get_parser( description="Calculates the recovery factor given summary keys and dates.\n" "Requires a Summary instance to retrieve the volumes from. The summary " @@ -15,9 +15,14 @@ def build_argument_parser(): "the simulation range, they will be clamped to nearest. Will throw an " "error if the entire date range is outside the simulation range." ) - add_summary_argument(required_group) + add_summary_argument(required_group, skip_type=skip_type) add_lint_argument(parser) - add_output_argument(parser, required=False, help="Filename of the output file. ") + add_output_argument( + parser, + required=False, + help="Filename of the output file. ", + skip_type=skip_type, + ) parser.add_argument( "-pk", "--production_key", diff --git a/src/everest_models/jobs/fm_schmerge/parser.py b/src/everest_models/jobs/fm_schmerge/parser.py index 6b02d706..f9f802f3 100644 --- a/src/everest_models/jobs/fm_schmerge/parser.py +++ b/src/everest_models/jobs/fm_schmerge/parser.py @@ -10,7 +10,7 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): parser, required_group = get_parser( description="This module works on a schedule file intended for reservoir simulation" "(e.g. eclipse or flow), and injects templates at given dates. 
If the report" @@ -21,7 +21,7 @@ def build_argument_parser(): "-s", "--schedule", required=True, - type=valid_schedule_template, + type=valid_schedule_template if not skip_type else str, help="Schedule file to inject templates into. The only currently" " accepted date format is the following: one line consisting of the" " DATES keyword, followed by a date in the format of '1 JAN 2000'" @@ -37,9 +37,11 @@ def build_argument_parser(): " and the ops is a list of operations to be performed on the well." " The operations are defined within a dict with the required keys template," " date and any parameter values that are to be injected into the given template", + skip_type=skip_type, ) add_output_argument( required_group, help="File path to write the resulting schedule file to.", + skip_type=skip_type, ) return parser diff --git a/src/everest_models/jobs/fm_select_wells/parser.py b/src/everest_models/jobs/fm_select_wells/parser.py index 8c8e2b11..dcfb61e0 100644 --- a/src/everest_models/jobs/fm_select_wells/parser.py +++ b/src/everest_models/jobs/fm_select_wells/parser.py @@ -17,7 +17,7 @@ def scaled_well_number(value: str) -> float: @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): SchemaAction.register_models({"file": WellNumber}) parser, required_named_arguments = get_parser( description="Select the first wells from a drill planner output file." @@ -34,10 +34,12 @@ def build_argument_parser(): add_wells_input_argument( required_named_arguments, help="Input file: a drill planner output file.", + skip_type=skip_type, ) add_output_argument( required_named_arguments, help="Output file: updated drill planner output file", + skip_type=skip_type, ) parser.add_argument( "-m", @@ -60,7 +62,7 @@ def build_argument_parser(): ) well_number_file.add_argument( "file_path", - type=scaled_well_number, + type=scaled_well_number if not skip_type else str, ) sub_required_named_args.add_argument( "-r", diff --git a/src/everest_models/jobs/fm_stea/parser.py b/src/everest_models/jobs/fm_stea/parser.py index 9c8efdac..75624521 100644 --- a/src/everest_models/jobs/fm_stea/parser.py +++ b/src/everest_models/jobs/fm_stea/parser.py @@ -5,7 +5,7 @@ from everest_models.jobs.shared.arguments import add_lint_argument, get_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): description = ( "STEA is a powerful economic analysis tool used for complex economic " "analysis and portfolio optimization. 
STEA helps you analyze single " @@ -19,7 +19,7 @@ def build_argument_parser(): required_group.add_argument( "-c", "--config", - type=lambda value: stea.SteaInput(Path(value)), + type=lambda value: stea.SteaInput(Path(value)) if not skip_type else str, help="STEA (yaml) config file", required=True, ) diff --git a/src/everest_models/jobs/fm_strip_dates/parser.py b/src/everest_models/jobs/fm_strip_dates/parser.py index a51ab7fb..09451f75 100644 --- a/src/everest_models/jobs/fm_strip_dates/parser.py +++ b/src/everest_models/jobs/fm_strip_dates/parser.py @@ -10,14 +10,14 @@ def _valid_ecl_file(value: str): return valid_ecl_summary(value), value -def build_argument_parser(): +def build_argument_parser(skip_type=False): description = ( "Makes sure a given summary file contains only report steps at the " "list of dates given as an argument" ) parser, required_group = get_parser(description=description) - add_summary_argument(required_group, func=_valid_ecl_file) + add_summary_argument(required_group, func=_valid_ecl_file, skip_type=skip_type) add_lint_argument(parser) parser.add_argument( "-d", diff --git a/src/everest_models/jobs/fm_well_constraints/parser.py b/src/everest_models/jobs/fm_well_constraints/parser.py index 716f6863..67c8f4d5 100644 --- a/src/everest_models/jobs/fm_well_constraints/parser.py +++ b/src/everest_models/jobs/fm_well_constraints/parser.py @@ -34,7 +34,7 @@ } -def build_argument_parser() -> argparse.ArgumentParser: +def build_argument_parser(skip_type=False) -> argparse.ArgumentParser: SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser( description="A module that given a list of boundaries and well constraints creates a " @@ -42,22 +42,27 @@ def build_argument_parser() -> argparse.ArgumentParser: "duration boundaries are given as min/max, and phase as a list of possibilities " "to choose from. Also support constants if boundaries are replaced by value.", ) + if skip_type: + constraint_parameters["type"] = str + add_wells_input_argument( required_group, schema=Wells, help="File in json format containing well names and well opening times, " "should be specified in Everest config (wells.json).", + skip_type=skip_type, ) add_file_schemas(parser) add_lint_argument(parser) add_output_argument( required_group, help="Name of the output file. The format will be yaml.", + skip_type=skip_type, ) required_group.add_argument( *CONFIG_ARG_KEY.split("/"), required=True, - type=partial(parse_file, schema=WellConstraintConfig), + type=partial(parse_file, schema=WellConstraintConfig) if not skip_type else str, help="Configuration file in yaml format with names, events and boundaries for constraints", ) parser.add_argument( diff --git a/src/everest_models/jobs/fm_well_filter/parser.py b/src/everest_models/jobs/fm_well_filter/parser.py index 46b186b5..795afc08 100644 --- a/src/everest_models/jobs/fm_well_filter/parser.py +++ b/src/everest_models/jobs/fm_well_filter/parser.py @@ -8,7 +8,7 @@ @bootstrap_parser -def build_argument_parser(): +def build_argument_parser(skip_type=False): parser, required_group = get_parser( description="This module filters out wells using a json string." "Either the --keep or the --remove flag needs to be set to a json file name" @@ -21,22 +21,24 @@ def build_argument_parser(): help=( "Json file that contains a list of dictionaries containing well information." 
), + skip_type=skip_type, ) add_output_argument( required_group, help="File path to write the resulting wells file to.", + skip_type=skip_type, ) group = required_group.add_mutually_exclusive_group(required=True) group.add_argument( "-k", "--keep", - type=valid_input_file, + type=valid_input_file if not skip_type else str, help="JSON/Y(A)ML file that contains a list of well names to keep.", ) group.add_argument( "-r", "--remove", - type=valid_input_file, + type=valid_input_file if not skip_type else str, help="JSON/Y(A)ML file that contains a list of well names to remove.", ) return parser diff --git a/src/everest_models/jobs/fm_well_swapping/parser.py b/src/everest_models/jobs/fm_well_swapping/parser.py index 60ad95e7..739e3ba9 100644 --- a/src/everest_models/jobs/fm_well_swapping/parser.py +++ b/src/everest_models/jobs/fm_well_swapping/parser.py @@ -1,13 +1,17 @@ from functools import partial from typing import Dict, Tuple +from everest_models.jobs.shared.arguments import ( + SchemaAction, + bootstrap_parser, + get_parser, +) + from ..shared.arguments import ( - Parser, add_output_argument, add_wells_input_argument, ) from ..shared.io_utils import load_json -from ..shared.parsers import bootstrap_parser from ..shared.validators import ( is_gt_zero, parse_file, @@ -29,26 +33,28 @@ def _clean_constraint(value: str) -> Dict[str, Tuple[float, ...]]: # TODO: Change program name to state adjuster or something more related to what it does # omit anything to do with well -@bootstrap_parser( - schemas=SCHEMAS, # type: ignore - prog="Well Swapping", - description="Swap well operation status over multiple time intervals.", -) -def build_argument_parser(parser: Parser, lint: bool = False, *_) -> None: - parser.add_argument( +@bootstrap_parser +def build_argument_parser(**kwargs) -> None: + skip_type = kwargs.get("skip_type", False) + lint = kwargs.get("lint", False) + SchemaAction.register_models(SCHEMAS) + parser, required_group = get_parser( + description="Swap well operation status over multiple time intervals." 
+ ) + required_group.add_argument( *_CONFIG_ARGUMENT.split("/"), required=True, - type=partial(parse_file, schema=ConfigSchema), + type=partial(parse_file, schema=ConfigSchema) if not skip_type else str, help="well swapping configuration file", ) parser.add_argument( *_CONSTRAINTS_ARGUMENT.split("/"), - type=_clean_constraint, + type=_clean_constraint if not skip_type else str, help="Everest generated optimized constraints", ) parser.add_argument( *_PRIORITIES_ARGUMENT.split("/"), - type=valid_optimizer, + type=valid_optimizer if not skip_type else str, help="Everest generated optimized priorities", ) parser.add_argument( @@ -64,10 +70,12 @@ def build_argument_parser(parser: Parser, lint: bool = False, *_) -> None: required=False, arg=("-cs", "--cases"), help="Everest generated wells.json file", + skip_type=skip_type, ) if not lint: add_output_argument( parser, required=False, help="Where to write output file to", + skip_type=skip_type, ) diff --git a/src/everest_models/jobs/fm_well_trajectory/parser.py b/src/everest_models/jobs/fm_well_trajectory/parser.py index d47b394a..3db43768 100644 --- a/src/everest_models/jobs/fm_well_trajectory/parser.py +++ b/src/everest_models/jobs/fm_well_trajectory/parser.py @@ -20,18 +20,18 @@ @bootstrap_parser -def build_argument_parser() -> argparse.ArgumentParser: +def build_argument_parser(skip_type=False) -> argparse.ArgumentParser: SchemaAction.register_models(SCHEMAS) parser, required_group = get_parser(description="Design a well trajectory.") required_group.add_argument( *CONFIG_ARG_KEY.split("/"), required=True, - type=partial(parse_file, schema=ConfigSchema), + type=partial(parse_file, schema=ConfigSchema) if not skip_type else str, help="forward model configuration file.", ) parser.add_argument( *ECLIPSE_FILES_ARG_KEY.split("/"), - type=validate_eclipse_path_argparse, + type=validate_eclipse_path_argparse if not skip_type else str, help="Path to Eclipse model: '/path/to/model'; extension not needed", ) diff --git a/src/everest_models/jobs/shared/arguments.py b/src/everest_models/jobs/shared/arguments.py index 3690818f..8a45bc93 100644 --- a/src/everest_models/jobs/shared/arguments.py +++ b/src/everest_models/jobs/shared/arguments.py @@ -37,11 +37,12 @@ def add_input_argument(parser: Parser, *args, **kwargs) -> None: Args: parser (argparse.ArgumentParser): Argument parser """ + skip_type = kwargs.pop("skip_type") if "skip_type" in kwargs else False parser.add_argument( "-i", "--input", *args, - type=valid_input_file, + type=valid_input_file if not skip_type else str, required=True, **kwargs, ) @@ -78,7 +79,9 @@ def add_file_schemas(parser: Parser) -> None: ) -def add_summary_argument(parser: Parser, *, func: Optional[Callable] = None) -> None: +def add_summary_argument( + parser: Parser, *, func: Optional[Callable] = None, **kwargs +) -> None: """Add summary argument to parser. - Set type to 'func' or 'valid_ecl_summary' function caller @@ -88,10 +91,11 @@ def add_summary_argument(parser: Parser, *, func: Optional[Callable] = None) -> parser (argparse.ArgumentParser): Argument parser func (Callable, optional): Function caller to use for type. Defaults to None. """ + skip_type = kwargs.pop("skip_type") if "skip_type" in kwargs else False parser.add_argument( "-s", "--summary", - type=func or valid_ecl_summary, + type=func or valid_ecl_summary if not skip_type else str, required=True, help="Eclipse summary file", ) @@ -115,9 +119,10 @@ def add_wells_input_argument( schema (models.BaseConfig, optional): Parser and validation schema to use. 
Defaults to models.WellListModel. """ + skip_type = kwargs.pop("skip_type") if "skip_type" in kwargs else False parser.add_argument( *arg, - type=partial(parse_file, schema=schema), + type=partial(parse_file, schema=schema) if not skip_type else str, required=required, **kwargs, ) @@ -133,11 +138,12 @@ def add_output_argument(parser: Parser, *, required: bool = True, **kwargs) -> N parser (argparse.ArgumentTypeError): Argument parser required (bool, optional): Is this argument required?. Defaults to True. """ + skip_type = kwargs.pop("skip_type") if "skip_type" in kwargs else False parser.add_argument( "-o", "--output", required=required, - type=is_writable_path, + type=is_writable_path if not skip_type else str, **kwargs, )
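
Note (illustration, not part of the patch): the pattern introduced above is that every job's build_argument_parser gains a skip_type flag which swaps the validating type= callables (parse_file, valid_input_file, is_writable_path, ...) for plain str, so the new custom_forward_model_outputs hook can parse forward-model step strings without touching the filesystem and collect each step's --output path. Below is a minimal, self-contained sketch of that interplay; fm_example, writable_path and collect_forward_model_outputs are hypothetical stand-ins, not names from the patched modules.

    import argparse
    import os
    from typing import List, Set


    def writable_path(value: str) -> str:
        # Hypothetical stand-in for the is_writable_path validator used by
        # add_output_argument; only checks that the parent directory is writable.
        parent = os.path.dirname(value) or "."
        if not os.access(parent, os.W_OK):
            raise argparse.ArgumentTypeError(f"cannot write to {value!r}")
        return value


    def build_argument_parser(skip_type: bool = False) -> argparse.ArgumentParser:
        # Simplified stand-in for the per-job parsers patched above: when
        # skip_type is True, every validating type= callable degrades to plain
        # str, so the parser can be used for static inspection of step arguments.
        parser = argparse.ArgumentParser(prog="fm_example")
        parser.add_argument("-i", "--input", required=True, type=str)
        parser.add_argument(
            "-o", "--output", required=True, type=str if skip_type else writable_path
        )
        return parser


    def collect_forward_model_outputs(forward_model_steps: List[str]) -> Set[str]:
        # Mirrors the shape of the custom_forward_model_outputs hook: split each
        # step string into job name and arguments, parse with validation skipped,
        # and record the --output value; steps that fail to parse are ignored.
        outputs: Set[str] = set()
        for step in forward_model_steps:
            _job_name, *args = step.split()
            try:
                options = build_argument_parser(skip_type=True).parse_args(args)
            except SystemExit:
                continue
            if options.output:
                outputs.add(options.output)
        return outputs


    if __name__ == "__main__":
        steps = ["fm_example -i wells.json -o npv_0", "fm_example -i wells.json"]
        print(collect_forward_model_outputs(steps))  # -> {'npv_0'}

The second step in the example omits --output, so its parse raises SystemExit and is skipped, just as the hook swallows SystemExit for steps it cannot parse.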