From b24e3e41877ba0681c06faac01c0d574c1d227e3 Mon Sep 17 00:00:00 2001
From: Greg Conan <gconan@umn.edu>
Date: Wed, 12 Oct 2022 23:00:28 -0500
Subject: [PATCH 01/21] Major incomplete edits adding T1- and T2-only modes

- Added functionality for the user to specify a different BIBSnet model
  number using the --model flag
- Added functionality for run.py to infer a default BIBSnet model number
- run.py checks for T1w and T2w data in the input directory specified by the
  user, then picks the appropriate model by checking which one is_default in
  ./data/models.csv for the user's specific combination of T1 and/or T2 data
  (T1 only, T2 only, or both)
- Incomplete T1-only and T2-only modes; they currently finish without error
  but only because they skip registration
- Added lots of TODO comments after lines that assume the user is running
  both T1 and T2
- T1-only mode will need to do T1-to-BIBS registration, and T2-only mode
  will need to do T2-to-BIBS registration
- Changed the organization of image registration and resizing: modularized
  the formerly gargantuan resize_images function by splitting it into
  registration (register_all_preBIBSnet_imgs) and the actual transform
  (apply_final_prebibsnet_xfms)
- apply_final_prebibsnet_xfms is further split into ACPC and non-ACPC
  functions
- I tested 2 sessions each of 2 BCP subjects, and prebibsnet ran without
  error. However, I have not yet looked at the outputs.
- Added lots of print statements to the eta_squared function because it
  still needs to be fixed
- The current eta_squared function sometimes produces a higher eta-squared
  value for the (visually) wrong image when the eta-squared values of ACPC
  and non-ACPC are close
- Removed the bibsnet model/task number from parameter-file-application.json
---
 .gitignore                      |   2 +-
 data/models.csv                 |   6 +
 parameter-file-application.json |   3 +-
 run.py                          | 170 ++++++++---
 src/utilities.py                | 489 +++++++++++++++++++-------------
 5 files changed, 433 insertions(+), 237 deletions(-)
 create mode 100644 data/models.csv

diff --git a/.gitignore b/.gitignore
index bf9ae48..349064c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,5 +11,5 @@
 /src/img_processing/R_mask_holes_filled.nii.gz
 /src/img_processing/recombined_mask_LR.nii.gz
 /src/img_processing/Rmask.nii.gz
-/data/
+/data/*/
 **/trained_models

diff --git a/data/models.csv b/data/models.csv
new file mode 100644
index 0000000..e7e88bc
--- /dev/null
+++ b/data/models.csv
@@ -0,0 +1,6 @@
+model_num,T1w,T2w,is_default,description,sørensen_dice_coefficient,image_augmentation,details
+512,TRUE,TRUE,TRUE,"Default model for both T1w and T2w",0.85,TRUE,"https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/512/Task512.md"
+513,TRUE,TRUE,FALSE,"Same as 512, but no image augmentation",0.89,FALSE,"https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/513/Task513.md"
+514,TRUE,FALSE,TRUE,"T1-only model","https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/514/514-by-age.md",TRUE,"https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/514/514.md"
+515,FALSE,TRUE,TRUE,"T2-only model","https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/515/515-by-age.md",TRUE,"https://github.com/DCAN-Labs/dcan-nn-unet/blob/main/doc/tasks/515/515.md"
+526,TRUE,TRUE,FALSE,"Same as 512, but trained on all data.
+No test set.","not applicable",TRUE,"not applicable"
\ No newline at end of file

diff --git a/parameter-file-application.json b/parameter-file-application.json
index 250681c..b87b778 100644
--- a/parameter-file-application.json
+++ b/parameter-file-application.json
@@ -17,8 +17,7 @@
         "model": "3d_fullres",
         "nnUNet_predict_path": "/home/support/public/torch_cudnn8.2/bin/nnUNet_predict",
         "code_dir": "/home/faird/shared/code/internal/pipelines/bibsnet/BIBSnet",
-        "singularity_image_path": "/home/feczk001/gconan/placeholder.txt",
-        "task": "512"
+        "singularity_image_path": "/home/feczk001/gconan/placeholder.txt"
     },
 
     "nibabies": {

diff --git a/run.py b/run.py
index e28e783..fdf5788 100755
--- a/run.py
+++ b/run.py
@@ -1,3 +1,4 @@
+
 #!/usr/bin/env python3
 # coding: utf-8
 
@@ -5,7 +6,7 @@
 Connectome ABCD-XCP niBabies Imaging nnu-NET (CABINET)
 Greg Conan: gconan@umn.edu
 Created: 2021-11-12
-Updated: 2022-08-30
+Updated: 2022-10-12
 """
 # Import standard libraries
 import argparse
@@ -46,11 +47,13 @@ def find_myself(flg):
 
 # Custom local imports
 from src.utilities import (
-    as_cli_attr, as_cli_arg, correct_chirality, create_anatomical_average,
-    crop_image, dict_has, dilate_LR_mask, ensure_prefixed, exit_with_time_info,
-    extract_from_json, get_age_closest_to, get_and_make_preBIBSnet_work_dirs,
+    apply_final_prebibsnet_xfms, as_cli_attr, as_cli_arg, correct_chirality,
+    create_anatomical_average, crop_image, dict_has, dilate_LR_mask,
+    ensure_prefixed, exit_with_time_info, extract_from_json,
+    get_age_closest_to, get_and_make_preBIBSnet_work_dirs,
     get_optional_args_in, get_stage_name, get_subj_ID_and_session,
-    get_template_age_closest_to, make_given_or_default_dir, resize_images,
+    get_template_age_closest_to, make_given_or_default_dir,
+    only_Ts_needed_for_bibsnet_model, register_all_preBIBSnet_imgs,
    run_all_stages, valid_readable_json, validate_parameter_types,
    valid_readable_dir, valid_subj_ses_ID, valid_whole_number
 )
@@ -94,6 +97,7 @@ def get_params_from_JSON(stage_names, logger):
     """
     # default_brain_z_size = 120
     default_end_stage = "postbibsnet"  # TODO Change to stage_names[-1] once nibabies and XCPD run from CABINET
+    default_model_num_bibsnet = 512
     msg_stage = ("Name of the stage to run {}. By default, this will be "
                  "the {} stage. Valid choices: {}")
     parser = argparse.ArgumentParser("CABINET")
@@ -143,24 +147,17 @@ def get_params_from_JSON(stage_names, logger):
         "Include this argument unless the age in months is specified in"
         "the participants.tsv file inside the BIDS input directory.")
     )
-    parser.add_argument(
-        "-z", "--brain-z-size", action="store_true",  # type=valid_whole_number,
-        # default=default_brain_z_size,
-        help=("Include this flag to infer participants' brain height (z) "
-              "using the participants.tsv brain_z_size column. Otherwise, "
-              "CABINET will estimate the brain height from the participant "
-              "age and averages of a large sample of infant brain heights.")  # TODO rephase
-        # help=("Positive integer, the size of the participant's brain in millimeters along the z-axis. By default, this will be {}.".format(default_brain_z_size))
-    )
     parser.add_argument(
         "-end", "--ending-stage", dest="end",
-        choices=stage_names[:3], default=default_end_stage,
+        choices=stage_names[:3], default=default_end_stage,  # TODO change to choices=stage_names,
         help=msg_stage.format("last", default_end_stage, ", ".join(stage_names[:3]))
     )
     parser.add_argument(
-        "-ses", "--session", "--session-id", type=valid_subj_ses_ID,
-        help=("The name of the session to processes participant data for. "
" - "Example: baseline_year1") + "-model", "--model-number", "--bibsnet-model", + # default=default_model_num_bibsnet, # TODO By default, get the model number(s) for the participants.tsv file + type=valid_whole_number, dest="model", + help=("Model/task number for BIBSnet. By default, this will be {}." + .format(default_model_num_bibsnet)) ) parser.add_argument( "--overwrite", "--overwrite-old", # TODO Change this to "-skip" @@ -170,6 +167,11 @@ def get_params_from_JSON(stage_names, logger): "CABINET will skip creating any CABINET output files that " "already exist in the sub-directories of derivatives.") ) + parser.add_argument( + "-ses", "--session", "--session-id", type=valid_subj_ses_ID, + help=("The name of the session to processes participant data for. " + "Example: baseline_year1") + ) parser.add_argument( "-start", "--starting-stage", dest="start", choices=stage_names[:3], default=stage_names[0], # TODO Change default to start where we left off by checking which stages' prerequisites and outputs already exist @@ -181,6 +183,15 @@ def get_params_from_JSON(stage_names, logger): "command being run by CABINET to stdout. Otherwise CABINET " "will only print warnings, errors, and minimal output.") ) + parser.add_argument( + "-z", "--brain-z-size", action="store_true", # type=valid_whole_number, + # default=default_brain_z_size, + help=("Include this flag to infer participants' brain height (z) " + "using the participants.tsv brain_z_size column. Otherwise, " + "CABINET will estimate the brain height from the participant " + "age and averages of a large sample of infant brain heights.") # TODO rephrase + # help=("Positive integer, the size of the participant's brain in millimeters along the z-axis. By default, this will be {}.".format(default_brain_z_size)) + ) parser.add_argument( SCRIPT_DIR_ARG, dest=as_cli_attr(SCRIPT_DIR_ARG), type=valid_readable_dir, @@ -220,10 +231,7 @@ def validate_cli_args(cli_args, stage_names, parser, logger): "end": cli_args["end"]} # TODO Maybe save the stage_names list in here too to replace optional_out_dirs use cases? 
for arg_to_add in ("bids_dir", "overwrite", "verbose"): j_args["common"][arg_to_add] = cli_args[arg_to_add] - j_args["ID"] = {"subject": cli_args["participant_label"], - "session": cli_args["session"], - "age_months": cli_args["age_months"], - "brain_z_size": cli_args["brain_z_size"]} + # j_args["ID"] = {"subject": cli_args["participant_label"], "session": cli_args["session"], "age_months": cli_args["age_months"], "brain_z_size": cli_args["brain_z_size"], "model": cli_args["model"]} # TODO Remove all references to the optional_out_dirs arguments, and change # j_args[optional_out_dirs][derivatives] to instead be j_args[common][output_dir] @@ -279,7 +287,7 @@ def validate_cli_args(cli_args, stage_names, parser, logger): # using age_months and the age-to-head-radius table .csv file sub_ses_IDs[ix]["brain_z_size"] = read_from_participants_tsv( j_args, logger, "brain_z_size", *sub_ses - ) if j_args["ID"]["brain_z_size"] else get_brain_z_size( + ) if cli_args["brain_z_size"] else get_brain_z_size( sub_ses_IDs[ix]["age_months"], j_args, logger ) @@ -290,6 +298,20 @@ def validate_cli_args(cli_args, stage_names, parser, logger): cli_args["parameter_json"], parser, stage_names ) + # Check whether this sub ses has T1w and/or T2w input data + data_path_BIDS_T = dict() # Paths to expected input data to check + for t in (1, 2): + data_path_BIDS_T[t] = os.path.join(j_args["common"]["bids_dir"], + *sub_ses, "anat", + "*T{}w.nii.gz".format(t)) + sub_ses_IDs[ix]["has_T{}w".format(t) + ] = bool(glob(data_path_BIDS_T[t])) + + models_df = get_df_with_valid_bibsnet_models(sub_ses_IDs[ix]) + sub_ses_IDs[ix]["model"] = validate_model_num( + cli_args, data_path_BIDS_T, models_df, sub_ses_IDs[ix], parser + ) + # Create BIBSnet in/out directories dir_BIBSnet = dict() for io in ("in", "out"): @@ -306,8 +328,68 @@ def validate_cli_args(cli_args, stage_names, parser, logger): return j_args, sub_ses_IDs -def get_brain_z_size(age_months, j_args, logger, buffer=5): +def get_df_with_valid_bibsnet_models(sub_ses_ID): + + # Read in models.csv info mapping model num to which T(s) it has + models_df = pd.read_csv(os.path.join(SCRIPT_DIR, "data", "models.csv")) + + # Exclude any models which require (T1w or T2w) data the user lacks + for t in only_Ts_needed_for_bibsnet_model(sub_ses_ID): + # print("has_T{}w: {}".format(t, sub_ses_ID["has_T{}w".format(t)])) + # if not sub_ses_ID["has_T{}w".format(t)]: + # models_df = models_df.loc[~models_df["T{}w".format(t)]] + models_df = select_model_with_data_for_this_T(models_df, t, False) + return models_df + + +def validate_model_num(cli_args, data_path_BIDS_T, models_df, sub_ses_ID, parser): + """ + :param cli_args: Dictionary containing all command-line input arguments + :param data_path_BIDS_T: Dictionary mapping 1 and 2 to the (incomplete) + paths to expected T1w and T2w data respectively + :param j_args: Dictionary containing all args from parameter .JSON file + :param parser: + :return: Int, validated bibsnet model number """ + model = cli_args["model"] # Model number (if given from command line) + + # Exclude any models which require (T1w or T2w) data the user lacks + for t in (1, 2): + + # If user gave a model number but not the data the model needs, + # then crash with an informative error message + if model and (model not in models_df["model_num"]): + parser.error("CABINET needs T{}w data at the path below " + "to run model {}, but none was found.\n{}\n" + .format(t, model, data_path_BIDS_T[t])) + + if not model: # Get default model number if user did not give one + # 
print(models_df[models_df["is_default"]]) + models_df = models_df[models_df["is_default"]] + if len(models_df) > 1: + for t in (1, 2): + models_df = select_model_with_data_for_this_T( + models_df, t, sub_ses_ID["has_T{}w".format(t)] + ) + model = models_df.squeeze()["model_num"] + + return model + + +def select_model_with_data_for_this_T(models_df, t, has_T): + """ + :param models_df: pandas.DataFrame with columns called "T1w" and "T2w" + with bool values describing which T(s) a model needs + :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) + :param has_T: bool, True if T{t}w data exists for this subject/ses + :return: pandas.DataFrame, all models_df rows with data for this sub/ses/t + """ + has_T_row = models_df["T{}w".format(t)] + return models_df.loc[has_T_row if has_T else ~has_T_row] + + +def get_brain_z_size(age_months, j_args, logger, buffer=5): + """ Infer a participant's brain z-size from their age and from the average brain diameters table at the AGE_TO_HEAD_RADIUS_TABLE path :param age_months: Int, participant's age in months @@ -416,11 +498,11 @@ def read_from_participants_tsv(j_args, logger, col_name, *sub_ses): # Get and return the col_name value from participants.tsv subj_row = part_tsv_df[ - part_tsv_df[sub_ID_col] == ensure_prefixed(sub_ses[0], "sub-") + part_tsv_df[sub_ID_col] == ensure_prefixed(sub_ses[0], "sub-") # TODO part_tsv_df[sub_ID_col] = part_tsv_df[sub_ID_col].apply(ensure_prefixed(...)) ] # select where "participant_id" matches if len(sub_ses) > 1: subj_row = subj_row[ - subj_row[ses_ID_col] == ensure_prefixed(sub_ses[1], "ses-") + subj_row[ses_ID_col] == ensure_prefixed(sub_ses[1], "ses-") # TODO part_tsv_df[ses_ID_col] = part_tsv_df[ses_ID_col].apply(ensure_prefixed(...)) ] # select where "session" matches if j_args["common"]["verbose"]: logger.info("Subject details from participants.tsv row:\n{}" @@ -443,27 +525,39 @@ def run_preBIBSnet(j_args, logger): # Crop T1w and T2w images cropped = dict() crop2full = dict() - for t in (1, 2): + for t in only_Ts_needed_for_bibsnet_model(j_args["ID"]): + # for t in (1, 2): # TODO Make this also work for T1-only or T2-only + # if j_args["ID"]["has_T{}w".format(t)]: cropped[t] = preBIBSnet_paths["crop_T{}w".format(t)] crop2full[t] = crop_image(preBIBSnet_paths["avg"]["T{}w_avg".format(t)], cropped[t], j_args, logger) logger.info(completion_msg.format("cropped")) - # Resize T1w and T2w images + # Resize T1w and T2w images if running a BIBSnet model using T1w and T2w # TODO Make ref_img an input parameter if someone wants a different reference image? 
     reference_img = os.path.join(SCRIPT_DIR, "data", "MNI_templates",
                                  "INFANT_MNI_T{}_1mm.nii.gz")  # TODO Pipeline should verify that these exist before running
     id_mx = os.path.join(SCRIPT_DIR, "data", "identity_matrix.mat")
+    if j_args["ID"]["has_T1w"] and j_args["ID"]["has_T2w"]:
+        all_registration_vars = register_all_preBIBSnet_imgs(
+            cropped, preBIBSnet_paths["resized"], reference_img,
+            id_mx, crop2full, preBIBSnet_paths["avg"], j_args, logger
+        )
+        transformed_images = apply_final_prebibsnet_xfms(
+            all_registration_vars, preBIBSnet_paths["avg"], j_args, logger
+        )
+        logger.info(completion_msg.format("resized"))
 
-    transformed_images = resize_images(
-        cropped, preBIBSnet_paths["resized"], reference_img,
-        id_mx, crop2full, preBIBSnet_paths["avg"], j_args, logger
-    )
-    logger.info(completion_msg.format("resized"))
-
-    # TODO Move this whole block to postBIBSnet, so it copies everything it needs first
+    # If running a T1w-only or T2w-only BIBSnet model, skip registration/resizing
+    #elif j_args["ID"]["has_T1w"]:
+    #    transformed_images = ""
+    else:
+        # TODO Add T1-to-BIBS and T2-to-BIBS functionality without T2-to-T1
+        transformed_images = {"T{}w".format(t): cropped[t]
+                              for t in cropped.keys()}
+
+    # TODO Copy this whole block to postBIBSnet, so it copies everything it needs first
     # Copy into BIBSnet input dir to transformed_images[T1w]
-    for t in (1, 2):
+    for t in only_Ts_needed_for_bibsnet_model(j_args["ID"]):  # (1, 2):  # TODO Make this also work for T1-only or T2-only
         tw = "T{}w".format(t)
         out_nii_fpath = j_args["optimal_resized"][tw]
         os.makedirs(os.path.dirname(out_nii_fpath), exist_ok=True)
         if not os.path.exists(out_nii_fpath):  # j_args["common"]["overwrite"] or
             shutil.copy2(transformed_images[tw], out_nii_fpath)
@@ -506,7 +600,7 @@ def run_BIBSnet(j_args, logger):
                      "nnUNet": j_args["bibsnet"]["nnUNet_predict_path"],
                      "input": dir_BIBS.format("in"),
                      "output": dir_BIBS.format("out"),
-                     "task": str(j_args["bibsnet"]["task"])}
+                     "task": "{:03d}".format(j_args["ID"]["model"])}  # j_args["bibsnet"]["task"])}
     os.makedirs(inputs_BIBSnet["output"], exist_ok=True)
     if j_args["common"]["verbose"]:
         logger.info("Now running BIBSnet with these parameters:\n{}\n"
@@ -566,7 +660,7 @@ def run_postBIBSnet(j_args, logger):
     # if age_months > 33: age_months = "34-38"
 
     # Run left/right registration script and chirality correction
-    left_right_mask_nifti_fpath = run_left_right_registration(
+    left_right_mask_nifti_fpath = run_left_right_registration(  # NOTE Don't change this when implementing T1-/T2-only
         j_args, sub_ses, tmpl_age, 2 if int(age_months) < 22 else 1, logger  # NOTE 22 cutoff might change
     )
     logger.info("Left/right image registration completed")

diff --git a/src/utilities.py b/src/utilities.py
index 26bbeab..772d145 100755
--- a/src/utilities.py
+++ b/src/utilities.py
@@ -5,7 +5,7 @@
 Common source for utility functions used by CABINET :)
 Greg Conan: gconan@umn.edu
 Created: 2021-11-12
-Updated: 2022-08-30
+Updated: 2022-10-12
 """
 # Import standard libraries
 import argparse
@@ -99,6 +99,132 @@ def always_true(*_):
     return True
 
 
+def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs,
+                         avg_imgs, outputs, t, j_args, logger):
+    outputs["T{}w".format(t)] = os.path.join(
+        xfm_ACPC_vars["out_dir"],
+        "preBIBSnet_final_000{}.nii.gz".format(t-1)
+    )
+
+    # Concatenate rigidbody2acpc.mat and registration (identity/cropT2tocropT1.mat)
+    # First concatenate rigidbody2acpc with registration, then concatenate
+    # the output .mat with the template
+    acpc2rigidbody = xfm_ACPC_vars["mats_T{}w".format(t)]["acpc2rigidbody"]
+    to_rigidbody_final_mat = os.path.join(xfm_ACPC_vars["out_dir"],
+                                          "T2w_to_rigidbody.mat"
+                                          ) if t == 2 else acpc2rigidbody
+
+    # final_mat differs between T1w and T2w because T2w has to go into T1w
+    # space before ACPC and T1w does not
+    if t == 2:
+        run_FSL_sh_script(
+            j_args, logger, "convert_xfm", "-omat", to_rigidbody_final_mat,
+            "-concat", xfm_ACPC_imgs["cropT{}tocropT1".format(t)],
+            acpc2rigidbody
+        )
+
+    crop2BIBS_mat = os.path.join(xfm_ACPC_vars["out_dir"],
+                                 "crop_T{}w_to_BIBS_template.mat".format(t))
+    if not os.path.exists(crop2BIBS_mat):
+        shutil.copy2(to_rigidbody_final_mat, crop2BIBS_mat)
+        if j_args["common"]["verbose"]:
+            logger.info("Copying {} to {}".format(to_rigidbody_final_mat,
+                                                  crop2BIBS_mat))
+    outputs["T{}w_crop2BIBS_mat".format(t)] = crop2BIBS_mat
+
+    # Do the applywarp FSL command from align_ACPC_1_img (for T1w and T2w, for ACPC)
+    # applywarp output is optimal_realigned_imgs input
+    # Apply registration and ACPC alignment to the T1ws and the T2ws
+    run_FSL_sh_script(j_args, logger, "applywarp", "--rel",
+                      "--interp=spline", "-i", avg_imgs["T{}w_avg".format(t)],
+                      "-r", xfm_ACPC_vars["ref_img"].format(t),
+                      "--premat=" + crop2BIBS_mat,  # preBIBS_ACPC_out["T{}w_crop2BIBS_mat".format(t)],
+                      "-o", outputs["T{}w".format(t)])
+    # pdb.set_trace()  # TODO Add "debug" flag?
+
+    return outputs
+
+
+def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs,
+                             outputs, t, full2crop_ACPC, j_args, logger):
+    # TODO MODULARIZE (put this into a function to call once for ACPC and once for non to eliminate redundancy?)
+    outputs["T{}w".format(t)] = os.path.join(
+        xfm_non_ACPC_vars["out_dir"],
+        "preBIBSnet_final_000{}.nii.gz".format(t-1)
+    )
+
+    # Do convert_xfm to combine 2 .mat files (non-ACPC
+    # registration_T2w_to_T1w's cropT2tocropT1.mat, and then non-ACPC
+    # registration_T2w_to_T1w's crop_T1_to_BIBS_template.mat)
+    outputs["T{}w_crop2BIBS_mat".format(t)] = os.path.join(
+        xfm_non_ACPC_vars["out_dir"], "full_crop_T{}w_to_BIBS_template.mat".format(t)  # TODO Changed this back to full_crop on 2022-08-30
+    )
+    full2cropT1w_mat = os.path.join(xfm_non_ACPC_vars["out_dir"],
+                                    "full2cropT1w.mat")
+    run_FSL_sh_script(
+        j_args, logger, "convert_xfm",
+        "-omat", full2cropT1w_mat,
+        "-concat", full2crop_ACPC,  # NOTE The choice between 2022-08-11 and 2022-08-23 to use ACPC's full2crop probably was NOT a problem because it works
+        xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)]
+    )
+    run_FSL_sh_script(
+        j_args, logger, "convert_xfm",
+        "-omat", outputs["T{}w_crop2BIBS_mat".format(t)],
+        "-concat", full2cropT1w_mat,
+        xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)]
+    )
+    # Do the applywarp FSL command from align_ACPC_1_img (for T2w and not T1w, for non-ACPC)
+    # applywarp output is optimal_realigned_imgs input
+    # Apply registration to the T1ws and the T2ws
+    run_FSL_sh_script(j_args, logger, "applywarp", "--rel",
+                      "--interp=spline", "-i", avg_imgs["T{}w_avg".format(t)],  # cropped_imgs[t],
+                      "-r", xfm_non_ACPC_vars["ref_img"].format(t),
+                      "--premat=" + outputs["T{}w_crop2BIBS_mat".format(t)],  # full2BIBS_mat, #
+                      "-o", outputs["T{}w".format(t)])
+    return outputs
+
+
+def apply_final_prebibsnet_xfms(all_regn_info, averaged_imgs, j_args, logger):
+    """
+    :param all_regn_info: Dict mapping "ACPC" and "non_ACPC" to dicts mapping
+                          "img_paths" to a dict of paths to image files and
+                          "vars" to a dict of other variables.
+                          {"ACPC": {"vars": {...}, "img_paths": {...}},
+                           "non_ACPC": {"vars": {...}, "img_paths": {...}}}
+    :param averaged_imgs: Dictionary mapping "T1w_avg" and "T2w_avg" to
+                          averaged anatomical image file paths
+    :param j_args: Dictionary containing all args from parameter .JSON file
+    :param logger: logging.Logger object to show messages and raise warnings
+    :return: Dict mapping "T1w" and "T2w" to the optimal resized image paths
+    """
+    out_ACPC = dict()
+    out_non_ACPC = dict()
+
+    for t in (1, 2):
+        # Apply ACPC-then-registration transforms for this subject session (and T)
+        out_ACPC.update(apply_final_ACPC_xfm(
+            all_regn_info["ACPC"]["vars"],
+            all_regn_info["ACPC"]["img_paths"],
+            averaged_imgs, out_ACPC, t, j_args, logger
+        ))
+
+        # Retrieve path to ACPC full2crop.mat file (to use for non-ACPC xfms)
+        # from the very (probably overly) nested dict of registration vars
+        full2crop_ACPC = all_regn_info["ACPC"]["vars"]["mats_T{}w".format(t)
+                                                       ]["full2crop"]
+
+        # Apply registration-only transforms for this subject session (and T)
+        out_non_ACPC.update(apply_final_non_ACPC_xfm(
+            all_regn_info["non_ACPC"]["vars"],
+            all_regn_info["non_ACPC"]["img_paths"],
+            averaged_imgs, out_non_ACPC, t, full2crop_ACPC, j_args, logger
+        ))
+
+    # Outputs: 1 .mat file for ACPC and 1 for non-ACPC (only retain the -to-T1w .mat file after this point)
+
+    # Return the best of the 2 resized images
+    return optimal_realigned_imgs(out_non_ACPC,  # TODO Add 'if' statement to skip eta-squared functionality if T1-/T2-only, b/c only one T means we'll only register to ACPC space
+                                  out_ACPC, j_args, logger)
+
+
 def argify(argname, argval):
     """
     :param argname: String naming a parameter for a script called from terminal
@@ -128,28 +254,40 @@ def calculate_eta(img_paths):
     """
     :param img_paths: Dictionary mapping "T1w" and "T2w" to strings that are
                       valid paths to the existing respective image files
-    :return: Int(?), the eta value
-    """
+    :return: Float, the eta-squared similarity value of the two images
+    """
     # get the data from each nifti image as a flattened vector
     vectors = dict()
-    for t in (1, 2):
+    for t in (1, 2):  # TODO Make this also work for (T1-only or?) T2-only by comparing to the registered image instead of the other T
         anat = "T{}w".format(t)
-        vectors[anat] = reshape_volume_to_array(nib.load(img_paths[anat]))
+        vectors[anat] = reshape_volume_to_array(nib.load(img_paths[anat]))  # np.abs()
+        negatives = vectors[anat][vectors[anat] < 0]
+        print("{} has {} negatives: {}".format(anat, len(negatives), negatives))
 
-    # mean value over all locations in both images
-    m_grand = (np.mean(vectors["T1w"]) + np.mean(vectors["T2w"])) / 2
+    print("Vectors: {}".format(vectors))
+    """
+    medians = {
+        "grand": (np.median(vectors["T1w"]) + np.median(vectors["T2w"])) / 2,
+        "within": np.median(np.concatenate((vectors["T1w"], vectors["T2w"])))
+    }
+    """
+    # mean value over all locations in both images  # TODO Add if statement to not average if T1-/T2-only
+    m_grand = (np.mean(vectors["T1w"]) + np.mean(vectors["T2w"])) / 2  # TODO Try using np.median instead of np.mean?
 
     # mean value matrix for each location in the 2 images
-    m_within = (vectors["T1w"] + vectors["T2w"]) / 2
+    m_within = (vectors["T1w"] + vectors["T2w"]) / 2  # TODO Try combining both arrays and taking the median of the result?
+ print("Mean Within: {}\nMean Total: {}".format(m_within, m_grand)) - sswithin = (sum(np.square(vectors["T1w"] - m_within)) - + sum(np.square(vectors["T2w"] - m_within))) - sstot = (sum(np.square(vectors["T1w"] - m_grand)) - + sum(np.square(vectors["T2w"] - m_grand))) + # sswithin = (sum(np.square(vectors["T1w"] - m_within)) + sum(np.square(vectors["T2w"] - m_within))) + # sstot = (sum(np.square(vectors["T1w"] - m_grand)) + sum(np.square(vectors["T2w"] - m_grand))) + + sswithin = sum_of_2_sums_of_squares_of(vectors["T1w"], vectors["T2w"], m_within) # medians["within"]) + sstot = sum_of_2_sums_of_squares_of(vectors["T1w"], vectors["T2w"], m_grand) # medians["grand"]) # NOTE SStot = SSwithin + SSbetween so eta can also be # written as SSbetween/SStot - return 1 - sswithin / sstot + print("SumSq Within: {}\nSumSq Total: {}".format(sswithin, sstot)) + return 1 - sswithin / sstot # Should there be parentheses around (1 - sswithin)? def check_and_correct_region(should_be_left, region, segment_name_to_number, @@ -188,7 +326,8 @@ def correct_chirality(nifti_input_file_path, segment_lookup_table, :param segment_lookup_table: String, path to a FreeSurfer-style look-up table :param left_right_mask_nifti_file: String, path to a mask file that distinguishes between left and right :param nifti_output_file_path: String, path to location to write the corrected file - :param t1w_path: + :param t1w_path: String, path to T1w image to use as a reference image + when applying transform :param j_args: Dictionary containing all args from parameter .JSON file :param logger: logging.Logger object to show messages and raise warnings """ @@ -237,7 +376,7 @@ def correct_chirality(nifti_input_file_path, segment_lookup_table, dummy_copy = "_dummy".join(split_2_exts(nifti_corrected_file_path)) shutil.copy2(nifti_corrected_file_path, dummy_copy) - seg_to_T1w_nat = os.path.join(chiral_out_dir, "seg_reg_to_T1w_native.mat") + seg_to_T1w_nat = os.path.join(chiral_out_dir, "seg_reg_to_T1w_native.mat") # TODO Change naming to "T2w" in all these files if running T2-only preBIBSnet_mat_glob = os.path.join( j_args["optional_out_dirs"]["postbibsnet"], *sub_ses, "preBIBSnet_*crop_T1w_to_BIBS_template.mat" # TODO Name this outside of pre- and postBIBSnet then pass it to both @@ -246,7 +385,7 @@ def correct_chirality(nifti_input_file_path, segment_lookup_table, run_FSL_sh_script(j_args, logger, "convert_xfm", "-omat", seg_to_T1w_nat, "-inverse", preBIBSnet_mat) # TODO Define preBIBSnet_mat path outside of stages because it's used by preBIBSnet and postBIBSnet - run_FSL_sh_script(j_args, logger, "flirt", "-applyxfm", "-ref", t1w_path, + run_FSL_sh_script(j_args, logger, "flirt", "-applyxfm", "-ref", t1w_path, # TODO Change this to T2 if running T2-only; infer that info from the BIDS input anat dir "-in", dummy_copy, "-init", seg_to_T1w_nat, "-o", nifti_output_file_path, "-interp", "nearestneighbour") logger.info(msg.format("Finished", nifti_input_file_path)) @@ -262,7 +401,7 @@ def create_anatomical_average(avg_params): "T1w_avg": String, average T1w output file path "T2w_avg": String, average T2w output file path} """ - for t in (1, 2): + for t in (1, 2): # TODO Make this also work for T1-only or T2-only. 
This may not even need to be changed if avg_params["T{}w_input".format(t)]: register_and_average_files(avg_params["T{}w_input".format(t)], avg_params["T{}w_avg".format(t)]) @@ -487,7 +626,7 @@ def get_and_make_preBIBSnet_work_dirs(j_args): # Build paths to BIDS anatomical input images and (averaged, # nnU-Net-renamed) output images preBIBSnet_paths["avg"] = dict() - for t in (1, 2): + for t in (1, 2) : # TODO Make this also work for T1-only or T2-only by not creating unneeded T dir(s) preBIBSnet_paths["avg"]["T{}w_input".format(t)] = list() for eachfile in glob(os.path.join(j_args["common"]["bids_dir"], *sub_ses, "anat", "*T{}w*.nii.gz" @@ -573,7 +712,7 @@ def get_optimal_resized_paths(sub_ses, bibsnet_out_dir): return {"T{}w".format(t): os.path.join( input_dir_BIBSnet, "{}_optimal_resized_000{}.nii.gz" .format("_".join(sub_ses), t - 1) - ) for t in (1, 2)} + ) for t in (1, 2)} # TODO Make this also work for T1-only or T2-only by not building unneeded paths? def get_spatial_resolution_of(image_fpath, j_args, logger, fn_name="fslinfo"): @@ -707,18 +846,33 @@ def make_given_or_default_dir(dirs_dict, dirname_key, default_dirpath): return dirs_dict +def only_Ts_needed_for_bibsnet_model(sub_ses_ID): + # to_return = list() + for t in (1, 2): + if sub_ses_ID["has_T{}w".format(t)]: + yield t + # to_return.append(t) + # return to_return + + def optimal_realigned_imgs(xfm_imgs_non_ACPC, xfm_imgs_ACPC_and_reg, j_args, logger): """ Check whether the cost function shows that only the registration-T2-to-T1 or the ACPC-alignment-and-T2-to-T1-registration is better (check whether ACPC alignment improves the T2-to-T1 registration; compare the T2-to-T1 with and without first doing the ACPC registration) - :param j_args: - :param logger: logging.Logger object to raise warning + :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings """ sub_ses = get_subj_ID_and_session(j_args) msg = "Using {} T2w-to-T1w registration for resizing.\nT1w: {}\nT2w: {}" - if calculate_eta(xfm_imgs_non_ACPC) > calculate_eta(xfm_imgs_ACPC_and_reg): + eta = dict() + print("\nACPC:") + eta["ACPC"] = calculate_eta(xfm_imgs_ACPC_and_reg) + print("\nNon-ACPC:") + eta["non-ACPC"] = calculate_eta(xfm_imgs_non_ACPC) + print("Eta-Squared Values: {}".format(eta)) + if eta["non-ACPC"] > eta["ACPC"]: optimal_resize = xfm_imgs_non_ACPC logger.info(msg.format("only", optimal_resize["T1w"], optimal_resize["T2w"])) # TODO Verify that these print the absolute path @@ -736,10 +890,102 @@ def optimal_realigned_imgs(xfm_imgs_non_ACPC, xfm_imgs_ACPC_and_reg, j_args, log *sub_ses, "preBIBSnet_" + os.path.basename(concat_mat) ) if not os.path.exists(out_mat_fpath): - shutil.copy2(concat_mat, out_mat_fpath) + shutil.copy2(concat_mat, out_mat_fpath) # TODO Put this block elsewhere so the block runs if T1-/T2-only skips this function (which it should) if j_args["common"]["verbose"]: logger.info("Copying {} to {}".format(concat_mat, out_mat_fpath)) return optimal_resize + + +def register_all_preBIBSnet_imgs(cropped_imgs, output_dir, ref_image, ident_mx, + crop2full, averaged_imgs, j_args, logger): + """ + Resize the images to match the dimensions of images trained in the model, + and ensure that the first image (presumably a T1) is co-registered to the + second image (presumably a T2) before resizing. 
Use multiple alignments + of both images, and return whichever one is better (higher eta squared) + :param cropped_imgs: Dictionary mapping ints, (T) 1 or 2, to strings (valid + paths to existing image files to resize) + :param output_dir: String, valid path to a dir to save resized images into + :param ref_images: Dictionary mapping string keys to valid paths to real + image file strings for "ACPC" (alignment) and (T2-to-T1) + "reg"(istration) for flirt to use as a reference image. + The ACPC string has a "{}" in it to represent (T) 1 or 2 + :param ident_mx: String, valid path to existing identity matrix .mat file + :param crop2full: String, valid path to existing crop2full.mat file + :param averaged_imgs: Dictionary mapping ints, (T) 1 or 2, to strings + (valid paths to existing image files to resize) + :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings + """ + # TODO Add 'if' to skip most of the functionality here for T1-only or T2-only + + # Build dictionaries of variables used for image transformations with and + # without ACPC alignment + xfm_non_ACPC_vars = {"out_dir": os.path.join(output_dir, "xfms"), + "resolution": "1", "ident_mx": ident_mx, + "ref_img": ref_image} + xfm_ACPC_vars = xfm_non_ACPC_vars.copy() + xfm_ACPC_vars["out_dir"] = os.path.join(output_dir, "ACPC_align") + out_var = "output_T{}w_img" + reg_in_var = "reg_input_T{}w_img" + + for t, crop_img_path in cropped_imgs.items(): + img_ext = split_2_exts(crop_img_path)[-1] + + # Non-ACPC input to registration + # for keyname in ("crop_", "reg_input_"): + xfm_non_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path # TODO This variable appears to be unused for non-ACPC + xfm_non_ACPC_vars[reg_in_var.format(t)] = crop_img_path + + # Non-ACPC outputs to registration + outfname = "T{}w_registered_to_T1w".format(t) + img_ext + xfm_non_ACPC_vars[out_var.format(t)] = os.path.join( + xfm_non_ACPC_vars["out_dir"], outfname + ) + + # ACPC inputs to align and registration + xfm_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path + xfm_ACPC_vars[reg_in_var.format(t)] = os.path.join( + xfm_ACPC_vars["out_dir"], "ACPC_aligned_T{}w".format(t) + img_ext + ) + xfm_ACPC_vars[out_var.format(t)] = os.path.join( + xfm_ACPC_vars["out_dir"], "ACPC_" + outfname + ) + + if j_args["common"]["verbose"]: + msg_xfm = "Arguments for {}ACPC image transformation:\n{}" + logger.info(msg_xfm.format("non-", xfm_non_ACPC_vars)) + logger.info(msg_xfm.format("", xfm_ACPC_vars)) + + # Make output directories for transformed images + for each_xfm_vars_dict in (xfm_non_ACPC_vars, xfm_ACPC_vars): + os.makedirs(each_xfm_vars_dict["out_dir"], exist_ok=True) + + xfm_imgs_non_ACPC = registration_T2w_to_T1w( + j_args, logger, xfm_non_ACPC_vars, reg_in_var, acpc=False + ) + + # Do direct T1w-T2w alignment + for t in (1, 2): + + # Run ACPC alignment + xfm_ACPC_vars["mats_T{}w".format(t)] = align_ACPC_1_img( + j_args, logger, xfm_ACPC_vars, crop2full[t], reg_in_var, t, + averaged_imgs["T{}w_avg".format(t)] + ) + + # T1w-T2w alignment of ACPC-aligned images + xfm_ACPC_and_reg_imgs = registration_T2w_to_T1w( + j_args, logger, xfm_ACPC_vars, reg_in_var, acpc=True + ) + + # pdb.set_trace() # TODO Add "debug" flag? + + # TODO End function here and start a new function below? Maybe put everything above in "register_all_preBIBSnet_imgs" and everything below in "apply_final_preBIBSnet_xfm" ? 
+ return { # A (*very* nested) dict with vars organized to apply final xfm + "ACPC": {"vars": xfm_ACPC_vars, "img_paths": xfm_ACPC_and_reg_imgs}, + "non_ACPC": {"vars": xfm_non_ACPC_vars, "img_paths": xfm_imgs_non_ACPC} + } def register_and_average_files(input_file_paths, output_file_path): @@ -784,11 +1030,19 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): :return: Dictionary mapping "T1w" and "T2w" to their respective newly registered image file paths """ + # TODO Add 'if' to skip most of the functionality here for T1-only or T2-only + # String naming the key in xfm_vars mapped to the path # to the image to use as an input for registration + + inputs_msg = "\n".join(["T{}w: {}".format(t, xfm_vars[reg_input_var.format(t)]) + for t in only_Ts_needed_for_bibsnet_model(j_args["ID"])]) + logger.info("Input images for T1w registration:\n" + inputs_msg) + """ logger.info("Input images for T1w registration:\nT1w: {}\nT2w: {}" .format(xfm_vars[reg_input_var.format(1)], xfm_vars[reg_input_var.format(2)])) + """ # Define paths to registration output matrices and images registration_outputs = {"cropT1tocropT1": xfm_vars["ident_mx"], @@ -805,6 +1059,7 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): 3. T2w Make transformed """ nonACPC_xfm_params_T = dict() + for t in (1, 2): # Define paths to registration output files registration_outputs["T{}w_crop2BIBS_mat".format(t)] = os.path.join( @@ -853,178 +1108,6 @@ def reshape_volume_to_array(array_img): return image_data.flatten() -def resize_images(cropped_imgs, output_dir, ref_image, ident_mx, - crop2full, averaged_imgs, j_args, logger): - """ - Resize the images to match the dimensions of images trained in the model, - and ensure that the first image (presumably a T1) is co-registered to the - second image (presumably a T2) before resizing. Use multiple alignments - of both images, and return whichever one is better (higher eta squared) - :param cropped_imgs: Dictionary mapping ints, (T) 1 or 2, to strings (valid - paths to existing image files to resize) - :param output_dir: String, valid path to a dir to save resized images into - :param ref_images: Dictionary mapping string keys to valid paths to real - image file strings for "ACPC" (alignment) and (T2-to-T1) - "reg"(istration) for flirt to use as a reference image. 
- The ACPC string has a "{}" in it to represent (T) 1 or 2 - :param ident_mx: String, valid path to existing identity matrix .mat file - :param crop2full: String, valid path to existing crop2full.mat file - :param averaged_imgs: Dictionary mapping ints, (T) 1 or 2, to strings - (valid paths to existing image files to resize) - :param j_args: Dictionary containing all args from parameter .JSON file - :param logger: logging.Logger object to show messages and raise warnings - """ - # Build dictionaries of variables used for image transformations with and - # without ACPC alignment - xfm_non_ACPC_vars = {"out_dir": os.path.join(output_dir, "xfms"), - "resolution": "1", "ident_mx": ident_mx, - "ref_img": ref_image} - xfm_ACPC_vars = xfm_non_ACPC_vars.copy() - xfm_ACPC_vars["out_dir"] = os.path.join(output_dir, "ACPC_align") - out_var = "output_T{}w_img" - reg_in_var = "reg_input_T{}w_img" - - for t, crop_img_path in cropped_imgs.items(): - img_ext = split_2_exts(crop_img_path)[-1] - - # Non-ACPC input to registration - # for keyname in ("crop_", "reg_input_"): - xfm_non_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path # TODO This variable appears to be unused for non-ACPC - xfm_non_ACPC_vars[reg_in_var.format(t)] = crop_img_path - - # Non-ACPC outputs to registration - outfname = "T{}w_registered_to_T1w".format(t) + img_ext - xfm_non_ACPC_vars[out_var.format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], outfname - ) - - # ACPC inputs to align and registration - xfm_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path - xfm_ACPC_vars[reg_in_var.format(t)] = os.path.join( - xfm_ACPC_vars["out_dir"], "ACPC_aligned_T{}w".format(t) + img_ext - ) - xfm_ACPC_vars[out_var.format(t)] = os.path.join( - xfm_ACPC_vars["out_dir"], "ACPC_" + outfname - ) - - if j_args["common"]["verbose"]: - msg_xfm = "Arguments for {}ACPC image transformation:\n{}" - logger.info(msg_xfm.format("non-", xfm_non_ACPC_vars)) - logger.info(msg_xfm.format("", xfm_ACPC_vars)) - - # Make output directories for transformed images - for each_xfm_vars_dict in (xfm_non_ACPC_vars, xfm_ACPC_vars): - os.makedirs(each_xfm_vars_dict["out_dir"], exist_ok=True) - - xfm_imgs_non_ACPC = registration_T2w_to_T1w( - j_args, logger, xfm_non_ACPC_vars, reg_in_var, acpc=False - ) - - # Do direct T1w-T2w alignment - for t in (1, 2): - - # Run ACPC alignment - xfm_ACPC_vars["mats_T{}w".format(t)] = align_ACPC_1_img( - j_args, logger, xfm_ACPC_vars, crop2full[t], reg_in_var, t, - averaged_imgs["T{}w_avg".format(t)] - ) - - # T1w-T2w alignment of ACPC-aligned images - xfm_ACPC_and_registered_imgs = registration_T2w_to_T1w( - j_args, logger, xfm_ACPC_vars, reg_in_var, acpc=True - ) - - # TODO End function here and start a new function below? Maybe put everything above in "register_all_preBIBSnet_imgs" and everything below in "apply_final_preBIBSnet_xfm" ? 
- - # ACPC - preBIBS_ACPC_out = dict() - preBIBS_nonACPC_out = dict() - for t in (1, 2): - preBIBS_ACPC_out["T{}w".format(t)] = os.path.join( - xfm_ACPC_vars["out_dir"], - "preBIBSnet_final_000{}.nii.gz".format(t-1) - ) - - # Concatenate rigidbody2acpc.mat and registration (identity/cropT2tocropT1.mat) - # First concatenate rigidbody2acpc with registration, then concatenate - # the output .mat with the template - acpc2rigidbody = xfm_ACPC_vars["mats_T{}w".format(t)]["acpc2rigidbody"] - to_rigidbody_final_mat = os.path.join(xfm_ACPC_vars["out_dir"], - "T2w_to_rigidbody.mat" - ) if t == 2 else acpc2rigidbody - - # final_mat differs between T1w and T2w because T2w has to go into T1w - # space before ACPC and T1w does not - if t == 2: - run_FSL_sh_script( - j_args, logger, "convert_xfm", "-omat", to_rigidbody_final_mat, - "-concat", - xfm_ACPC_and_registered_imgs["cropT{}tocropT1".format(t)], - acpc2rigidbody - ) - - crop2BIBS_mat = os.path.join(xfm_ACPC_vars["out_dir"], - "crop_T{}w_to_BIBS_template.mat".format(t)) - if not os.path.exists(crop2BIBS_mat): - shutil.copy2(to_rigidbody_final_mat, crop2BIBS_mat) - if j_args["common"]["verbose"]: - logger.info("Copying {} to {}".format(to_rigidbody_final_mat, - crop2BIBS_mat)) - preBIBS_ACPC_out["T{}w_crop2BIBS_mat".format(t)] = crop2BIBS_mat - - # Do the applywarp FSL command from align_ACPC_1_img (for T1w and T2w, for ACPC) - # applywarp output is optimal_realigned_imgs input - # Apply registration and ACPC alignment to the T1ws and the T2ws - run_FSL_sh_script(j_args, logger, "applywarp", "--rel", - "--interp=spline", "-i", averaged_imgs["T{}w_avg".format(t)], - "-r", xfm_ACPC_vars["ref_img"].format(t), - "--premat=" + crop2BIBS_mat, # preBIBS_ACPC_out["T{}w_crop2BIBS_mat".format(t)], - "-o", preBIBS_ACPC_out["T{}w".format(t)]) - # pdb.set_trace() # TODO Add "debug" flag? - - # Non-ACPC # TODO MODULARIZE (put this into a function and call it once for ACPC and once for non to eliminate redundancy) - preBIBS_nonACPC_out["T{}w".format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], - "preBIBSnet_final_000{}.nii.gz".format(t-1) - ) - - # Do convert_xfm to combine 2 .mat files (non-ACPC - # registration_T2w_to_T1w's cropT2tocropT1.mat, and then non-ACPC - # registration_T2w_to_T1w's crop_T1_to_BIBS_template.mat) - preBIBS_nonACPC_out["T{}w_crop2BIBS_mat".format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], "full_crop_T{}w_to_BIBS_template.mat".format(t) # TODO Changing this back to full_crop on 2022-08-30 - ) - full2cropT1w_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], - "full2cropT1w.mat") - run_FSL_sh_script( - j_args, logger, "convert_xfm", - "-omat", full2cropT1w_mat, - "-concat", xfm_ACPC_vars["mats_T{}w".format(t)]["full2crop"], # TODO Was this messed up between 2022-08-11 and 2022-08-23? 
- xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] - ) - run_FSL_sh_script( - j_args, logger, "convert_xfm", - "-omat", preBIBS_nonACPC_out["T{}w_crop2BIBS_mat".format(t)], - "-concat", full2cropT1w_mat, - xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] - ) - # Do the applywarp FSL command from align_ACPC_1_img (for T2w and not T1w, for non-ACPC) - # applywarp output is optimal_realigned_imgs input - # Apply registration to the T1ws and the T2ws - run_FSL_sh_script(j_args, logger, "applywarp", "--rel", - "--interp=spline", "-i", averaged_imgs["T{}w_avg".format(t)], # cropped_imgs[t], - "-r", xfm_non_ACPC_vars["ref_img"].format(t), - "--premat=" + preBIBS_nonACPC_out["T{}w_crop2BIBS_mat".format(t)], # full2BIBS_mat, # - "-o", preBIBS_nonACPC_out["T{}w".format(t)]) - - # Outputs: 1 .mat file for ACPC and 1 for non-ACPC (only retain the -to-T1w .mat file after this point) - - # Return the best of the 2 resized images - # pdb.set_trace() # TODO Add "debug" flag? - return optimal_realigned_imgs(preBIBS_nonACPC_out, - preBIBS_ACPC_out, j_args, logger) - - def run_FSL_sh_script(j_args, logger, fsl_fn_name, *fsl_args): """ Run any FSL function in a Bash subprocess, unless its outputs exist and the @@ -1120,6 +1203,20 @@ def split_2_exts(a_path): base, ext2 = os.path.splitext(a_path) base, ext1 = os.path.splitext(base) return base, ext1 + ext2 + + +def sum_of_2_sums_of_squares_of(np_vector1, np_vector2, a_mean): + """ + _summary_ + :param np_vector1: Numpy array of numbers + :param np_vector2: Numpy array of numbers + :param a_mean: Float, _description_ + :return: Float, _description_ + """ + total_sum = 0 + for each_vec in (np_vector1, np_vector2): + total_sum += sum(np.square(each_vec - a_mean)) + return total_sum def valid_float_0_to_1(val): @@ -1386,7 +1483,7 @@ def verify_CABINET_inputs_exist(sub_ses, j_args, logger): subject_heads = [os.path.join( j_args["optional_out_dirs"]["bibsnet"], *sub_ses, "input", "*{}*_000{}.nii.gz".format("_".join(sub_ses), t1or2 - 1) - ) for t1or2 in (1, 2)] + ) for t1or2 in (1, 2)] # TODO Make this work for T1-only or T2-only out_paths_BIBSnet = [os.path.join(j_args["optional_out_dirs"]["bibsnet"], "*{}*.nii.gz".format(x)) for x in ("aseg", "mask")] From 5f057e11f88da0f4033b6980a2a5a872790cff1f Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Fri, 14 Oct 2022 21:20:35 -0500 Subject: [PATCH 02/21] T1- and T2-only modes ran without error in tests Ran T1- and T2-only modes for prebibsnet (and postbibsnet, using old bibsnet outputs) without error on an HBCD subject session. 
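
In T1-only and T2-only modes, preBIBSnet now applies a single transform into
BIBS space instead of comparing ACPC and non-ACPC registrations. As a rough
sketch of that one registration step (the flags match the run_FSL_sh_script
call added below; the input filename here is hypothetical):

    run_FSL_sh_script(  # Move the lone T1w (or T2w) into BIBS space
        j_args, logger, "flirt", "-in", "cropped_T1w.nii.gz",
        "-ref", "data/MNI_templates/INFANT_MNI_T1_1mm.nii.gz",
        "-applyisoxfm", "1", "-init", "data/identity_matrix.mat",
        "-o", "preBIBSnet_final_0000.nii.gz",
        "-omat", "full_crop_T1w_to_BIBS_template.mat"
    )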
Changes listed below

- Added functionality so T1- and T2-only modes run without error
- Changed functionality so preBIBSnet crop2BIBS template .mat file is saved
  for any mode
- Changed T1-/T2-only mode preBIBSnet output file names so they work with
  later stages
- Changed the --help message for --model-number
- Changed the last digit of the preBIBSnet final image so it's 0 if in
  T1-/T2-only mode
- Removed completed TODO comments
- Removed the triply-nested all_regn_info dict and replaced it with
  individual ACPC and nonACPC dicts
- Removed commented-out old code from working on T1-/T2-only modes
---
 run.py               | 108 ++++++++++++-----
 src/param-types.json |   2 +-
 src/utilities.py     | 272 +++++++++++++++++++++++--------------
 3 files changed, 242 insertions(+), 140 deletions(-)

diff --git a/run.py b/run.py
index fdf5788..9011703 100755
--- a/run.py
+++ b/run.py
@@ -6,7 +6,7 @@
 Connectome ABCD-XCP niBabies Imaging nnu-NET (CABINET)
 Greg Conan: gconan@umn.edu
 Created: 2021-11-12
-Updated: 2022-10-12
+Updated: 2022-10-14
 """
 # Import standard libraries
 import argparse
@@ -48,17 +48,19 @@ def find_myself(flg):
 # Custom local imports
 from src.utilities import (
     apply_final_prebibsnet_xfms, as_cli_attr, as_cli_arg, correct_chirality,
-    create_anatomical_average, crop_image, dict_has, dilate_LR_mask,
-    ensure_prefixed, exit_with_time_info, extract_from_json,
-    get_age_closest_to, get_and_make_preBIBSnet_work_dirs,
-    get_optional_args_in, get_stage_name, get_subj_ID_and_session,
+    create_anatomical_average, crop_image, dilate_LR_mask, ensure_prefixed,
+    exit_with_time_info, extract_from_json, get_age_closest_to,
+    get_and_make_preBIBSnet_work_dirs, get_optional_args_in,
+    get_preBIBS_final_img_fpath_T, get_stage_name, get_subj_ID_and_session,
     get_template_age_closest_to, make_given_or_default_dir,
-    only_Ts_needed_for_bibsnet_model, register_all_preBIBSnet_imgs,
+    only_Ts_needed_for_bibsnet_model, register_preBIBSnet_imgs_ACPC,
+    register_preBIBSnet_imgs_non_ACPC, run_FSL_sh_script,
     run_all_stages, valid_readable_json, validate_parameter_types,
     valid_readable_dir, valid_subj_ses_ID, valid_whole_number
 )
 
 
+
 def main():
     start_time = datetime.now()  # Time how long the script takes
     logger = make_logger()  # Make object to log error/warning/status messages
@@ -156,8 +158,9 @@ def get_params_from_JSON(stage_names, logger):
         "-model", "--model-number", "--bibsnet-model",
         # default=default_model_num_bibsnet,  # TODO By default, get the model number(s) for the participants.tsv file
         type=valid_whole_number, dest="model",
-        help=("Model/task number for BIBSnet. By default, this will be {}."
-              .format(default_model_num_bibsnet))
+        help=("Model/task number for BIBSnet. By default, this will be "
+              "inferred from {}/data/models.csv based on which data (T1, T2, "
+              "or both) exists in the --bids-dir.".format(SCRIPT_DIR))
     )
     parser.add_argument(
         "--overwrite", "--overwrite-old",  # TODO Change this to "-skip"
@@ -261,7 +264,7 @@ def validate_cli_args(cli_args, stage_names, parser, logger):
     for ix in range(len(sub_ses_IDs)):
         # Create a list with the subject ID and (if it exists) the session ID
         sub_ses = [sub_ses_IDs[ix]["subject"]]
-        if dict_has(sub_ses_IDs[ix], "session"):
+        if sub_ses_IDs[ix].get("session"):
             sub_ses.append(sub_ses_IDs[ix]["session"])
 
         j_args = ensure_j_args_has_bids_subdirs(
@@ -275,9 +278,8 @@ def validate_cli_args(cli_args, stage_names, parser, logger):
                          "your participant_label and session are correct."
                          .format(sub_ses_dir))
 
-        # Using dict_has instead of easier ensure_dict_has so that the user only
-        # needs a participants.tsv file if they didn't specify age_months
-        if not dict_has(j_args["common"], "age_months"):
+        # User only needs participants.tsv if they didn't specify age_months
+        if not j_args["common"].get("age_months"):
             sub_ses_IDs[ix]["age_months"] = read_from_participants_tsv(
                 j_args, logger, "age", *sub_ses
             )
@@ -335,10 +337,7 @@ def get_df_with_valid_bibsnet_models(sub_ses_ID):
 
     # Exclude any models which require (T1w or T2w) data the user lacks
     for t in only_Ts_needed_for_bibsnet_model(sub_ses_ID):
-        # print("has_T{}w: {}".format(t, sub_ses_ID["has_T{}w".format(t)]))
-        # if not sub_ses_ID["has_T{}w".format(t)]:
-        #     models_df = models_df.loc[~models_df["T{}w".format(t)]]
-        models_df = select_model_with_data_for_this_T(models_df, t, False)
+        models_df = select_model_with_data_for_T(t, models_df, sub_ses_ID["has_T{}w".format(t)])
     return models_df
 
 
@@ -364,19 +363,18 @@ def validate_model_num(cli_args, data_path_BIDS_T, models_df, sub_ses_ID, parser):
                          .format(t, model, data_path_BIDS_T[t]))
 
     if not model:  # Get default model number if user did not give one
-        # print(models_df[models_df["is_default"]])
         models_df = models_df[models_df["is_default"]]
         if len(models_df) > 1:
             for t in (1, 2):
-                models_df = select_model_with_data_for_this_T(
-                    models_df, t, sub_ses_ID["has_T{}w".format(t)]
+                models_df = select_model_with_data_for_T(
+                    t, models_df, sub_ses_ID["has_T{}w".format(t)]
                 )
         model = models_df.squeeze()["model_num"]
 
     return model
 
 
-def select_model_with_data_for_this_T(models_df, t, has_T):
+def select_model_with_data_for_T(t, models_df, has_T):
     """
     :param models_df: pandas.DataFrame with columns called "T1w" and "T2w"
                       with bool values describing which T(s) a model needs
@@ -518,6 +516,7 @@ def run_preBIBSnet(j_args, logger):
     """
     completion_msg = "The anatomical images have been {} for use in BIBSnet"
     preBIBSnet_paths = get_and_make_preBIBSnet_work_dirs(j_args)
+    sub_ses = get_subj_ID_and_session(j_args)
 
     # If there are multiple T1ws/T2ws, then average them
     create_anatomical_average(preBIBSnet_paths["avg"])  # TODO make averaging optional with later BIBSnet model?
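
As context for the model-selection hunks above, this is roughly how the
default model number gets resolved against data/models.csv — a sketch, not
code from this patch, assuming pandas and the models.csv added in PATCH 01
(the has_T values are made up for illustration):

    import pandas as pd

    models_df = pd.read_csv("data/models.csv")
    has_T = {1: True, 2: False}  # e.g. a T1w-only subject session
    eligible = models_df[models_df["is_default"]]
    for t in (1, 2):
        T_col = eligible["T{}w".format(t)]
        eligible = eligible.loc[T_col if has_T[t] else ~T_col]
    print(eligible.squeeze()["model_num"])  # prints 514, the T1-only model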
@@ -538,13 +537,34 @@ def run_preBIBSnet(j_args, logger): reference_img = os.path.join(SCRIPT_DIR, "data", "MNI_templates", "INFANT_MNI_T{}_1mm.nii.gz") # TODO Pipeline should verify that these exist before running id_mx = os.path.join(SCRIPT_DIR, "data", "identity_matrix.mat") + resolution = "1" if j_args["ID"]["has_T1w"] and j_args["ID"]["has_T2w"]: + msg_xfm = "Arguments for {}ACPC image transformation:\n{}" + + # Non-ACPC + regn_non_ACPC = register_preBIBSnet_imgs_non_ACPC( + cropped, preBIBSnet_paths["resized"], reference_img, + id_mx, resolution, j_args, logger + ) + if j_args["common"]["verbose"]: + logger.info(msg_xfm.format("non-", regn_non_ACPC["vars"])) + + # ACPC + regn_ACPC = register_preBIBSnet_imgs_ACPC( + cropped, preBIBSnet_paths["resized"], regn_non_ACPC["vars"], + crop2full, preBIBSnet_paths["avg"], j_args, logger + ) + """ all_registration_vars = register_all_preBIBSnet_imgs( cropped, preBIBSnet_paths["resized"], reference_img, id_mx, crop2full, preBIBSnet_paths["avg"], j_args, logger ) + """ + if j_args["common"]["verbose"]: + logger.info(msg_xfm.format("", regn_ACPC["vars"])) + transformed_images = apply_final_prebibsnet_xfms( - all_registration_vars, preBIBSnet_paths["avg"], j_args, logger + regn_non_ACPC, regn_ACPC, preBIBSnet_paths["avg"], j_args, logger ) logger.info(completion_msg.format("resized")) @@ -552,17 +572,50 @@ def run_preBIBSnet(j_args, logger): #elif j_args["ID"]["has_T1w"]: # transformed_images = "" else: + # Define variables and paths needed for the final (only) xfm needed + t1or2 = 1 if j_args["ID"]["has_T1w"] else 2 + # img_ext = split_2_exts(cropped[t1or2])[-1] + outdir = os.path.join(preBIBSnet_paths["resized"], "xfms") + os.makedirs(outdir, exist_ok=True) + out_img = get_preBIBS_final_img_fpath_T(t1or2, outdir, j_args["ID"]) + out_mat = os.path.join(outdir, "full_crop_T{}w_to_BIBS_template.mat" + .format(t1or2)) + + run_FSL_sh_script( # Move the T1 (or T2) into BIBS space + j_args, logger, "flirt", "-in", cropped[t1or2], + "-ref", reference_img.format(t1or2), "-applyisoxfm", resolution, + "-init", id_mx, # TODO Should this be a matrix that does a transformation?? + "-o", out_img, "-omat", out_mat + ) + # TODO Add T1-to-BIBS and T2-to-BIBS functionality without T2-to-T1 - transformed_images = {"T{}w".format(t): cropped[t] for t in cropped.keys()} + transformed_images = {"T{}w".format(t1or2): out_img, + "T{}w_crop2BIBS_mat".format(t1or2): out_mat} # TODO Copy this whole block to postBIBSnet, so it copies everything it needs first - # Copy into BIBSnet input dir to transformed_images[T1w] - for t in only_Ts_needed_for_bibsnet_model(j_args["ID"]): # (1, 2): # TODO Make this also work for T1-only or T2-only + # Copy preBIBSnet outputs into BIBSnet input dir + for t in only_Ts_needed_for_bibsnet_model(j_args["ID"]): tw = "T{}w".format(t) + + # Copy image files out_nii_fpath = j_args["optimal_resized"][tw] os.makedirs(os.path.dirname(out_nii_fpath), exist_ok=True) - if not os.path.exists(out_nii_fpath): # j_args["common"]["overwrite"] or + if j_args["common"]["overwrite"]: # TODO Should --overwrite delete old image file(s)? 
+ os.remove(out_nii_fpath) + if not os.path.exists(out_nii_fpath): shutil.copy2(transformed_images[tw], out_nii_fpath) + + # Copy .mat into postbibsnet dir with the same name regardless of which + # is chosen, so postBIBSnet can use the correct/chosen .mat file + concat_mat = transformed_images["T{}w_crop2BIBS_mat".format(t)] + out_mat_fpath = os.path.join( # TODO Pass this in (or out) from the beginning so we don't have to build the path twice (once here and once in postBIBSnet) + j_args["optional_out_dirs"]["postbibsnet"], + *sub_ses, "preBIBSnet_" + os.path.basename(concat_mat) + ) + if not os.path.exists(out_mat_fpath): + shutil.copy2(concat_mat, out_mat_fpath) + if j_args["common"]["verbose"]: + logger.info("Copying {} to {}".format(concat_mat, out_mat_fpath)) logger.info("PreBIBSnet has completed") return j_args @@ -719,10 +772,13 @@ def run_left_right_registration(j_args, sub_ses, age_months, t1or2, logger): tmpl_head = os.path.join(chiral_in_dir, "{}mo_T{}w_acpc_dc_restore.nii.gz") tmpl_mask = os.path.join(chiral_in_dir, "{}mo_template_LRmask.nii.gz") # "brainmasks", {}mo_template_brainmask.nii.gz") + # Grab the first resized T?w from preBIBSnet to use for L/R registration + last_digit = (t1or2 - 1 if j_args["ID"]["has_T1w"] + and j_args["ID"]["has_T2w"] else 0) first_subject_head = glob(os.path.join( j_args["optional_out_dirs"]["bibsnet"], *sub_ses, "input", - "*{}*_000{}.nii.gz".format("_".join(sub_ses), t1or2 - 1) + "*{}*_000{}.nii.gz".format("_".join(sub_ses), last_digit) ))[0] # Make postBIBSnet output directory for this subject/session diff --git a/src/param-types.json b/src/param-types.json index 6c98c74..a790f9a 100644 --- a/src/param-types.json +++ b/src/param-types.json @@ -17,7 +17,7 @@ "code_dir": "existing_directory_path", "model": "str", "nnUNet_predict_path": "existing_file_path", - "task": "str" + "singularity_image_path": "existing_file_path" }, "nibabies": { diff --git a/src/utilities.py b/src/utilities.py index 772d145..07c9d97 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -5,7 +5,7 @@ Common source for utility functions used by CABINET :) Greg Conan: gconan@umn.edu Created: 2021-11-12 -Updated: 2022-10-12 +Updated: 2022-10-14 """ # Import standard libraries import argparse @@ -26,8 +26,7 @@ LEFT = "Left-" RIGHT = "Right-" -# Other constants: Directory containing the main pipeline script, and -# SLURM-/SBATCH-related arguments' default names +# Other constant: Directory containing the main pipeline script SCRIPT_DIR = os.path.dirname(os.path.dirname(__file__)) @@ -101,9 +100,8 @@ def always_true(*_): def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, avg_imgs, outputs, t, j_args, logger): - outputs["T{}w".format(t)] = os.path.join( - xfm_ACPC_vars["out_dir"], - "preBIBSnet_final_000{}.nii.gz".format(t-1) + outputs["T{}w".format(t)] = get_preBIBS_final_img_fpath_T( + t, xfm_ACPC_vars["out_dir"], j_args["ID"] ) # Concatenate rigidbody2acpc.mat and registration (identity/cropT2tocropT1.mat) @@ -148,23 +146,23 @@ def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, outputs, t, full2crop_ACPC, j_args, logger): # TODO MODULARIZE (put this into a function to call once for ACPC and once for non to eliminate redundancy?) 
- outputs["T{}w".format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], - "preBIBSnet_final_000{}.nii.gz".format(t-1) + outputs["T{}w".format(t)] = get_preBIBS_final_img_fpath_T( + t, xfm_non_ACPC_vars["out_dir"], j_args["ID"] ) # Do convert_xfm to combine 2 .mat files (non-ACPC # registration_T2w_to_T1w's cropT2tocropT1.mat, and then non-ACPC # registration_T2w_to_T1w's crop_T1_to_BIBS_template.mat) outputs["T{}w_crop2BIBS_mat".format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], "full_crop_T{}w_to_BIBS_template.mat".format(t) # TODO Changed this back to full_crop on 2022-08-30 + xfm_non_ACPC_vars["out_dir"], + "full_crop_T{}w_to_BIBS_template.mat".format(t) # NOTE Changed this back to full_crop on 2022-08-30 ) full2cropT1w_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], "full2cropT1w.mat") run_FSL_sh_script( j_args, logger, "convert_xfm", "-omat", full2cropT1w_mat, - "-concat", full2crop_ACPC, # NOTE The choice between 2022-08-11 and 2022-08-23 to use ACPC's full2crop probably was NOT a problem because it works + "-concat", full2crop_ACPC, xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] ) run_FSL_sh_script( @@ -177,20 +175,28 @@ def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, # applywarp output is optimal_realigned_imgs input # Apply registration to the T1ws and the T2ws run_FSL_sh_script(j_args, logger, "applywarp", "--rel", - "--interp=spline", "-i", avg_imgs["T{}w_avg".format(t)], # cropped_imgs[t], + "--interp=spline", "-i", avg_imgs["T{}w_avg".format(t)], "-r", xfm_non_ACPC_vars["ref_img"].format(t), - "--premat=" + outputs["T{}w_crop2BIBS_mat".format(t)], # full2BIBS_mat, # + "--premat=" + outputs["T{}w_crop2BIBS_mat".format(t)], "-o", outputs["T{}w".format(t)]) return outputs -def apply_final_prebibsnet_xfms(all_regn_info, averaged_imgs, j_args, logger): +def apply_final_prebibsnet_xfms(regn_non_ACPC, regn_ACPC, averaged_imgs, + j_args, logger): """ - :param all_regn_info: Dict mapping "ACPC" and "non-ACPC" to dicts mapping - "img_paths" to a dict of paths to image files and - "vars" to a dict of other variables. - {"ACPC": {"vars": {...}, "imgs": {...}}, "non-ACPC":} - :param averaged_imgs: + Resize the images to match the dimensions of images trained in the model, + and ensure that the first image (presumably a T1) is co-registered to the + second image (presumably a T2) before resizing. Use multiple alignments + of both images, and return whichever one is better (higher eta squared) + :param regn_non_ACPC: Dict mapping "img_paths" to a dict of paths to image + files and "vars" to a dict of other variables. + {"vars": {...}, "imgs": {...}} + :param regn_ACPC: Dict mapping "img_paths" to a dict of paths to image + files and "vars" to a dict of other variables. 
+ {"vars": {...}, "imgs": {...}} + :param averaged_imgs: Dictionary mapping ints, (T) 1 or 2, to strings + (valid paths to existing image files to resize) :param j_args: Dictionary containing all args from parameter .JSON file :param logger: logging.Logger object to show messages and raise warnings :return: _type_, _description_ @@ -199,22 +205,18 @@ def apply_final_prebibsnet_xfms(all_regn_info, averaged_imgs, j_args, logger): out_non_ACPC = dict() for t in (1, 2): - # Apply ACPC-then-registration transforms for this subject session (and T) + # Apply ACPC-then-registration transforms for this subject session & T out_ACPC.update(apply_final_ACPC_xfm( - all_regn_info["ACPC"]["vars"], - all_regn_info["ACPC"]["img_paths"], + regn_ACPC["vars"], regn_ACPC["img_paths"], averaged_imgs, out_ACPC, t, j_args, logger )) # Retrieve path to ACPC full2crop.mat file (to use for non-ACPC xfms) - # from the very (probably overly) nested dict of registration vars - full2crop_ACPC = all_regn_info["ACPC"]["vars"]["mats_T{}w".format(t) - ]["full2crop"] + full2crop_ACPC = regn_ACPC["vars"]["mats_T{}w".format(t)]["full2crop"] # Apply registration-only transforms for this subject session (and T) out_non_ACPC.update(apply_final_non_ACPC_xfm( - all_regn_info["non_ACPC"]["vars"], - all_regn_info["non_ACPC"]["img_paths"], + regn_non_ACPC["vars"], regn_non_ACPC["img_paths"], averaged_imgs, out_non_ACPC, t, full2crop_ACPC, j_args, logger )) @@ -401,7 +403,7 @@ def create_anatomical_average(avg_params): "T1w_avg": String, average T1w output file path "T2w_avg": String, average T2w output file path} """ - for t in (1, 2): # TODO Make this also work for T1-only or T2-only. This may not even need to be changed + for t in (1, 2): if avg_params["T{}w_input".format(t)]: register_and_average_files(avg_params["T{}w_input".format(t)], avg_params["T{}w_avg".format(t)]) @@ -706,15 +708,49 @@ def get_optional_args_in(a_dict): return optional_args -def get_optimal_resized_paths(sub_ses, bibsnet_out_dir): - # 1. Symlinks to resized images chosen by the preBIBSnet cost function - input_dir_BIBSnet = os.path.join(bibsnet_out_dir, *sub_ses, "input") - return {"T{}w".format(t): os.path.join( - input_dir_BIBSnet, "{}_optimal_resized_000{}.nii.gz" - .format("_".join(sub_ses), t - 1) - ) for t in (1, 2)} # TODO Make this also work for T1-only or T2-only by not building unneeded paths? 
+def get_optimal_resized_paths(sub_ses, j_args): # bibsnet_out_dir): + """ + :param sub_ses: List with either only the subject ID str or the session too + :param j_args: Dict mapping (A) "optional_out_dirs" to a dict mapping + "bibsnet" to the bibsnet derivatives dir path, and + (B) "ID" to a dict mapping "has_T1w" and "has_T2w" to bools + :return: Dict mapping "T1w" and "T2w" to their respective optimal (chosen + by the cost function) resized (by prebibsnet) image file paths + """ + input_dir_BIBSnet = os.path.join(j_args["optional_out_dirs"]["bibsnet"], + *sub_ses, "input") + return {"T{}w".format(t): os.path.join(input_dir_BIBSnet, + "{}_optimal_resized_000{}.nii.gz".format( + "_".join(sub_ses), + get_preBIBS_final_digit_T(t, j_args["ID"]) + ) + ) for t in only_Ts_needed_for_bibsnet_model(j_args["ID"])} +def get_preBIBS_final_digit_T(t, sub_ses_ID): + """ + :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) + :param sub_ses_ID: _type_, _description_ + :return: Int, the last digit of the preBIBSnet final image filename: 0 or 1 + """ + return (t - 1 if sub_ses_ID["has_T1w"] + and sub_ses_ID["has_T2w"] else 0) + + +def get_preBIBS_final_img_fpath_T(t, parent_dir, sub_ses_ID): + """ + Running in T1-/T2-only mode means the image name should always be + preBIBSnet_final_0000.nii.gz and otherwise it's _000{t-1}.nii.gz + :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) + :param parent_dir: + :param sub_ses_ID: + :return: + """ + return os.path.join(parent_dir, + "preBIBSnet_final_000{}.nii.gz".format( + get_preBIBS_final_digit_T(t, sub_ses_ID))) + + def get_spatial_resolution_of(image_fpath, j_args, logger, fn_name="fslinfo"): """ :param j_args: Dictionary containing all args from parameter .JSON file @@ -881,49 +917,23 @@ def optimal_realigned_imgs(xfm_imgs_non_ACPC, xfm_imgs_ACPC_and_reg, j_args, log logger.info(msg.format("ACPC and", optimal_resize["T1w"], optimal_resize["T2w"])) # TODO Verify that these print the absolute path - # Copy files into postbibsnet dir with the same name regardless of which - # is chosen, so postBIBSnet can use the correct/chosen .mat file - concat_mat = optimal_resize["T1w_crop2BIBS_mat"] - # TODO Rename T2w_crop2BIBS.mat to T2w_crop_to_T1w_to_BIBS.mat or something - out_mat_fpath = os.path.join( # TODO Pass this in (or out) from the beginning so we don't have to build the path twice (once here and once in postBIBSnet) - j_args["optional_out_dirs"]["postbibsnet"], - *sub_ses, "preBIBSnet_" + os.path.basename(concat_mat) - ) - if not os.path.exists(out_mat_fpath): - shutil.copy2(concat_mat, out_mat_fpath) # TODO Put this block elsewhere so the block runs if T1-/T2-only skips this function (which it should) - if j_args["common"]["verbose"]: - logger.info("Copying {} to {}".format(concat_mat, out_mat_fpath)) return optimal_resize -def register_all_preBIBSnet_imgs(cropped_imgs, output_dir, ref_image, ident_mx, - crop2full, averaged_imgs, j_args, logger): +def register_preBIBSnet_imgs_ACPC(cropped_imgs, output_dir, xfm_non_ACPC_vars, + crop2full, averaged_imgs, j_args, logger): """ - Resize the images to match the dimensions of images trained in the model, - and ensure that the first image (presumably a T1) is co-registered to the - second image (presumably a T2) before resizing. 
Use multiple alignments - of both images, and return whichever one is better (higher eta squared) :param cropped_imgs: Dictionary mapping ints, (T) 1 or 2, to strings (valid paths to existing image files to resize) :param output_dir: String, valid path to a dir to save resized images into - :param ref_images: Dictionary mapping string keys to valid paths to real - image file strings for "ACPC" (alignment) and (T2-to-T1) - "reg"(istration) for flirt to use as a reference image. - The ACPC string has a "{}" in it to represent (T) 1 or 2 - :param ident_mx: String, valid path to existing identity matrix .mat file + :param xfm_non_ACPC_vars: Dict TODO Fix this function description :param crop2full: String, valid path to existing crop2full.mat file :param averaged_imgs: Dictionary mapping ints, (T) 1 or 2, to strings (valid paths to existing image files to resize) :param j_args: Dictionary containing all args from parameter .JSON file :param logger: logging.Logger object to show messages and raise warnings """ - # TODO Add 'if' to skip most of the functionality here for T1-only or T2-only - - # Build dictionaries of variables used for image transformations with and - # without ACPC alignment - xfm_non_ACPC_vars = {"out_dir": os.path.join(output_dir, "xfms"), - "resolution": "1", "ident_mx": ident_mx, - "ref_img": ref_image} + # Build dict of variables used for image transformation with ACPC alignment xfm_ACPC_vars = xfm_non_ACPC_vars.copy() xfm_ACPC_vars["out_dir"] = os.path.join(output_dir, "ACPC_align") out_var = "output_T{}w_img" @@ -932,18 +942,8 @@ def register_all_preBIBSnet_imgs(cropped_imgs, output_dir, ref_image, ident_mx, for t, crop_img_path in cropped_imgs.items(): img_ext = split_2_exts(crop_img_path)[-1] - # Non-ACPC input to registration - # for keyname in ("crop_", "reg_input_"): - xfm_non_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path # TODO This variable appears to be unused for non-ACPC - xfm_non_ACPC_vars[reg_in_var.format(t)] = crop_img_path - - # Non-ACPC outputs to registration - outfname = "T{}w_registered_to_T1w".format(t) + img_ext - xfm_non_ACPC_vars[out_var.format(t)] = os.path.join( - xfm_non_ACPC_vars["out_dir"], outfname - ) - # ACPC inputs to align and registration + outfname = "T{}w_registered_to_T1w".format(t) + img_ext xfm_ACPC_vars["crop_T{}w_img".format(t)] = crop_img_path xfm_ACPC_vars[reg_in_var.format(t)] = os.path.join( xfm_ACPC_vars["out_dir"], "ACPC_aligned_T{}w".format(t) + img_ext @@ -952,20 +952,10 @@ def register_all_preBIBSnet_imgs(cropped_imgs, output_dir, ref_image, ident_mx, xfm_ACPC_vars["out_dir"], "ACPC_" + outfname ) - if j_args["common"]["verbose"]: - msg_xfm = "Arguments for {}ACPC image transformation:\n{}" - logger.info(msg_xfm.format("non-", xfm_non_ACPC_vars)) - logger.info(msg_xfm.format("", xfm_ACPC_vars)) - # Make output directories for transformed images - for each_xfm_vars_dict in (xfm_non_ACPC_vars, xfm_ACPC_vars): - os.makedirs(each_xfm_vars_dict["out_dir"], exist_ok=True) - - xfm_imgs_non_ACPC = registration_T2w_to_T1w( - j_args, logger, xfm_non_ACPC_vars, reg_in_var, acpc=False - ) + os.makedirs(xfm_ACPC_vars["out_dir"], exist_ok=True) - # Do direct T1w-T2w alignment + # Do direct T2w-T1w alignment for t in (1, 2): # Run ACPC alignment @@ -974,18 +964,64 @@ def register_all_preBIBSnet_imgs(cropped_imgs, output_dir, ref_image, ident_mx, averaged_imgs["T{}w_avg".format(t)] ) - # T1w-T2w alignment of ACPC-aligned images + # T2w-T1w alignment of ACPC-aligned images xfm_ACPC_and_reg_imgs = registration_T2w_to_T1w( j_args, 
logger, xfm_ACPC_vars, reg_in_var, acpc=True ) # pdb.set_trace() # TODO Add "debug" flag? - # TODO End function here and start a new function below? Maybe put everything above in "register_all_preBIBSnet_imgs" and everything below in "apply_final_preBIBSnet_xfm" ? - return { # A (*very* nested) dict with vars organized to apply final xfm - "ACPC": {"vars": xfm_ACPC_vars, "img_paths": xfm_ACPC_and_reg_imgs}, - "non_ACPC": {"vars": xfm_non_ACPC_vars, "img_paths": xfm_imgs_non_ACPC} - } + return {"vars": xfm_ACPC_vars, "img_paths": xfm_ACPC_and_reg_imgs}, + + +def register_preBIBSnet_imgs_non_ACPC(cropped_imgs, output_dir, ref_image, + ident_mx, resolution, j_args, logger): + """ + :param cropped_imgs: Dictionary mapping ints, (T) 1 or 2, to strings (valid + paths to existing image files to resize) + :param output_dir: String, valid path to a dir to save resized images into + :param ref_images: Dictionary mapping string keys to valid paths to real + image file strings for "ACPC" (alignment) and (T2-to-T1) + "reg"(istration) for flirt to use as a reference image. + The ACPC string has a "{}" in it to represent (T) 1 or 2 + :param ident_mx: String, valid path to existing identity matrix .mat file + :param resolution: + :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings + """ + # TODO Add 'if' to skip most of the functionality here for T1-only or T2-only + + # Build dictionaries of variables used for image transformations with and + # without ACPC alignment + xfm_non_ACPC_vars = {"out_dir": os.path.join(output_dir, "xfms"), + "resolution": resolution, "ident_mx": ident_mx, + "ref_img": ref_image} + out_var = "output_T{}w_img" + reg_in_var = "reg_input_T{}w_img" + + for t, crop_img_path in cropped_imgs.items(): + img_ext = split_2_exts(crop_img_path)[-1] + + # Non-ACPC input to registration + # for keyname in ("crop_", "reg_input_"): + xfm_non_ACPC_vars[reg_in_var.format(t)] = crop_img_path + + # Non-ACPC outputs to registration + outfname = "T{}w_registered_to_T1w".format(t) + img_ext + xfm_non_ACPC_vars[out_var.format(t)] = os.path.join( + xfm_non_ACPC_vars["out_dir"], outfname + ) + + # Make output directory for transformed images + os.makedirs(xfm_non_ACPC_vars["out_dir"], exist_ok=True) + + xfm_imgs_non_ACPC = registration_T2w_to_T1w( + j_args, logger, xfm_non_ACPC_vars, reg_in_var, acpc=False + ) + + # pdb.set_trace() # TODO Add "debug" flag? 
+ + return {"vars": xfm_non_ACPC_vars, "img_paths": xfm_imgs_non_ACPC} def register_and_average_files(input_file_paths, output_file_path): @@ -1030,23 +1066,17 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): :return: Dictionary mapping "T1w" and "T2w" to their respective newly registered image file paths """ - # TODO Add 'if' to skip most of the functionality here for T1-only or T2-only - # String naming the key in xfm_vars mapped to the path # to the image to use as an input for registration inputs_msg = "\n".join(["T{}w: {}".format(t, xfm_vars[reg_input_var.format(t)]) for t in only_Ts_needed_for_bibsnet_model(j_args["ID"])]) - logger.info("Input images for T1w registration:\n" + inputs_msg) - """ - logger.info("Input images for T1w registration:\nT1w: {}\nT2w: {}" - .format(xfm_vars[reg_input_var.format(1)], - xfm_vars[reg_input_var.format(2)])) - """ + logger.info("Input images for T1w registration:\n" + inputs_msg) # Define paths to registration output matrices and images registration_outputs = {"cropT1tocropT1": xfm_vars["ident_mx"], - "cropT2tocropT1": os.path.join(xfm_vars["out_dir"], "cropT2tocropT1.mat")} + "cropT2tocropT1": os.path.join(xfm_vars["out_dir"], + "cropT2tocropT1.mat")} """ ACPC Order: @@ -1057,9 +1087,7 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): 1. T1w Make transformed 2. T2w Make T2w-to-T1w matrix 3. T2w Make transformed - """ - nonACPC_xfm_params_T = dict() - + """ for t in (1, 2): # Define paths to registration output files registration_outputs["T{}w_crop2BIBS_mat".format(t)] = os.path.join( @@ -1086,6 +1114,11 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): # Make transformed T1ws and T2ws if not acpc: # TODO Should this go in its own function? + transform_image_T( + t, (xfm_vars[reg_input_var.format(t)] if t == 1 else + registration_outputs["T2w"]), + xfm_vars, registration_outputs, j_args, logger + ) run_FSL_sh_script( # TODO Should the output image even be created here, or during applywarp? j_args, logger, "flirt", "-in", xfm_vars[reg_input_var.format(t)] if t == 1 else registration_outputs["T2w"], # Input: Cropped image @@ -1099,6 +1132,18 @@ def registration_T2w_to_T1w(j_args, logger, xfm_vars, reg_input_var, acpc): return registration_outputs +def transform_image_T(t, cropped_in_img, xfm_vars, regn_outs, j_args, logger): + run_FSL_sh_script( # TODO Should the output image even be created here, or during applywarp? + j_args, logger, "flirt", + "-in", cropped_in_img, # xfm_vars[reg_input_var.format(t)] if t == 1 else registration_outputs["T2w"], # Input: Cropped image + "-ref", xfm_vars["ref_img"].format(t), + "-applyisoxfm", xfm_vars["resolution"], + "-init", xfm_vars["ident_mx"], # registration_outputs["cropT{}tocropT1".format(t)], + "-o", regn_outs["T{}w_to_BIBS".format(t)], # registration_outputs["T{}w".format(t)], # TODO Should we eventually exclude the (unneeded?) -o flags? + "-omat", regn_outs["T{}w_crop2BIBS_mat".format(t)] + ) + + def reshape_volume_to_array(array_img): """ :param array_img: nibabel.Nifti1Image (or Nifti2Image?) @@ -1161,6 +1206,10 @@ def run_all_stages(all_stages, sub_ses_IDs, start, end, :param ubiquitous_j_args: Dictionary of all args needed by each stage :param logger: logging.Logger object to show messages and raise warnings """ + if ubiquitous_j_args["common"]["verbose"]: + logger.info("All parameters from input args and input .JSON file:\n{}" + .format(ubiquitous_j_args)) + # For every session of every subject... 
running = False
 
     for dict_with_IDs in sub_ses_IDs:
 
@@ -1170,16 +1219,13 @@
         sub_ses_j_args["ID"] = dict_with_IDs
         sub_ses = get_subj_ID_and_session(sub_ses_j_args)
         sub_ses_j_args["optimal_resized"] = get_optimal_resized_paths(
-            sub_ses, ubiquitous_j_args["optional_out_dirs"]["bibsnet"]
+            sub_ses, sub_ses_j_args  # ubiquitous_j_args["optional_out_dirs"]["bibsnet"]
        )
 
         # ...check that all required input files exist for the stages to run
         verify_CABINET_inputs_exist(sub_ses, sub_ses_j_args, logger)
 
         # ...run all stages that the user said to run
-        if sub_ses_j_args["common"]["verbose"]:
-            logger.info("Parameters from input .JSON file:\n{}"
-                        .format(sub_ses_j_args))  # TODO Only print j_args[ID] every time; print everything else in j_args before the sub-ses-loop
         for stage in all_stages:
             name = get_stage_name(stage)
             if name == start:
@@ -1187,8 +1233,8 @@
             if running:
                 stage_start = datetime.now()
                 if sub_ses_j_args["common"]["verbose"]:
-                    logger.info("Now running {} stage on subject {}."
-                                .format(name, " session ".join(sub_ses)))
+                    logger.info("Now running {} stage on:\n{}"
+                                .format(name, sub_ses_j_args["ID"]))
                 sub_ses_j_args = stage(sub_ses_j_args, logger)
                 log_stage_finished(name, stage_start, sub_ses, logger)
             if name == end:
@@ -1483,7 +1529,7 @@ def verify_CABINET_inputs_exist(sub_ses, j_args, logger):
     subject_heads = [os.path.join(
         j_args["optional_out_dirs"]["bibsnet"], *sub_ses, "input",
         "*{}*_000{}.nii.gz".format("_".join(sub_ses), t1or2 - 1)
-    ) for t1or2 in (1, 2)]  # TODO Make this work for T1-only or T2-only
+    ) for t1or2 in only_Ts_needed_for_bibsnet_model(j_args["ID"])]  # TODO Make this work for T1-only or T2-only
     out_paths_BIBSnet = [os.path.join(j_args["optional_out_dirs"]["bibsnet"],
                                       "*{}*.nii.gz".format(x))
                          for x in ("aseg", "mask")]
 

From 5b935d2bf4c518a469b4e57a3f0da8d3ac4a64b7 Mon Sep 17 00:00:00 2001
From: Greg Conan
Date: Fri, 21 Oct 2022 17:54:16 -0500
Subject: [PATCH 03/21] T1- & T2-only modes work on pre- thru postbibsnet

- Successfully tested full segmentation pipeline (prebibsnet through postbibsnet) in T1- and T2-only modes on an HBCD subject session
- Added FSL commands to T1-/T2-only prebibsnet to crop and move into BIBSnet space in one transformation, which improved prebibsnet output image quality significantly over previous code
- Fixed T2-only mode bibsnet input validation bug of looking for the wrong input filename(s)
  - Now the code looks for files ending in _0000.* in T1-only and T2-only modes, and in both _0000.* and _0001.* when a session has T1w and T2w
  - Fixed a similar bug in chirality correction
- Removed "task" (model) variable from container param file
- Added some details to function headers
- Added some TODOs
- Removed commented-out older/unneeded code
---
 parameter-file-container.json |  3 +-
 run.py                        | 89 ++++++++++++++++++++++++-----------
 src/utilities.py              | 77 +++++++++++++++++------------------
 3 files changed, 105 insertions(+), 64 deletions(-)

diff --git a/parameter-file-container.json b/parameter-file-container.json
index a099f7c..936186f 100644
--- a/parameter-file-container.json
+++ b/parameter-file-container.json
@@ -17,8 +17,7 @@
     "model": "3d_fullres",
     "nnUNet_predict_path": "/opt/conda/bin/nnUNet_predict",
     "code_dir": "/home/cabinet/SW/BIBSnet",
-    "singularity_image_path": "/home/feczk001/gconan/placeholder.txt",
-    "task": "512"
+    "singularity_image_path": "/home/feczk001/gconan/placeholder.txt"
   },
 
   "nibabies": {

diff --git a/run.py b/run.py
index 9011703..79ed112 100755
--- a/run.py
+++ b/run.py
@@ -6,7 +6,7 @@ Connectome ABCD-XCP niBabies Imaging nnu-NET (CABINET) Greg Conan: gconan@umn.edu Created: 2021-11-12 -Updated: 2022-10-14 +Updated: 2022-10-21 """ # Import standard libraries import argparse @@ -97,9 +97,7 @@ def get_params_from_JSON(stage_names, logger): :param stage_names: List of strings; each names a stage to run :return: Dictionary containing all parameters from parameter .JSON file """ - # default_brain_z_size = 120 default_end_stage = "postbibsnet" # TODO Change to stage_names[-1] once nibabies and XCPD run from CABINET - default_model_num_bibsnet = 512 msg_stage = ("Name of the stage to run {}. By default, this will be " "the {} stage. Valid choices: {}") parser = argparse.ArgumentParser("CABINET") @@ -156,7 +154,6 @@ def get_params_from_JSON(stage_names, logger): ) parser.add_argument( "-model", "--model-number", "--bibsnet-model", - # default=default_model_num_bibsnet, # TODO By default, get the model number(s) for the participants.tsv file type=valid_whole_number, dest="model", help=("Model/task number for BIBSnet. By default, this will be " "inferred from {}/data/models.csv based on which data (T1, T2, " @@ -188,7 +185,6 @@ def get_params_from_JSON(stage_names, logger): ) parser.add_argument( "-z", "--brain-z-size", action="store_true", # type=valid_whole_number, - # default=default_brain_z_size, help=("Include this flag to infer participants' brain height (z) " "using the participants.tsv brain_z_size column. Otherwise, " "CABINET will estimate the brain height from the participant " @@ -331,7 +327,12 @@ def validate_cli_args(cli_args, stage_names, parser, logger): def get_df_with_valid_bibsnet_models(sub_ses_ID): - + """ + :param sub_ses_ID: Dictionary mapping subject-session-specific input + parameters' names (as strings) to their values for + this subject session; the same as j_args[ID] + :return: pandas.DataFrame of all bibsnet models viable for the input data + """ # Read in models.csv info mapping model num to which T(s) it has models_df = pd.read_csv(os.path.join(SCRIPT_DIR, "data", "models.csv")) @@ -347,7 +348,7 @@ def validate_model_num(cli_args, data_path_BIDS_T, models_df, sub_ses_ID, parser :param data_path_BIDS_T: Dictionary mapping 1 and 2 to the (incomplete) paths to expected T1w and T2w data respectively :param j_args: Dictionary containing all args from parameter .JSON file - :param parser: + :param parser: argparse.ArgumentParser to raise error if anything's invalid :return: Int, validated bibsnet model number """ model = cli_args["model"] # Model number (if given from command line) @@ -423,6 +424,8 @@ def get_brain_z_size(age_months, j_args, logger, buffer=5): def get_all_sub_ses_IDs(j_args, subj_or_none, ses_or_none): """ :param j_args: Dictionary containing all args from parameter .JSON file + :param subj_or_none: String (the subject ID) or a falsey value + :param ses_or_none: String (the session name) or a falsey value :return: List of dicts; each dict maps "subject" to its subject ID string and may also map "session" to its session ID string """ @@ -525,8 +528,6 @@ def run_preBIBSnet(j_args, logger): cropped = dict() crop2full = dict() for t in only_Ts_needed_for_bibsnet_model(j_args["ID"]): - # for t in (1, 2): # TODO Make this also work for T1-only or T2-only - # if j_args["ID"]["has_T{}w".format(t)]: cropped[t] = preBIBSnet_paths["crop_T{}w".format(t)] crop2full[t] = crop_image(preBIBSnet_paths["avg"]["T{}w_avg".format(t)], cropped[t], j_args, logger) @@ -534,10 +535,12 @@ def run_preBIBSnet(j_args, logger): # Resize T1w and T2w 
images if running a BIBSnet model using T1w and T2w # TODO Make ref_img an input parameter if someone wants a different reference image? + # TODO Pipeline should verify that reference_img files exist before running reference_img = os.path.join(SCRIPT_DIR, "data", "MNI_templates", - "INFANT_MNI_T{}_1mm.nii.gz") # TODO Pipeline should verify that these exist before running + "INFANT_MNI_T{}_1mm.nii.gz") id_mx = os.path.join(SCRIPT_DIR, "data", "identity_matrix.mat") - resolution = "1" + # TODO Resolution is hardcoded; infer it or get it from the command-line + resolution = "1" if j_args["ID"]["has_T1w"] and j_args["ID"]["has_T2w"]: msg_xfm = "Arguments for {}ACPC image transformation:\n{}" @@ -554,12 +557,6 @@ def run_preBIBSnet(j_args, logger): cropped, preBIBSnet_paths["resized"], regn_non_ACPC["vars"], crop2full, preBIBSnet_paths["avg"], j_args, logger ) - """ - all_registration_vars = register_all_preBIBSnet_imgs( - cropped, preBIBSnet_paths["resized"], reference_img, - id_mx, crop2full, preBIBSnet_paths["avg"], j_args, logger - ) - """ if j_args["common"]["verbose"]: logger.info(msg_xfm.format("", regn_ACPC["vars"])) @@ -569,8 +566,6 @@ def run_preBIBSnet(j_args, logger): logger.info(completion_msg.format("resized")) # If running a T1w-only or T2w-only BIBSnet model, skip registration/resizing - #elif j_args["ID"]["has_T1w"]: - # transformed_images = "" else: # Define variables and paths needed for the final (only) xfm needed t1or2 = 1 if j_args["ID"]["has_T1w"] else 2 @@ -578,17 +573,43 @@ def run_preBIBSnet(j_args, logger): outdir = os.path.join(preBIBSnet_paths["resized"], "xfms") os.makedirs(outdir, exist_ok=True) out_img = get_preBIBS_final_img_fpath_T(t1or2, outdir, j_args["ID"]) + crop2BIBS_mat = os.path.join(outdir, + "crop2BIBS_T{}w_only.mat".format(t1or2)) out_mat = os.path.join(outdir, "full_crop_T{}w_to_BIBS_template.mat" .format(t1or2)) - run_FSL_sh_script( # Move the T1 (or T2) into BIBS space + run_FSL_sh_script( # Get xfm moving the T1 (or T2) into BIBS space j_args, logger, "flirt", "-in", cropped[t1or2], "-ref", reference_img.format(t1or2), "-applyisoxfm", resolution, - "-init", id_mx, # TODO Should this be a matrix that does a transformation?? - "-o", out_img, "-omat", out_mat + "-init", id_mx, # TODO Should this be a matrix that does a transformation? 
+ "-omat", crop2BIBS_mat ) - # TODO Add T1-to-BIBS and T2-to-BIBS functionality without T2-to-T1 + # Invert crop2full to get full2crop + # TODO Move this to right after making crop2full, then delete the + # duplicated functionality in align_ACPC_1_image + full2crop = os.path.join( + os.path.dirname(preBIBSnet_paths["avg"]["T{}w_avg".format(t)]), + "full2crop_T{}w_only.mat".format(t) + ) + run_FSL_sh_script(j_args, logger, "convert_xfm", "-inverse", + crop2full[t], "-omat", full2crop) + + # - Concatenate crop .mat to out_mat (in that order) and apply the + # concatenated .mat to the averaged image as the output + # - Treat that concatenated output .mat as the output to pass + # along to postBIBSnet, and the image output to BIBSnet + run_FSL_sh_script( # Combine ACPC-alignment with robustFOV output + j_args, logger, "convert_xfm", "-omat", out_mat, + "-concat", full2crop, crop2BIBS_mat + ) + + run_FSL_sh_script( # Apply concat xfm to crop and move into BIBS space + j_args, logger, "applywarp", "--rel", "--interp=spline", + "-i", preBIBSnet_paths["avg"]["T{}w_avg".format(t)], + "-r", reference_img.format(t1or2), + "--premat=" + out_mat, "-o", out_img + ) transformed_images = {"T{}w".format(t1or2): out_img, "T{}w_crop2BIBS_mat".format(t1or2): out_mat} @@ -712,9 +733,18 @@ def run_postBIBSnet(j_args, logger): logger.info("Closest template-age is {} months".format(tmpl_age)) # if age_months > 33: age_months = "34-38" + # For left/right registration, use T1 for T1-only and T2 for T2-only, but + # for T1-and-T2 combined use T2 for <22 months otherwise T1 (img quality) + if j_args["ID"]["has_T1w"] and j_args["ID"]["has_T2w"]: + t1or2 = 2 if int(age_months) < 22 else 1 # NOTE 22 cutoff might change + elif j_args["ID"]["has_T1w"]: + t1or2 = 1 + else: # if j_args["ID"]["has_T2w"]: + t1or2 = 2 + # Run left/right registration script and chirality correction - left_right_mask_nifti_fpath = run_left_right_registration( # NOTE Don't change this when implementing T1-/T2-only - j_args, sub_ses, tmpl_age, 2 if int(age_months) < 22 else 1, logger # NOTE 22 cutoff might change + left_right_mask_nifti_fpath = run_left_right_registration( + j_args, sub_ses, tmpl_age, t1or2, logger ) logger.info("Left/right image registration completed") @@ -850,13 +880,16 @@ def run_chirality_correction(l_r_mask_nifti_fpath, j_args, logger): sys.exit() # Select an arbitrary T1w image path to use to get T1w space - path_T1w = glob(os.path.join(j_args["common"]["bids_dir"], - *sub_ses, "anat", "*_T1w.nii.gz"))[0] + # (unless in T2w-only mode, in which case use an arbitrary T2w image) + t = 2 if not j_args["ID"]["has_T1w"] else 1 + chiral_ref_img_fpath = glob(os.path.join( + j_args["common"]["bids_dir"], *sub_ses, "anat", f"*_T{t}w.nii.gz" + ))[0] # Run chirality correction script nii_outfpath = correct_chirality(seg_BIBSnet_outfiles[0], segment_lookup_table_path, l_r_mask_nifti_fpath, chiral_out_dir, - path_T1w, j_args, logger) + chiral_ref_img_fpath, j_args, logger) return nii_outfpath # chiral_out_dir diff --git a/src/utilities.py b/src/utilities.py index 07c9d97..d77d637 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -5,7 +5,7 @@ Common source for utility functions used by CABINET :) Greg Conan: gconan@umn.edu Created: 2021-11-12 -Updated: 2022-10-14 +Updated: 2022-10-21 """ # Import standard libraries import argparse @@ -68,7 +68,7 @@ def align_ACPC_1_img(j_args, logger, xfm_ACPC_vars, crop2full, output_var, t, # Invert crop2full to get full2crop run_FSL_sh_script(j_args, logger, "convert_xfm", "-inverse", crop2full, - 
"-omat", mats["full2crop"]) + "-omat", mats["full2crop"]) # TODO Move this to right after making crop2full to use it in both T?w-only and here run_FSL_sh_script( # Combine ACPC-alignment with robustFOV output j_args, logger, "convert_xfm", "-omat", mats["full2acpc"], @@ -145,7 +145,6 @@ def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, outputs, t, full2crop_ACPC, j_args, logger): - # TODO MODULARIZE (put this into a function to call once for ACPC and once for non to eliminate redundancy?) outputs["T{}w".format(t)] = get_preBIBS_final_img_fpath_T( t, xfm_non_ACPC_vars["out_dir"], j_args["ID"] ) @@ -321,26 +320,27 @@ def check_and_correct_region(should_be_left, region, segment_name_to_number, def correct_chirality(nifti_input_file_path, segment_lookup_table, left_right_mask_nifti_file, chiral_out_dir, - t1w_path, j_args, logger): + xfm_ref_img, j_args, logger): """ Creates an output file with chirality corrections fixed. - :param nifti_input_file_path: String, path to a segmentation file with possible chirality problems - :param segment_lookup_table: String, path to a FreeSurfer-style look-up table - :param left_right_mask_nifti_file: String, path to a mask file that distinguishes between left and right - :param nifti_output_file_path: String, path to location to write the corrected file - :param t1w_path: String, path to T1w image to use as a reference image - when applying transform + :param nifti_input_file_path: String, path to a segmentation file with + possible chirality problems + :param segment_lookup_table: String, path to FreeSurfer-style look-up table + :param left_right_mask_nifti_file: String, path to a mask file that + distinguishes between left and right + :param nifti_output_file_path: String, path to save the corrected file into + :param xfm_ref_img: String, path to (T1w, unless running in T2w-only mode) + image to use as a reference when applying transform :param j_args: Dictionary containing all args from parameter .JSON file :param logger: logging.Logger object to show messages and raise warnings """ sub_ses = get_subj_ID_and_session(j_args) msg = "{} chirality correction on {}" - nifti_corrected_file_path = os.path.join( - chiral_out_dir, "corrected_" + os.path.basename(nifti_input_file_path) - )# j_args["BIBSnet"]["aseg_outfile"]) - nifti_output_file_path = os.path.join( - chiral_out_dir, "native_" + os.path.basename(nifti_input_file_path) - )# j_args["BIBSnet"]["aseg_outfile"]) + nifti_file_paths = dict() + for which_nii in ("native", "corrected"): + nifti_file_paths[which_nii] = os.path.join(chiral_out_dir, "_".join(( + which_nii, os.path.basename(nifti_input_file_path) + ))) logger.info(msg.format("Running", nifti_input_file_path)) free_surfer_label_to_region = get_id_to_region_mapping(segment_lookup_table) @@ -370,28 +370,30 @@ def correct_chirality(nifti_input_file_path, segment_lookup_table, check_and_correct_region( chirality_voxel == CHIRALITY_CONST["LEFT"], region, segment_name_to_number, new_data, i, j, k) fixed_img = nib.Nifti1Image(new_data, img.affine, img.header) - nib.save(fixed_img, nifti_corrected_file_path) + nib.save(fixed_img, nifti_file_paths["corrected"]) # TODO Make everything below its own function called "reverse_registration" or "revert_to_native" or something # Undo resizing right here (do inverse transform) using RobustFOV so padding isn't necessary; revert aseg to native space - dummy_copy = "_dummy".join(split_2_exts(nifti_corrected_file_path)) - 
shutil.copy2(nifti_corrected_file_path, dummy_copy) + dummy_copy = "_dummy".join(split_2_exts(nifti_file_paths["corrected"])) + shutil.copy2(nifti_file_paths["corrected"], dummy_copy) + t = 2 if not j_args["ID"]["has_T1w"] else 1 - seg_to_T1w_nat = os.path.join(chiral_out_dir, "seg_reg_to_T1w_native.mat") # TODO Change naming to "T2w" in all these files if running T2-only + seg2native = os.path.join(chiral_out_dir, f"seg_reg_to_T{t}w_native.mat") preBIBSnet_mat_glob = os.path.join( j_args["optional_out_dirs"]["postbibsnet"], *sub_ses, - "preBIBSnet_*crop_T1w_to_BIBS_template.mat" # TODO Name this outside of pre- and postBIBSnet then pass it to both + f"preBIBSnet_*crop_T{t}w_to_BIBS_template.mat" # TODO Name this outside of pre- and postBIBSnet then pass it to both ) preBIBSnet_mat = glob(preBIBSnet_mat_glob).pop() run_FSL_sh_script(j_args, logger, "convert_xfm", "-omat", - seg_to_T1w_nat, "-inverse", preBIBSnet_mat) # TODO Define preBIBSnet_mat path outside of stages because it's used by preBIBSnet and postBIBSnet + seg2native, "-inverse", preBIBSnet_mat) # TODO Define preBIBSnet_mat path outside of stages because it's used by preBIBSnet and postBIBSnet - run_FSL_sh_script(j_args, logger, "flirt", "-applyxfm", "-ref", t1w_path, # TODO Change this to T2 if running T2-only; infer that info from the BIDS input anat dir - "-in", dummy_copy, "-init", seg_to_T1w_nat, - "-o", nifti_output_file_path, "-interp", "nearestneighbour") + run_FSL_sh_script(j_args, logger, "flirt", "-applyxfm", + "-ref", xfm_ref_img, "-in", dummy_copy, + "-init", seg2native, "-o", nifti_file_paths["native"], + "-interp", "nearestneighbour") logger.info(msg.format("Finished", nifti_input_file_path)) - return nifti_output_file_path + return nifti_file_paths["native"] def create_anatomical_average(avg_params): @@ -628,7 +630,7 @@ def get_and_make_preBIBSnet_work_dirs(j_args): # Build paths to BIDS anatomical input images and (averaged, # nnU-Net-renamed) output images preBIBSnet_paths["avg"] = dict() - for t in (1, 2) : # TODO Make this also work for T1-only or T2-only by not creating unneeded T dir(s) + for t in (1, 2): # TODO Make this also work for T1-only or T2-only by not creating unneeded T dir(s) preBIBSnet_paths["avg"]["T{}w_input".format(t)] = list() for eachfile in glob(os.path.join(j_args["common"]["bids_dir"], *sub_ses, "anat", "*T{}w*.nii.gz" @@ -730,7 +732,9 @@ def get_optimal_resized_paths(sub_ses, j_args): # bibsnet_out_dir): def get_preBIBS_final_digit_T(t, sub_ses_ID): """ :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) - :param sub_ses_ID: _type_, _description_ + :param sub_ses_ID: Dictionary mapping subject-session-specific input + parameters' names (as strings) to their values for + this subject session; the same as j_args[ID] :return: Int, the last digit of the preBIBSnet final image filename: 0 or 1 """ return (t - 1 if sub_ses_ID["has_T1w"] @@ -742,8 +746,10 @@ def get_preBIBS_final_img_fpath_T(t, parent_dir, sub_ses_ID): Running in T1-/T2-only mode means the image name should always be preBIBSnet_final_0000.nii.gz and otherwise it's _000{t-1}.nii.gz :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) - :param parent_dir: - :param sub_ses_ID: + :param parent_dir: String, valid path to directory to hold output images + :param sub_ses_ID: Dictionary mapping subject-session-specific input + parameters' names (as strings) to their values for + this subject session; the same as j_args[ID] :return: """ return os.path.join(parent_dir, @@ -1199,8 +1205,8 @@ def 
run_all_stages(all_stages, sub_ses_IDs, start, end, """ Run stages sequentially, starting and ending at stages specified by user :param all_stages: List of functions in order where each runs one stage - :param sub_ses_IDs: List of dicts mapping "age_months", "subject", and - "session" to their unique values per subject session + :param sub_ses_IDs: List of dicts mapping "age_months", "subject", + "session", etc. to unique values per subject session :param start: String naming the first stage the user wants to run :param end: String naming the last stage the user wants to run :param ubiquitous_j_args: Dictionary of all args needed by each stage @@ -1526,10 +1532,13 @@ def verify_CABINET_inputs_exist(sub_ses, j_args, logger): # Define globbable paths to prereq files for the script to check out_BIBSnet_seg = os.path.join(j_args["optional_out_dirs"]["bibsnet"], *sub_ses, "output", "*.nii.gz") + all_T_suffixes = ["0000"] + if j_args["ID"]["has_T1w"] and j_args["ID"]["has_T2w"]: + all_T_suffixes.append("0001") # Only check for _0001 file for T1-and-T2 subject_heads = [os.path.join( j_args["optional_out_dirs"]["bibsnet"], *sub_ses, "input", - "*{}*_000{}.nii.gz".format("_".join(sub_ses), t1or2 - 1) - ) for t1or2 in only_Ts_needed_for_bibsnet_model(j_args["ID"])] # TODO Make this work for T1-only or T2-only + "*{}*_{}.nii.gz".format("_".join(sub_ses), suffix_T) + ) for suffix_T in all_T_suffixes] out_paths_BIBSnet = [os.path.join(j_args["optional_out_dirs"]["bibsnet"], "*{}*.nii.gz".format(x)) for x in ("aseg", "mask")] From f9a79e8800d415476d6d7010cd42547676001636 Mon Sep 17 00:00:00 2001 From: tjhendrickson Date: Mon, 24 Oct 2022 14:53:11 -0500 Subject: [PATCH 04/21] Added Tasks 514 and 515 into Dockerfile --- Dockerfile | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index d04baf9..972edba 100755 --- a/Dockerfile +++ b/Dockerfile @@ -113,8 +113,13 @@ ENV RESULTS_FOLDER="/opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models" RUN mkdir -p /opt/nnUNet/nnUNet_raw_data_base/ /opt/nnUNet/nnUNet_raw_data_base/nnUNet_preprocessed /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet /home/cabinet/data #COPY trained_models/Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet -RUN wget https://s3.msi.umn.edu/CABINET_data/Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip -O /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip -RUN cd /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet && unzip -qq Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip +RUN wget https://s3.msi.umn.edu/CABINET_data/Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip -O /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip && \ + wget https://s3.msi.umn.edu/CABINET_data/Task514_BCP_ABCD_Neonates_SynthSeg_T1Only.zip -O /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/Task514_BCP_ABCD_Neonates_SynthSeg_T1Only.zip && \ + wget https://s3.msi.umn.edu/CABINET_data/Task515_BCP_ABCD_Neonates_SynthSeg_T2Only.zip -O /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/Task515_BCP_ABCD_Neonates_SynthSeg_T2Only.zip +RUN cd /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet && \ + unzip -qq Task512_BCP_ABCD_Neonates_SynthSegDownsample.zip && \ + unzip -qq Task514_BCP_ABCD_Neonates_SynthSeg_T1Only.zip && \ + unzip -qq Task515_BCP_ABCD_Neonates_SynthSeg_T2Only.zip COPY run.py 
/home/cabinet/run.py COPY src /home/cabinet/src COPY bin /home/cabinet/bin @@ -127,6 +132,6 @@ COPY requirements.txt /home/cabinet/requirements.txt RUN cd /home/cabinet/ && pip install -r requirements.txt RUN cd /home/cabinet/ && chmod 555 -R run.py bin src parameter-file-application.json parameter-file-container.json -RUN chmod 666 /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/3d_fullres/Task512_BCP_ABCD_Neonates_SynthSegDownsample/nnUNetTrainerV2__nnUNetPlansv2.1/postprocessing.json /home/cabinet/data/dataset_description.json +RUN chmod 666 /opt/nnUNet/nnUNet_raw_data_base/nnUNet_trained_models/nnUNet/3d_fullres/Task*/nnUNetTrainerV2__nnUNetPlansv2.1/postprocessing.json ENTRYPOINT ["/home/cabinet/run.py"] From 5c5fe2e2dfd513c894fa0af24171f412f08c8246 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Tue, 25 Oct 2022 20:50:06 -0500 Subject: [PATCH 05/21] Updated --help message in README.md for T1-/T2-only --- README.md | 93 ++++++++++++++++++++++++++++++++++--------------------- run.py | 8 ++--- 2 files changed, 62 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index f4a7fcb..966ce85 100755 --- a/README.md +++ b/README.md @@ -51,49 +51,73 @@ The BIBSnet portion of CABINET needs a Volta (v), Ampere (a), or Turing (t) NVID ### Command-Line Arguments ``` -usage: CABINET [-h] -jargs PARAMETER_JSON [-participant PARTICIPANT_LABEL] [-age AGE_MONTHS] [-z] - [-end {prebibsnet,bibsnet,postbibsnet}] [-ses SESSION] [--overwrite] - [-start {prebibsnet,bibsnet,postbibsnet}] [-v] [--script-dir SCRIPT_DIR] +usage: CABINET [-h] -jargs PARAMETER_JSON [-participant PARTICIPANT_LABEL] + [-age AGE_MONTHS] [-end {prebibsnet,bibsnet,postbibsnet}] + [-model MODEL] [--overwrite] [-ses SESSION] + [-start {prebibsnet,bibsnet,postbibsnet}] [-v] [-z] + [--script-dir SCRIPT_DIR] bids_dir output_dir {participant} positional arguments: - bids_dir Valid absolute path to existing base study directory containing BIDS-valid input subject data - directories. Example: /path/to/bids/input/ - output_dir Valid absolute path to existing derivatives directory to save each stage's outputs by subject - session into. Example: /path/to/output/derivatives/ - {participant} Processing level. Currently the only choice is 'participant'.See BIDS-Apps specification. + bids_dir Valid absolute path to existing base study directory + containing BIDS-valid input subject data directories. + Example: /path/to/bids/input/ + output_dir Valid absolute path to existing derivatives directory + to save each stage's outputs by subject session into. + Example: /path/to/output/derivatives/ + {participant} Processing level. Currently the only choice is + 'participant'. See BIDS-Apps specification. optional arguments: -h, --help show this help message and exit -jargs PARAMETER_JSON, -params PARAMETER_JSON, --parameter-json PARAMETER_JSON - Valid path to existing readable parameter .JSON file. See README.md and example parameter - .JSON files for more information on parameters. + Required. Valid path to existing readable parameter + .JSON file. See README.md and example parameter .JSON + files for more information on parameters. -participant PARTICIPANT_LABEL, --subject PARTICIPANT_LABEL, -sub PARTICIPANT_LABEL, --participant-label PARTICIPANT_LABEL - The participant's unique subject identifier, without 'sub-'prefix. Example: 'ABC12345' + The participant's unique subject identifier, without + 'sub-' prefix. 
Example: 'ABC12345'
   -age AGE_MONTHS, -months AGE_MONTHS, --age-months AGE_MONTHS
-                        Positive integer, the participant's age in months. For example, -age 5 would mean the
-                        participant is 5 months old.Include this argument unless the age in months is specified inthe
-                        participants.tsv file inside the BIDS input directory.
-  -z, --brain-z-size    Include this flag to infer participants' brain height (z) using the participants.tsv
-                        brain_z_size column. Otherwise, CABINET will estimate the brain height from the participant
-                        age and averages of a large sample of infant brain heights.
+                        Positive integer, the participant's age in months. For
+                        example, -age 5 would mean the participant is 5 months
+                        old. Include this argument unless the age in months is
+                        specified in the participants.tsv file inside the BIDS
+                        input directory.
   -end {prebibsnet,bibsnet,postbibsnet}, --ending-stage {prebibsnet,bibsnet,postbibsnet}
-                        Name of the stage to run last. By default, this will be the postbibsnet stage. Valid choices:
-                        prebibsnet, bibsnet, postbibsnet
-  -ses SESSION, --session SESSION, --session-id SESSION
-                        The name of the session to processes participant data for, without 'ses-'prefix. Example: baselineyear1
+                        Name of the stage to run last. By default, this will
+                        be the postbibsnet stage. Valid choices: prebibsnet,
+                        bibsnet, postbibsnet
+  -model MODEL, --model-number MODEL, --bibsnet-model MODEL
+                        Model/task number for BIBSnet. By default, this will
+                        be inferred from CABINET/data/models.csv based
+                        on which data (T1, T2, or both) exists in the --bids-
+                        dir.
   --overwrite, --overwrite-old
-                        Include this flag to overwrite any previous CABINET outputs in the derivatives sub-
-                        directories. Otherwise, by default CABINET will skip creating any CABINET output files that
-                        already exist in the sub-directories of derivatives.
+                        Include this flag to overwrite any previous CABINET
+                        outputs in the derivatives sub-directories. Otherwise,
+                        by default CABINET will skip creating any CABINET
+                        output files that already exist in the sub-directories
+                        of derivatives.
+  -ses SESSION, --session SESSION, --session-id SESSION
+                        The name of the session to process participant data
+                        for. Example: baseline_year1
   -start {prebibsnet,bibsnet,postbibsnet}, --starting-stage {prebibsnet,bibsnet,postbibsnet}
-                        Name of the stage to run first. By default, this will be the prebibsnet stage. Valid choices:
-                        prebibsnet, bibsnet, postbibsnet
-  -v, --verbose         Include this flag to print detailed information and every command being run by CABINET to
-                        stdout. Otherwise CABINET will only print warnings, errors, and minimal output.
+                        Name of the stage to run first. By default, this will
+                        be the prebibsnet stage. Valid choices: prebibsnet,
+                        bibsnet, postbibsnet
+  -v, --verbose         Include this flag to print detailed information and
+                        every command being run by CABINET to stdout.
+                        Otherwise CABINET will only print warnings, errors,
+                        and minimal output.
+  -z, --brain-z-size    Include this flag to infer participants' brain height
+                        (z) using the participants.tsv brain_z_size column.
+                        Otherwise, CABINET will estimate the brain height from
+                        the participant age and averages of a large sample of
+                        infant brain heights.
   --script-dir SCRIPT_DIR
-                        Valid path to the existing parent directory of this run.py script. Include this argument if
-                        and only if you are running the script as a SLURM/SBATCH job.
+                        Valid path to the existing parent directory of this
+                        run.py script. 
Include this argument if and only if + you are running the script as a SLURM/SBATCH job. ```
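For the `-model` default documented above: a rough sketch of how that lookup could work, assuming `data/models.csv` has boolean `T1w` and `T2w` columns, an `is_default` flag, and a `model_num` column, and assuming an exact match on which modalities the session has (the real filtering lives in `get_df_with_valid_bibsnet_models` and `validate_model_num` in `run.py`):

```python
# Hedged sketch of inferring the default BIBSnet model number from
# data/models.csv; assumes T1w/T2w/is_default/model_num columns and an
# exact match on the session's available modalities.
import pandas as pd

def infer_default_model(models_csv_path, has_T1w, has_T2w):
    models_df = pd.read_csv(models_csv_path)  # TRUE/FALSE parse as booleans
    viable = models_df[(models_df["T1w"] == has_T1w)
                       & (models_df["T2w"] == has_T2w)]
    return int(viable[viable["is_default"]]["model_num"].iloc[0])

# A session with only T2w data would then get the T2-only default model:
# infer_default_model("data/models.csv", has_T1w=False, has_T2w=True)
```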
@@ -117,7 +141,6 @@ The repository contains two parameter files, one recommended to run CABINET insi - `"nnUNet_predict_path"`: string, a valid path to nnUNet_predict executable file. Example: `"/opt/conda/bin/nnUNet_predict"` - `"code_dir"`: string, a valid path to directory containing BIBSnet python wrapper `run.py`. Example: `"/home/cabinet/SW/BIBSnet"` - `"singularity_image_path"`: string, a valid path to BIBSnet singularity image `.sif` file: Example: `"/home/cabinet/user/bibsnet.sif"` -- `"task"`: string naming the BIBSnet task performed by the participant to processes data for. Examples: `"512"` #### "nibabies": [see here](https://nibabies.readthedocs.io/en/latest/index.html) @@ -153,10 +176,10 @@ This has been primarily tested in Singularity. We are less able to provide techn ### Application -We do not recommend running `CABINET` outside of the container for the following: -1. Installing nnU-Net can be complicated -2. Running it inside the container ensures you have the proper versions of all softwares -3. It is hard to diagnose your errors if you are working in a different environment +We do not recommend running `CABINET` outside of the container for the following reasons: +1. Installing nnU-Net can be complicated. +2. Running `CABINET` inside the container ensures you have the proper versions of all software. +3. It is hard to diagnose your errors if you are working in a different environment. However, if you run `CABINET` outside of the container as an application, then you will need to do the following: 1. Download the `data` directory from the `https://s3.msi.umn.edu/CABINET_data/data.zip` URL, unzip it, and move it into your cloned `CABINET` repository directory here: `CABINET/data/` diff --git a/run.py b/run.py index 3fd9cbe..adc0741 100755 --- a/run.py +++ b/run.py @@ -121,7 +121,7 @@ def get_params_from_JSON(stage_names, logger): ) parser.add_argument( "analysis_level", choices=["participant"], # TODO Will we ever need to add group-level analysis functionality? Currently this argument does absolutely nothing - help=("Processing level. Currently the only choice is 'participant'." + help=("Processing level. Currently the only choice is 'participant'. " "See BIDS-Apps specification.") ) @@ -138,7 +138,7 @@ def get_params_from_JSON(stage_names, logger): parser.add_argument( "-participant", "--subject", "-sub", "--participant-label", dest="participant_label", type=valid_subj_ses_ID, - help=("The participant's unique subject identifier, without 'sub-'" + help=("The participant's unique subject identifier, without 'sub-' " "prefix. Example: 'ABC12345'") # TODO Make CABINET able to accept with OR without 'sub-' prefix ) @@ -146,8 +146,8 @@ def get_params_from_JSON(stage_names, logger): parser.add_argument( "-age", "-months", "--age-months", type=valid_whole_number, help=("Positive integer, the participant's age in months. For " - "example, -age 5 would mean the participant is 5 months old." - "Include this argument unless the age in months is specified in" + "example, -age 5 would mean the participant is 5 months old. 
" + "Include this argument unless the age in months is specified in " "the participants.tsv file inside the BIDS input directory.") ) parser.add_argument( From 5c54c62545ff3003e9e2510bd75299775c89bbc7 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Wed, 26 Oct 2022 13:12:09 -0500 Subject: [PATCH 06/21] Fixed bug from extra newline from line 1 --- run.py | 1 - 1 file changed, 1 deletion(-) diff --git a/run.py b/run.py index adc0741..5f9a8c3 100755 --- a/run.py +++ b/run.py @@ -1,4 +1,3 @@ - #!/usr/bin/env python3 # coding: utf-8 From 1b6ae572918c5ced673c62f3e6177f9c0fdb9f83 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Wed, 26 Oct 2022 16:36:45 -0500 Subject: [PATCH 07/21] Minor bug fix, removed unused parameter --- data/models.csv | 0 parameter-file-application.json | 3 +-- parameter-file-container.json | 3 +-- run.py | 1 - src/param-types.json | 3 +-- src/utilities.py | 2 +- 6 files changed, 4 insertions(+), 8 deletions(-) mode change 100644 => 100755 data/models.csv diff --git a/data/models.csv b/data/models.csv old mode 100644 new mode 100755 diff --git a/parameter-file-application.json b/parameter-file-application.json index b87b778..80045f5 100644 --- a/parameter-file-application.json +++ b/parameter-file-application.json @@ -16,8 +16,7 @@ "bibsnet": { "model": "3d_fullres", "nnUNet_predict_path": "/home/support/public/torch_cudnn8.2/bin/nnUNet_predict", - "code_dir": "/home/faird/shared/code/internal/pipelines/bibsnet/BIBSnet", - "singularity_image_path": "/home/feczk001/gconan/placeholder.txt" + "code_dir": "/home/faird/shared/code/internal/pipelines/bibsnet/BIBSnet" }, "nibabies": { diff --git a/parameter-file-container.json b/parameter-file-container.json index 936186f..191926d 100644 --- a/parameter-file-container.json +++ b/parameter-file-container.json @@ -16,8 +16,7 @@ "bibsnet": { "model": "3d_fullres", "nnUNet_predict_path": "/opt/conda/bin/nnUNet_predict", - "code_dir": "/home/cabinet/SW/BIBSnet", - "singularity_image_path": "/home/feczk001/gconan/placeholder.txt" + "code_dir": "/home/cabinet/SW/BIBSnet" }, "nibabies": { diff --git a/run.py b/run.py index 5f9a8c3..f102a4b 100755 --- a/run.py +++ b/run.py @@ -59,7 +59,6 @@ def find_myself(flg): ) - def main(): start_time = datetime.now() # Time how long the script takes logger = make_logger() # Make object to log error/warning/status messages diff --git a/src/param-types.json b/src/param-types.json index a790f9a..1caf1b9 100644 --- a/src/param-types.json +++ b/src/param-types.json @@ -16,8 +16,7 @@ "bibsnet": { "code_dir": "existing_directory_path", "model": "str", - "nnUNet_predict_path": "existing_file_path", - "singularity_image_path": "existing_file_path" + "nnUNet_predict_path": "existing_file_path" }, "nibabies": { diff --git a/src/utilities.py b/src/utilities.py index d77d637..9dfd34e 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -977,7 +977,7 @@ def register_preBIBSnet_imgs_ACPC(cropped_imgs, output_dir, xfm_non_ACPC_vars, # pdb.set_trace() # TODO Add "debug" flag? 
- return {"vars": xfm_ACPC_vars, "img_paths": xfm_ACPC_and_reg_imgs}, + return {"vars": xfm_ACPC_vars, "img_paths": xfm_ACPC_and_reg_imgs} def register_preBIBSnet_imgs_non_ACPC(cropped_imgs, output_dir, ref_image, From 1e987dfd1c30f062d8750a2ba60a402382a8b404 Mon Sep 17 00:00:00 2001 From: lundq163 <102316699+lundq163@users.noreply.github.com> Date: Fri, 28 Oct 2022 16:45:14 -0500 Subject: [PATCH 08/21] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 966ce85..844ff3f 100755 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This [BIDS App](https://bids-apps.neuroimaging.io/about/) provides the utility o
-![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/195385888-77f627e1-1389-4f0c-991d-eeb0c9e663b8.png) +![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738036-eb3e64c8-c846-4e1d-a523-a8bf66eac865.png)
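An aside on the one-character fix in PATCH 07 above (dropping the comma after `return {"vars": ..., "img_paths": ...}` in `register_preBIBSnet_imgs_ACPC`): in Python, a trailing comma after a return value silently wraps it in a one-element tuple, so any later `regn_ACPC["vars"]` lookup would fail. A quick demonstration:

```python
# Why the stray comma mattered: "return x," returns a 1-tuple, not x.
def with_comma():
    return {"vars": {}, "img_paths": {}},   # note the trailing comma

def without_comma():
    return {"vars": {}, "img_paths": {}}

print(type(with_comma()))     # <class 'tuple'>
print(type(without_comma()))  # <class 'dict'>
# with_comma()["vars"] would raise:
# TypeError: tuple indices must be integers or slices, not str
```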
From b8ade39a741f50df36c71cb55fba843824294986 Mon Sep 17 00:00:00 2001 From: lundq163 <102316699+lundq163@users.noreply.github.com> Date: Fri, 28 Oct 2022 16:50:19 -0500 Subject: [PATCH 09/21] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 844ff3f..c6c4c57 100755 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This [BIDS App](https://bids-apps.neuroimaging.io/about/) provides the utility o
-![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738036-eb3e64c8-c846-4e1d-a523-a8bf66eac865.png) +![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738785-f3681ff7-824d-482c-b6fd-8467ed1779db.png)
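PATCH 10 below reworks the non-ACPC T2w matrix chain, and the detail that matters is concatenation order, because composing transforms is not commutative. FSL documents the usage as `convert_xfm -omat AtoC.mat -concat BtoC.mat AtoB.mat`, i.e. the second matrix listed is applied first. A small numpy illustration with made-up affines (FLIRT `.mat` files also encode FLIRT's internal coordinate conventions, so in practice they should only be combined with `convert_xfm`):

```python
# Affine composition order, shown with made-up 4x4 matrices: if A maps
# full -> crop and B maps crop -> template, then the full -> template
# transform is B @ A, meaning "apply A first, then B".
import numpy as np

A = np.array([[1., 0., 0., -10.],   # hypothetical full2crop: shift x by -10
              [0., 1., 0.,   0.],
              [0., 0., 1.,   0.],
              [0., 0., 0.,   1.]])
B = np.diag([2., 2., 2., 1.])       # hypothetical crop2template: 2x scaling

point = np.array([5., 0., 0., 1.])
print(B @ A @ point)  # [-10. 0. 0. 1.]: crop first, then scale (correct)
print(A @ B @ point)  # [  0. 0. 0. 1.]: wrong order, wrong coordinates
```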
From f7fd688513579ebf48e421a2f6a1e92a82f34e03 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Tue, 1 Nov 2022 17:51:28 -0500 Subject: [PATCH 10/21] Fixed minor nonACPC T2-to-T1 bug --- src/utilities.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/src/utilities.py b/src/utilities.py index 9dfd34e..ead217b 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -122,7 +122,7 @@ def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, ) crop2BIBS_mat = os.path.join(xfm_ACPC_vars["out_dir"], - "crop_T{}w_to_BIBS_template.mat".format(t)) + "crop_T{}w_to_BIBS_template.mat".format(t)) if not os.path.exists(crop2BIBS_mat): shutil.copy2(to_rigidbody_final_mat, crop2BIBS_mat) if j_args["common"]["verbose"]: @@ -164,12 +164,24 @@ def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, "-concat", full2crop_ACPC, xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] ) - run_FSL_sh_script( - j_args, logger, "convert_xfm", - "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], - "-concat", full2cropT1w_mat, - xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] - ) + if t == 1: + run_FSL_sh_script( + j_args, logger, "convert_xfm", + "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], + "-concat", full2cropT1w_mat, + xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] + ) + else: # if t == 2: + crop_and_reg_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], + "full2cropT2toT1.mat") + run_FSL_sh_script( + j_args, logger, "convert_xfm", "-omat", crop_and_reg_mat, "-concat", xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)], full2cropT1w_mat + ) + run_FSL_sh_script( + j_args, logger, "convert_xfm", "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], "-concat", xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)], crop_and_reg_mat + ) + + # Do the applywarp FSL command from align_ACPC_1_img (for T2w and not T1w, for non-ACPC) # applywarp output is optimal_realigned_imgs input # Apply registration to the T1ws and the T2ws From b6665eb99c5c614a562f20db16e8a2c5249875ef Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Tue, 1 Nov 2022 18:02:54 -0500 Subject: [PATCH 11/21] Revert "Fixed minor nonACPC T2-to-T1 bug" This reverts commit f7fd688513579ebf48e421a2f6a1e92a82f34e03. 
--- src/utilities.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/src/utilities.py b/src/utilities.py index ead217b..9dfd34e 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -122,7 +122,7 @@ def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, ) crop2BIBS_mat = os.path.join(xfm_ACPC_vars["out_dir"], - "crop_T{}w_to_BIBS_template.mat".format(t)) + "crop_T{}w_to_BIBS_template.mat".format(t)) if not os.path.exists(crop2BIBS_mat): shutil.copy2(to_rigidbody_final_mat, crop2BIBS_mat) if j_args["common"]["verbose"]: @@ -164,24 +164,12 @@ def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, "-concat", full2crop_ACPC, xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] ) - if t == 1: - run_FSL_sh_script( - j_args, logger, "convert_xfm", - "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], - "-concat", full2cropT1w_mat, - xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] - ) - else: # if t == 2: - crop_and_reg_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], - "full2cropT2toT1.mat") - run_FSL_sh_script( - j_args, logger, "convert_xfm", "-omat", crop_and_reg_mat, "-concat", xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)], full2cropT1w_mat - ) - run_FSL_sh_script( - j_args, logger, "convert_xfm", "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], "-concat", xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)], crop_and_reg_mat - ) - - + run_FSL_sh_script( + j_args, logger, "convert_xfm", + "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], + "-concat", full2cropT1w_mat, + xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] + ) # Do the applywarp FSL command from align_ACPC_1_img (for T2w and not T1w, for non-ACPC) # applywarp output is optimal_realigned_imgs input # Apply registration to the T1ws and the T2ws From 077d9b89893e4bf8dcc191ff715358fe01ec1ad9 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Tue, 1 Nov 2022 18:03:49 -0500 Subject: [PATCH 12/21] Revert "Revert "Fixed minor nonACPC T2-to-T1 bug"" This reverts commit b6665eb99c5c614a562f20db16e8a2c5249875ef. 
--- src/utilities.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/src/utilities.py b/src/utilities.py index 9dfd34e..ead217b 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -122,7 +122,7 @@ def apply_final_ACPC_xfm(xfm_ACPC_vars, xfm_ACPC_imgs, ) crop2BIBS_mat = os.path.join(xfm_ACPC_vars["out_dir"], - "crop_T{}w_to_BIBS_template.mat".format(t)) + "crop_T{}w_to_BIBS_template.mat".format(t)) if not os.path.exists(crop2BIBS_mat): shutil.copy2(to_rigidbody_final_mat, crop2BIBS_mat) if j_args["common"]["verbose"]: @@ -164,12 +164,24 @@ def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, "-concat", full2crop_ACPC, xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] ) - run_FSL_sh_script( - j_args, logger, "convert_xfm", - "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], - "-concat", full2cropT1w_mat, - xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] - ) + if t == 1: + run_FSL_sh_script( + j_args, logger, "convert_xfm", + "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], + "-concat", full2cropT1w_mat, + xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] + ) + else: # if t == 2: + crop_and_reg_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], + "full2cropT2toT1.mat") + run_FSL_sh_script( + j_args, logger, "convert_xfm", "-omat", crop_and_reg_mat, "-concat", xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)], full2cropT1w_mat + ) + run_FSL_sh_script( + j_args, logger, "convert_xfm", "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], "-concat", xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)], crop_and_reg_mat + ) + + # Do the applywarp FSL command from align_ACPC_1_img (for T2w and not T1w, for non-ACPC) # applywarp output is optimal_realigned_imgs input # Apply registration to the T1ws and the T2ws From 35e88bcb706c99c3092fc0c68222ca197f3dbb9e Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Tue, 1 Nov 2022 19:17:30 -0500 Subject: [PATCH 13/21] Merging minor README update from main --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c6c4c57..fb8c761 100755 --- a/README.md +++ b/README.md @@ -14,7 +14,8 @@ This [BIDS App](https://bids-apps.neuroimaging.io/about/) provides the utility o
-![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738785-f3681ff7-824d-482c-b6fd-8467ed1779db.png) +![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738919-189a8c5b-d58f-40c6-9734-01d9a966e219.png) +
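The fix/revert/re-apply cycle in patches 10-12 above hinges on the argument order of FSL's `convert_xfm`: in `convert_xfm -omat out.mat -concat B.mat A.mat`, the output matrix applies `A` first and then `B`. Patch 10 uses this to fold the crop-T2w-to-crop-T1w registration into the middle of the T2w chain (full-to-crop, then T2-to-T1, then crop-to-BIBS-template). A sketch of that composition with hypothetical matrix file names; CABINET itself issues the same calls through its `run_FSL_sh_script` wrapper:

```python
import subprocess

def concat_xfm(first_mat, second_mat, out_mat):
    """Compose two FSL .mat affines so that out = second o first.

    Note convert_xfm's order: the matrix applied SECOND comes right
    after -concat, and the matrix applied FIRST comes last.
    """
    subprocess.check_call(["convert_xfm", "-omat", out_mat,
                           "-concat", second_mat, first_mat])

# Hypothetical file names mirroring the intended T2w chain:
concat_xfm("full2cropT2w.mat", "cropT2tocropT1.mat", "full2cropT2toT1.mat")
concat_xfm("full2cropT2toT1.mat", "T2w_crop2BIBS.mat", "full_T2w_to_BIBS.mat")
```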
From ff6922aea5d1a2d2502d3e3723267c9a8d5d8a2d Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Thu, 3 Nov 2022 23:25:27 -0500 Subject: [PATCH 14/21] Added head radius table description and example to README --- README.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fb8c761..c9e67a4 100755 --- a/README.md +++ b/README.md @@ -188,6 +188,10 @@ However, if you run `CABINET` outside of the container as an application, then y
+## Different Modes + + + ## Multiple Participant Requirements ### `participants.tsv` @@ -204,9 +208,21 @@ NOTE: `sub-` and `ses-` prefixes are currently required for `participant_id` and When running multiple subjects and/or sessions, the `participants.tsv` file in the `bids_dir` must include an `age` column. In that column, each row has one positive integer, the participant's age in months at that session. -If the user wants to specify the brain height (shown below) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row, which is the size of the participant's brain along the z-axis in millimeters. Without a `brain_z_size` column, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory. +If the user wants to specify the brain height (shown below) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row, which is the size of the participant's brain along the z-axis in millimeters. + + + +### `age_to_avg_head_radius_BCP.csv` + +Without a `brain_z_size` column in the `participants.tsv` file, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory. It has 2 columns: age in months and the average head radius (in inches) of BCP participants at that age. Part of the table is shown below. -
+
+| Candidate_Age(mo.) | Head_Radius(in.) |
+|:-:|:-:|
+| 1 | 2.26 |
+| 2 | 2.54 |
+| 5 | 2.70 |
+| 10 | 2.87 |
+| 20 | 3.12 |
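Given this table, the conversion from average head radius to a default `brain_z_size` presumably doubles the radius (converted from inches to millimeters) and pads it with a small buffer; `run.py`'s `get_brain_z_size` documents a 5 mm default buffer. A rough sketch under those assumptions (column names taken from the table above; the exact arithmetic in `get_brain_z_size` may differ):

```python
import pandas as pd

MM_PER_INCH = 25.4

def estimate_brain_z_size(age_months, csv_path, buffer=5):
    """Sketch: look up the closest age, convert radius to a z-size in mm."""
    table = pd.read_csv(csv_path)
    # Row whose age is closest to this participant's age in months
    closest = table.loc[(table["Candidate_Age(mo.)"] - age_months).abs().idxmin()]
    diameter_mm = 2 * closest["Head_Radius(in.)"] * MM_PER_INCH
    return int(diameter_mm) + buffer  # pad with a small safety margin

# e.g. age 5 months: 2 * 2.70 in * 25.4 mm/in ~= 137 mm, + 5 mm buffer -> 142
```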
From 46e906082bf2fdbe35a5b69342b4cad3f1f08bb4 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Fri, 4 Nov 2022 17:08:50 -0500 Subject: [PATCH 15/21] Revert "Added head radius table description and example to README" This reverts commit ff6922aea5d1a2d2502d3e3723267c9a8d5d8a2d. --- README.md | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index c9e67a4..fb8c761 100755 --- a/README.md +++ b/README.md @@ -188,10 +188,6 @@ However, if you run `CABINET` outside of the container as an application, then y
-## Different Modes - - - ## Multiple Participant Requirements ### `participants.tsv` @@ -208,21 +204,9 @@ NOTE: `sub-` and `ses-` prefixes are currently required for `participant_id` and When running multiple subjects and/or sessions, the `participants.tsv` file in the `bids_dir` must include an `age` column. In that column, each row has one positive integer, the participant's age in months at that session. -If the user wants to specify the brain height (shown below) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row, which is the size of the participant's brain along the z-axis in millimeters. - - - -### `age_to_avg_head_radius_BCP.csv` - -Without a `brain_z_size` column in the `participants.tsv` file, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory. It has 2 columns: age in months and the average head radius (in inches) of BCP participants at that age. Part of the table is shown below. +If the user wants to specify the brain height (shown below) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row, which is the size of the participant's brain along the z-axis in millimeters. Without a `brain_z_size` column, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory. -| Candidate_Age(mo.) | Head_Radius(in.) | -|:-:|:-:| -| 1 | 2.26 | -| 2 | 2.54 | -| 5 | 2.70 | -| 10 | 2.87 | -| 20 | 3.12 | +

From dfdfce87b0377a014fefafa04574c70dd8f33d69 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Fri, 4 Nov 2022 17:34:17 -0500 Subject: [PATCH 16/21] README update: Added brain_z_size table --- README.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fb8c761..2258cd4 100755 --- a/README.md +++ b/README.md @@ -204,9 +204,18 @@ NOTE: `sub-` and `ses-` prefixes are currently required for `participant_id` and When running multiple subjects and/or sessions, the `participants.tsv` file in the `bids_dir` must include an `age` column. In that column, each row has one positive integer, the participant's age in months at that session. -If the user wants to specify the brain height (shown below) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row, which is the size of the participant's brain along the z-axis in millimeters. Without a `brain_z_size` column, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory. +
+ +
+
+If the user wants to specify the brain height (shown above) for each subject session, then the user must also include an additional `"brain_z_size"` column. That column also must have a positive integer for each row: the size of the participant's brain along the z-axis in millimeters. The `participants.tsv` file should look like the example below:
+
+| participant_id | session | age | brain_z_size |
+|:-:|:-:|:-:|:-:|
+| sub-123456 | ses-X | 1 | 120 |
+| sub-234567 | ses-X | 6 | 145 |
+Without a `brain_z_size` column, `CABINET` will calculate the `brain_z_size` value based on a table with [BCP](https://babyconnectomeproject.org/) participants' average head radius per age. That table is called `age_to_avg_head_radius_BCP.csv` under the `data` directory.
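For concreteness, a minimal reader for the layout shown above; the repository's own implementation is `read_from_participants_tsv` in `run.py`, and this sketch assumes the exact column names and the `sub-`/`ses-` prefixes required earlier in this section:

```python
import pandas as pd

def lookup_participants_tsv(tsv_path, sub, ses, col_name):
    """Return one per-session value, e.g. "age" or "brain_z_size"."""
    df = pd.read_csv(tsv_path, sep="\t")
    row = df[(df["participant_id"] == sub) & (df["session"] == ses)]
    return int(row[col_name].iloc[0])

# e.g. lookup_participants_tsv("bids_dir/participants.tsv",
#                              "sub-123456", "ses-X", "brain_z_size") -> 120
```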
From 161e0d2395810a9695ee0db60cf9186a84eb3083 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Wed, 9 Nov 2022 15:21:49 -0600 Subject: [PATCH 17/21] Added to description of BIBSnet models --- README.md | 19 +++++++++++++++---- run.py | 6 ++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 2258cd4..d875dfb 100755 --- a/README.md +++ b/README.md @@ -89,10 +89,11 @@ optional arguments: be the postbibsnet stage. Valid choices: prebibsnet, bibsnet, postbibsnet -model MODEL, --model-number MODEL, --bibsnet-model MODEL - Model/task number for BIBSnet. By default, this will - be inferred from CABINET/data/models.csv based - on which data (T1, T2, or both) exists in the --bids- - dir. + Model/task number for BIBSnet. By default, this will + be inferred from CABINET/data/models.csv based on + which data exists in the --bids-dir. BIBSnet will run + model 514 by default for T1w-only, model 515 for + T2w-only, and model 512 for both T1w and T2w. --overwrite, --overwrite-old Include this flag to overwrite any previous CABINET outputs in the derivatives sub-directories. Otherwise, @@ -235,6 +236,16 @@ Prepares the anatomical BIDS images for BIBSnet segmentation generation. Quickly and accurately segments an optimally-aligned T1 and T2 pair with a deep neural network trained via nnU-Net and SynthSeg with a large 0 to 8 month old infant MRI brain dataset. +### BIBSnet Segmentation Models + +`data/models.csv` lists all available BIBSnet models to run. Below are the default BIBSnet models, all trained on manually-segmented 0- to 8-month-old BCP subjects' segmentations. + +| Model | Description | +|:-:|:--| +| 512 | Default T1w and T2w model | +| 514 | Default T1w-only model | +| 515 | Default T2w-only model | +
## 3. PostBIBSnet diff --git a/run.py b/run.py index f102a4b..c565389 100755 --- a/run.py +++ b/run.py @@ -157,8 +157,10 @@ def get_params_from_JSON(stage_names, logger): "-model", "--model-number", "--bibsnet-model", type=valid_whole_number, dest="model", help=("Model/task number for BIBSnet. By default, this will be " - "inferred from {}/data/models.csv based on which data (T1, T2, " - "or both) exists in the --bids-dir.".format(SCRIPT_DIR)) + "inferred from {} based on which data exists in the " + "--bids-dir. BIBSnet will run model 514 by default for T1w-" + "only, model 515 for T2w-only, and model 512 for both T1w and " + "T2w.".format(os.path.join(SCRIPT_DIR, "data", "models.csv"))) ) parser.add_argument( "--overwrite", "--overwrite-old", # TODO Change this to "-skip" From 17e97ea9d7a8c42a46092a06c7ba67297fe2dced Mon Sep 17 00:00:00 2001 From: lundq163 <102316699+lundq163@users.noreply.github.com> Date: Wed, 9 Nov 2022 15:33:21 -0600 Subject: [PATCH 18/21] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d875dfb..a1be92e 100755 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This [BIDS App](https://bids-apps.neuroimaging.io/about/) provides the utility o
-![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/198738919-189a8c5b-d58f-40c6-9734-01d9a966e219.png) +![CABINET - Stages for MRI Processing](https://user-images.githubusercontent.com/102316699/200946315-4983870b-9c8e-4a5f-b344-47d0e1d674c1.png)
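Patch 17 above documents how the default BIBSnet model is inferred when `--model` is omitted: filter `data/models.csv` down to the models whose required contrasts match the data found in the `--bids-dir`, then take the default. A sketch of that lookup, assuming the CSV carries boolean `T1w`/`T2w` columns plus an `is_default` flag (mirroring `select_model_with_data_for_T` in `run.py`):

```python
import pandas as pd

def infer_default_model(models_csv, has_T1w, has_T2w):
    """Sketch: pick the default BIBSnet model for the available contrasts."""
    df = pd.read_csv(models_csv)
    # Keep only models whose T1w/T2w requirements match the input data
    for col, has_T in (("T1w", has_T1w), ("T2w", has_T2w)):
        df = df[df[col] == has_T]
    return int(df[df["is_default"]]["model_num"].iloc[0])

# Per the README table: (True, True) -> 512, (True, False) -> 514,
# and (False, True) -> 515.
```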
From 0943c5620126325af810be7958c86529fb451311 Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Thu, 10 Nov 2022 18:35:31 -0600 Subject: [PATCH 19/21] Fixed nonACPC T2 bug; updated Docker pull in README --- README.md | 2 +- src/utilities.py | 20 ++++++++++++-------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index d875dfb..ac249de 100755 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ Container hosted here: https://hub.docker.com/r/dcanumn/cabinet #### Docker - docker pull dcanumn/cabinet + docker pull dcanumn/cabinet:t1-only_t2-only
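In the `src/utilities.py` hunk below, the non-ACPC bug being fixed is a shared intermediate filename: both the T1w and the T2w iterations of the loop wrote and read `full2cropT1w.mat`, so the T2w chain was composed with the T1w full-to-crop matrix. The fix derives the name from the loop variable so each contrast gets its own matrix, roughly:

```python
import os

out_dir = "work/xfms"  # hypothetical working directory
for t in (1, 2):
    # One matrix per contrast; previously both iterations shared
    # "full2cropT1w.mat", so the T2w pass reused T1w's matrix.
    full2crop_mat = os.path.join(out_dir, f"full2cropT{t}w.mat")
```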
diff --git a/src/utilities.py b/src/utilities.py index ead217b..a1fd362 100755 --- a/src/utilities.py +++ b/src/utilities.py @@ -156,29 +156,33 @@ def apply_final_non_ACPC_xfm(xfm_non_ACPC_vars, xfm_imgs_non_ACPC, avg_imgs, xfm_non_ACPC_vars["out_dir"], "full_crop_T{}w_to_BIBS_template.mat".format(t) # NOTE Changed this back to full_crop on 2022-08-30 ) - full2cropT1w_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], - "full2cropT1w.mat") + full2crop_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], + f"full2cropT{t}w.mat") + # full2cropT1w_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], "full2cropT1w.mat") run_FSL_sh_script( j_args, logger, "convert_xfm", - "-omat", full2cropT1w_mat, - "-concat", full2crop_ACPC, - xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] + "-omat", full2crop_mat, # full2cropT1w_mat, + "-concat", full2crop_ACPC, xfm_imgs_non_ACPC["cropT1tocropT1"] + # xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)] ) if t == 1: run_FSL_sh_script( j_args, logger, "convert_xfm", "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], - "-concat", full2cropT1w_mat, + "-concat", full2crop_mat, # full2cropT1w_mat, xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)] ) else: # if t == 2: crop_and_reg_mat = os.path.join(xfm_non_ACPC_vars["out_dir"], "full2cropT2toT1.mat") run_FSL_sh_script( - j_args, logger, "convert_xfm", "-omat", crop_and_reg_mat, "-concat", xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)], full2cropT1w_mat + j_args, logger, "convert_xfm", "-omat", crop_and_reg_mat, "-concat", + xfm_imgs_non_ACPC["cropT{}tocropT1".format(t)], full2crop_mat # full2cropT1w_mat ) run_FSL_sh_script( - j_args, logger, "convert_xfm", "-omat", outputs["T{}w_crop2BIBS_mat".format(t)], "-concat", xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)], crop_and_reg_mat + j_args, logger, "convert_xfm", "-omat", + outputs["T{}w_crop2BIBS_mat".format(t)], "-concat", + xfm_imgs_non_ACPC["T{}w_crop2BIBS_mat".format(t)], crop_and_reg_mat ) From 9b72c4fe171c23daabdf86ac3104d8d975ef92c3 Mon Sep 17 00:00:00 2001 From: hough129 <77338310+hough129@users.noreply.github.com> Date: Thu, 17 Nov 2022 11:18:22 -0600 Subject: [PATCH 20/21] Update README.md clarified task_id parameter --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2569af4..970a786 100755 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ The repository contains two parameter files, one recommended to run CABINET insi #### "common": parameters used by multiple stages within CABINET - `"fsl_bin_path"`: string, a valid absolute path to existing `bin` directory in [FMRIB Software Library](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/). Example: `"/opt/fsl-6.0.5.1/bin/"` -- `"task_id"`: string, the name of the task performed by the participant to processes data for. This parameter can also be `null` for non-task data. Example: `nback` +- `"task_id"`: string, the name of the task performed by the participant to processes data for. This parameter can also be `null` for non-task data. Example: `nback` (note: this is not utilized by cabinet yet, please designate it as null) #### "resource_management": parameters to determine resource use when running parallel scripts. These parameters are only needed for nibabies and XCPD. 
From 87a4403875e5ec7e8b61537a1bf165630b05258b Mon Sep 17 00:00:00 2001 From: Greg Conan Date: Fri, 18 Nov 2022 15:50:51 -0600 Subject: [PATCH 21/21] Comment cleanup Removed old comments and updated/finished function headers without changing functionality --- run.py | 65 ++++++++++++++++++++++++++++------------------------------ 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/run.py b/run.py index c565389..19de67f 100755 --- a/run.py +++ b/run.py @@ -5,7 +5,7 @@ Connectome ABCD-XCP niBabies Imaging nnu-NET (CABINET) Greg Conan: gconan@umn.edu Created: 2021-11-12 -Updated: 2022-10-21 +Updated: 2022-11-18 """ # Import standard libraries import argparse @@ -186,13 +186,12 @@ def get_params_from_JSON(stage_names, logger): "command being run by CABINET to stdout. Otherwise CABINET " "will only print warnings, errors, and minimal output.") ) - parser.add_argument( - "-z", "--brain-z-size", action="store_true", # type=valid_whole_number, + parser.add_argument( + "-z", "--brain-z-size", action="store_true", help=("Include this flag to infer participants' brain height (z) " "using the participants.tsv brain_z_size column. Otherwise, " "CABINET will estimate the brain height from the participant " "age and averages of a large sample of infant brain heights.") # TODO rephrase - # help=("Positive integer, the size of the participant's brain in millimeters along the z-axis. By default, this will be {}.".format(default_brain_z_size)) ) parser.add_argument( SCRIPT_DIR_ARG, dest=as_cli_attr(SCRIPT_DIR_ARG), @@ -210,6 +209,7 @@ def validate_cli_args(cli_args, stage_names, parser, logger): :param cli_args: Dictionary containing all command-line input arguments :param stage_names: List of strings naming stages to run (in order) :param logger: logging.Logger object to show messages and raise warnings + :param parser: argparse.ArgumentParser to raise error if anything's invalid :return: Tuple of 2 objects: 1. Dictionary of validated parameters from parameter .JSON file 2. List of dicts which each map "subject" to the subject ID string, @@ -233,7 +233,6 @@ def validate_cli_args(cli_args, stage_names, parser, logger): "end": cli_args["end"]} # TODO Maybe save the stage_names list in here too to replace optional_out_dirs use cases? 
for arg_to_add in ("bids_dir", "overwrite", "verbose"): j_args["common"][arg_to_add] = cli_args[arg_to_add] - # j_args["ID"] = {"subject": cli_args["participant_label"], "session": cli_args["session"], "age_months": cli_args["age_months"], "brain_z_size": cli_args["brain_z_size"], "model": cli_args["model"]} # TODO Remove all references to the optional_out_dirs arguments, and change # j_args[optional_out_dirs][derivatives] to instead be j_args[common][output_dir] @@ -341,7 +340,9 @@ def get_df_with_valid_bibsnet_models(sub_ses_ID): # Exclude any models which require (T1w or T2w) data the user lacks for t in only_Ts_needed_for_bibsnet_model(sub_ses_ID): - models_df = select_model_with_data_for_T(t, models_df, sub_ses_ID["has_T{}w".format(t)]) + models_df = select_model_with_data_for_T( + t, models_df, sub_ses_ID["has_T{}w".format(t)] + ) return models_df @@ -350,7 +351,9 @@ def validate_model_num(cli_args, data_path_BIDS_T, models_df, sub_ses_ID, parser :param cli_args: Dictionary containing all command-line input arguments :param data_path_BIDS_T: Dictionary mapping 1 and 2 to the (incomplete) paths to expected T1w and T2w data respectively - :param j_args: Dictionary containing all args from parameter .JSON file + :param models_df: pd.DataFrame of all bibsnet models viable for input data + :param sub_ses_ID: Dict mapping (string) names to values for sub- & + ses-specific input parameters; same as j_args[ID] :param parser: argparse.ArgumentParser to raise error if anything's invalid :return: Int, validated bibsnet model number """ @@ -380,9 +383,9 @@ def validate_model_num(cli_args, data_path_BIDS_T, models_df, sub_ses_ID, parser def select_model_with_data_for_T(t, models_df, has_T): """ + :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) :param models_df: pandas.DataFrame with columns called "T1w" and "T2w" with bool values describing which T(s) a model needs - :param t: Int, either 1 or 2 (to signify T1w or T2w respectively) :param has_T: bool, True if T{t}w data exists for this subject/ses :return: pandas.DataFrame, all models_df rows with data for this sub/ses/t """ @@ -395,6 +398,8 @@ def get_brain_z_size(age_months, j_args, logger, buffer=5): Infer a participant's brain z-size from their age and from the average brain diameters table at the AGE_TO_HEAD_RADIUS_TABLE path :param age_months: Int, participant's age in months + :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings :param buffer: Int, extra space (in mm), defaults to 5 :return: Int, the brain z-size (height) in millimeters """ @@ -461,9 +466,10 @@ def get_all_sub_ses_IDs(j_args, subj_or_none, ses_or_none): def ensure_j_args_has_bids_subdirs(j_args, derivs, sub_ses, default_parent): """ :param j_args: Dictionary containing all args from parameter .JSON file - :param subdirnames: Unpacked list of strings. Each names 1 part of a path - under j_args[common][bids_dir]. The last string is - mapped by j_args[optional_out_dirs] to the subdir path. + :param derivs: Unpacked list of strings. Each names 1 part of a path under + j_args[common][bids_dir]. The last string is mapped by + j_args[optional_out_dirs] to the subdir path. 
+ :param sub_ses: List with either only the subject ID str or the session too :return: j_args, but with the (now-existing) subdirectory path """ j_args["optional_out_dirs"] = make_given_or_default_dir( @@ -483,6 +489,7 @@ def ensure_j_args_has_bids_subdirs(j_args, derivs, sub_ses, default_parent): def read_from_participants_tsv(j_args, logger, col_name, *sub_ses): """ :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings :param col_name: String naming the column of participants.tsv to return a value from (for this subject or subject-session) :return: Int, either the subject's age (in months) or the subject's @@ -572,7 +579,6 @@ def run_preBIBSnet(j_args, logger): else: # Define variables and paths needed for the final (only) xfm needed t1or2 = 1 if j_args["ID"]["has_T1w"] else 2 - # img_ext = split_2_exts(cropped[t1or2])[-1] outdir = os.path.join(preBIBSnet_paths["resized"], "xfms") os.makedirs(outdir, exist_ok=True) out_img = get_preBIBS_final_img_fpath_T(t1or2, outdir, j_args["ID"]) @@ -734,7 +740,6 @@ def run_postBIBSnet(j_args, logger): ) if j_args["common"]["verbose"]: logger.info("Closest template-age is {} months".format(tmpl_age)) - # if age_months > 33: age_months = "34-38" # For left/right registration, use T1 for T1-only and T2 for T2-only, but # for T1-and-T2 combined use T2 for <22 months otherwise T1 (img quality) @@ -747,7 +752,7 @@ def run_postBIBSnet(j_args, logger): # Run left/right registration script and chirality correction left_right_mask_nifti_fpath = run_left_right_registration( - j_args, sub_ses, tmpl_age, t1or2, logger + sub_ses, tmpl_age, t1or2, j_args, logger ) logger.info("Left/right image registration completed") @@ -775,11 +780,6 @@ def run_postBIBSnet(j_args, logger): derivs_dir = os.path.join(precomputed_dir, *sub_ses, "anat") os.makedirs(derivs_dir, exist_ok=True) copy_to_derivatives_dir(nii_outfpath, derivs_dir, sub_ses, "aseg_dseg") - """ - for eachfile in os.scandir(chiral_out_dir): - if "native" in os.path.basename(eachfile): - copy_to_derivatives_dir(eachfile, derivs_dir, sub_ses, "aseg_dseg") # TODO Can these be symlinks? 
- """ copy_to_derivatives_dir(aseg_mask, derivs_dir, sub_ses, "brain_mask") # Copy dataset_description.json into precomputed directory for nibabies @@ -792,19 +792,19 @@ def run_postBIBSnet(j_args, logger): return j_args -def run_left_right_registration(j_args, sub_ses, age_months, t1or2, logger): +def run_left_right_registration(sub_ses, age_months, t1or2, j_args, logger): """ - :param j_args: Dictionary containing all args from parameter .JSON file :param sub_ses: List with either only the subject ID str or the session too :param age_months: String or int, the subject's age [range] in months :param t1or2: Int, 1 to use T1w image for registration or 2 to use T2w + :param j_args: Dictionary containing all args from parameter .JSON file + :param logger: logging.Logger object to show messages and raise warnings :return: String, path to newly created left/right registration output file """ # Paths for left & right registration chiral_in_dir = os.path.join(SCRIPT_DIR, "data", "chirality_masks") tmpl_head = os.path.join(chiral_in_dir, "{}mo_T{}w_acpc_dc_restore.nii.gz") - tmpl_mask = os.path.join(chiral_in_dir, "{}mo_template_LRmask.nii.gz") # "brainmasks", {}mo_template_brainmask.nii.gz") - + tmpl_mask = os.path.join(chiral_in_dir, "{}mo_template_LRmask.nii.gz") # Grab the first resized T?w from preBIBSnet to use for L/R registration last_digit = (t1or2 - 1 if j_args["ID"]["has_T1w"] @@ -822,13 +822,13 @@ def run_left_right_registration(j_args, sub_ses, age_months, t1or2, logger): # Left/right registration output file path (this function's return value) left_right_mask_nifti_fpath = os.path.join(outdir_LR_reg, "LRmask.nii.gz") - # Run left & right registration # NOTE: Script ran successfully until here 2022-03-08 + # Run left & right registration msg = "{} left/right registration on {}" if (j_args["common"]["overwrite"] or not os.path.exists(left_right_mask_nifti_fpath)): - # logger.info(msg.format("Running", first_subject_head)) try: - # SubjectHead TemplateHead TemplateMask OutputMaskFile + # In bin/LR_mask_registration.sh, the last 4 vars in cmd_LR_reg are + # named SubjectHead, TemplateHead, TemplateMask, and OutputMaskFile cmd_LR_reg = (LR_REGISTR_PATH, first_subject_head, tmpl_head.format(age_months, t1or2), tmpl_mask.format(age_months), @@ -890,7 +890,8 @@ def run_chirality_correction(l_r_mask_nifti_fpath, j_args, logger): ))[0] # Run chirality correction script - nii_outfpath = correct_chirality(seg_BIBSnet_outfiles[0], segment_lookup_table_path, + nii_outfpath = correct_chirality(seg_BIBSnet_outfiles[0], + segment_lookup_table_path, l_r_mask_nifti_fpath, chiral_out_dir, chiral_ref_img_fpath, j_args, logger) return nii_outfpath # chiral_out_dir @@ -899,8 +900,10 @@ def run_chirality_correction(l_r_mask_nifti_fpath, j_args, logger): def make_asegderived_mask(j_args, aseg_dir, nii_outfpath): """ Create mask file(s) derived from aseg file(s) in aseg_dir + :param j_args: Dictionary containing all args from parameter .JSON file :param aseg_dir: String, valid path to existing directory with output files from chirality correction + :param nii_outfpath: String, valid path to existing anat file :return: List of strings; each is a valid path to an aseg mask file """ # binarize, fillh, and erode aseg to make mask: @@ -909,7 +912,7 @@ def make_asegderived_mask(j_args, aseg_dir, nii_outfpath): ) if (j_args["common"]["overwrite"] or not os.path.exists(output_mask_fpath)): - maths = fsl.ImageMaths(in_file=nii_outfpath, # (nii_outfpath is anat file) + maths = fsl.ImageMaths(in_file=nii_outfpath, 
op_string=("-bin -dilM -dilM -dilM -dilM " "-fillh -ero -ero -ero -ero"), out_file=output_mask_fpath) @@ -958,12 +961,6 @@ def run_nibabies(j_args, logger): else: derivs = list() # TODO If j_args[nibabies][derivatives] has a path, use that instead - """ - warn_user_of_conditions( - ("Missing {{}} files in {}\nNow running nibabies with JLF but not " - "BIBSnet.".format(j_args["optional_out_dirs"]["bibsnet"])), - logger, mask=mask_glob, aseg=aseg_glob - ) """ # Run nibabies print()
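For reference, the `ImageMaths` call above implements a morphological closing of the segmentation: binarize the aseg, dilate four times to merge the labeled structures into one solid volume, fill interior holes, then erode four times to shrink back to roughly the original extent. The same operation in command-line form (a sketch, assuming `fslmaths` is on the `PATH`):

```python
import subprocess

def make_mask_from_aseg(aseg_path, mask_path):
    """fslmaths equivalent of the nipype ImageMaths op_string above."""
    subprocess.check_call([
        "fslmaths", aseg_path,
        "-bin",                              # all labeled voxels -> 1
        "-dilM", "-dilM", "-dilM", "-dilM",  # 4 mean dilations
        "-fillh",                            # fill holes inside the mask
        "-ero", "-ero", "-ero", "-ero",      # 4 erosions to undo dilation
        mask_path,
    ])
```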