Commit

Weird bug
mathieuboudreau committed Jan 8, 2024
1 parent b3f9d25 commit 3e643e3
Showing 1 changed file with 0 additions and 165 deletions.
165 changes: 0 additions & 165 deletions content/index.ipynb
@@ -340,171 +340,6 @@
"Figure 4 d-f displays the scatter plot data for human datasets submitted to this challenge, showing mean and standard deviation T1 values from the WM (genu and splenium) and GM (cerebral cortex and deep GM) ROIs. Mean WM T1 values across all submissions were 828 ± 38 ms in the genu and 852 ± 49 ms in the splenium, and mean GM T1 values were 1548 ± 156 ms in the cortex and 1188 ± 133 ms in the deep GM, with less variations overall in WM compared to GM, possibly due to better ROI placement and less partial voluming in WM. The lower standard deviations for the ROIs of human database ID site 9 (submission 18, Figure 1) are due to good slice positioning, cutting through the AC-PC line and the genu for proper ROI placement, particularly for the corpus callosum and deep GM."
]
},
{
"cell_type": "markdown",
"metadata": {
"tags": [
"report_output"
]
},
"source": [
"# Imports\n",
"import warnings\n",
"warnings.filterwarnings(\"ignore\")\n",
"\n",
"from pathlib import Path\n",
"\n",
"from os import path\n",
"import os\n",
"\n",
"from repo2data.repo2data import Repo2Data\n",
"\n",
"\n",
"if build == 'latest':\n",
" if path.isdir('analysis')== False:\n",
" !git clone https://github.com/rrsg2020/analysis.git\n",
" dir_name = 'analysis'\n",
" analysis = os.listdir(dir_name)\n",
"\n",
" for item in analysis:\n",
" if item.endswith(\".ipynb\"):\n",
" os.remove(os.path.join(dir_name, item))\n",
" if item.endswith(\".md\"):\n",
" os.remove(os.path.join(dir_name, item))\n",
"elif build == 'archive':\n",
" if os.path.isdir(Path('../../data')):\n",
" data_path = ['../../data/rrsg-2020-neurolibre']\n",
" else:\n",
" # define data requirement path\n",
" data_req_path = os.path.join(\"..\", \"binder\", \"data_requirement.json\")\n",
" # download data\n",
" repo2data = Repo2Data(data_req_path)\n",
" data_path = repo2data.install() \n",
"\n",
"# Imports\n",
"import warnings\n",
"warnings.filterwarnings(\"ignore\")\n",
"\n",
"from pathlib import Path\n",
"import pandas as pd\n",
"import nibabel as nib\n",
"import numpy as np\n",
"\n",
"from analysis.src.database import *\n",
"import matplotlib.pyplot as plt\n",
"plt.style.use('analysis/custom_matplotlibrc')\n",
"plt.rcParams[\"figure.figsize\"] = (20,5)\n",
"fig_id = 0\n",
"\n",
"# Configurations\n",
"if build == 'latest':\n",
" database_path = Path('analysis/databases/3T_human_T1maps_database.pkl')\n",
" output_folder = Path(\"analysis/plots/08_wholedataset_scatter_Human/\")\n",
"elif build=='archive':\n",
" database_path = Path(data_path[0] + '/analysis/databases/3T_human_T1maps_database.pkl')\n",
" output_folder = Path(data_path[0] + '/analysis/plots/08_wholedataset_scatter_Human/')\n",
"\n",
"estimate_type = 'mean' # median or mean\n",
"\n",
"# Define functions\n",
"\n",
"def plot_both_scatter(x1, x2, y, y_std,\n",
" title, x1_label, x2_label, y_label,\n",
" file_prefix, folder_path, fig_id):\n",
" \n",
" plt.rcParams[\"figure.figsize\"] = (20,10)\n",
"\n",
" fig, axs = plt.subplots(2)\n",
" fig.suptitle(title)\n",
" axs[0].errorbar(x1, y, y_std, fmt='o', solid_capstyle='projecting')\n",
" axs[0].set_xlabel(x1_label)\n",
" axs[0].set_ylabel(y_label)\n",
" axs[0].set_xticks(np.arange(0, np.max(x1), step=1))\n",
"\n",
"\n",
" axs[1].errorbar(x2, y, y_std, fmt='o', solid_capstyle='projecting')\n",
" axs[1].set_xlabel(x2_label)\n",
" axs[1].set_ylabel(y_label)\n",
" axs[1].set_xticklabels(labels=x2, rotation=90)\n",
"\n",
"\n",
" if fig_id<10:\n",
" filename = \"0\" + str(fig_id) + \"_\" + file_prefix\n",
" else:\n",
" filename = str(fig_id) + \"_\" + file_prefix\n",
"\n",
" fig.savefig(folder_path / (str(filename) + '.svg'), facecolor='white')\n",
" fig.savefig(folder_path / (str(filename) + '.png'), facecolor='white')\n",
" fig_id = fig_id + 1\n",
" plt.show()\n",
" return fig_id\n",
"\n",
"# Load database\n",
"\n",
"df = pd.read_pickle(database_path)\n",
"\n",
"genu_estimate = np.array([])\n",
"genu_std = np.array([])\n",
"splenium_estimate = np.array([])\n",
"splenium_std = np.array([])\n",
"deepgm_estimate = np.array([])\n",
"deepgm_std = np.array([])\n",
"cgm_estimate = np.array([])\n",
"cgm_std = np.array([])\n",
"\n",
"ii = 0\n",
"for index, row in df.iterrows():\n",
" \n",
" if estimate_type is 'mean':\n",
" genu_estimate = np.append(genu_estimate, np.mean(df.loc[index]['T1 - genu (WM)']))\n",
" genu_std = np.append(genu_std, np.std(df.loc[index]['T1 - genu (WM)']))\n",
" splenium_estimate = np.append(splenium_estimate, np.mean(df.loc[index]['T1 - splenium (WM)']))\n",
" splenium_std = np.append(splenium_std, np.std(df.loc[index]['T1 - splenium (WM)']))\n",
" deepgm_estimate = np.append(deepgm_estimate, np.mean(df.loc[index]['T1 - deep GM']))\n",
" deepgm_std = np.append(deepgm_std, np.std(df.loc[index]['T1 - deep GM']))\n",
" cgm_estimate = np.append(cgm_estimate, np.mean(df.loc[index]['T1 - cortical GM']))\n",
" cgm_std = np.append(cgm_std, np.std(df.loc[index]['T1 - cortical GM']))\n",
" elif estimate_type is 'median':\n",
" genu_estimate = np.append(genu_estimate, np.median(df.loc[index]['T1 - genu (WM)']))\n",
" genu_std = np.append(genu_std, np.std(df.loc[index]['T1 - genu (WM)']))\n",
" splenium_estimate = np.append(splenium_estimate, np.median(df.loc[index]['T1 - splenium (WM)']))\n",
" splenium_std = np.append(splenium_std, np.std(df.loc[index]['T1 - splenium (WM)']))\n",
" deepgm_estimate = np.append(deepgm_estimate, np.median(df.loc[index]['T1 - deep GM']))\n",
" deepgm_std = np.append(deepgm_std, np.std(df.loc[index]['T1 - deep GM']))\n",
" cgm_estimate = np.append(cgm_estimate, np.median(df.loc[index]['T1 - cortical GM']))\n",
" cgm_std = np.append(cgm_std, np.std(df.loc[index]['T1 - cortical GM']))\n",
" else:\n",
" Exception('Unsupported dataset estimate type.')\n",
" ii = ii +1\n",
"\n",
"# Store the IDs\n",
"indexes_numbers = df.index\n",
"indexes_strings = indexes_numbers.map(str)\n",
"\n",
"x1_label='Site #'\n",
"x2_label='Site #.Meas #'\n",
"y_label=\"T$_1$ (ms)\"\n",
"file_prefix = 'WM_and_GM'\n",
"folder_path=output_folder\n",
"\n",
"x1=indexes_numbers\n",
"x2=indexes_strings\n",
"y=genu_estimate\n",
"y_std=genu_std\n",
"\n",
"# Paper formatting of x tick labels (remove leading zero, pad zero at the end for multiples of 10)\n",
"x3=[]\n",
"for num in x2:\n",
" x3.append(num.replace('.0', '.'))\n",
"\n",
"index=0\n",
"for num in x3:\n",
" if num[-3] != '.':\n",
" x3[index]=num+'0'\n",
" index+=1\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
