v1.3.0 updates (#149)
* generalize method of accessing likelihood in apps

* generalize subdirectory names used by video app

* move version def from setup.py to __init__

* refactor train_hydra script to import a train function for greater flexibility
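The last bullet refers to the training-script refactor; those files are not among the diffs shown below. A hypothetical sketch of the pattern it describes, calling an importable train function from a thin Hydra entry point instead of keeping the training loop inside the script, where the module path and signature are assumptions rather than details taken from this commit:

```python
# Hypothetical sketch only: module path and function signature are assumed.
import hydra
from omegaconf import DictConfig


@hydra.main(config_path="configs", config_name="config", version_base=None)
def main(cfg: DictConfig) -> None:
    # import the reusable train function rather than defining the loop inline,
    # so notebooks and other scripts can call it with their own configs
    from lightning_pose.train import train  # assumed location of the new function
    train(cfg)


if __name__ == "__main__":
    main()
```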
themattinthehatt authored Apr 17, 2024
1 parent cd10c87 commit b8cb8f4
Showing 8 changed files with 377 additions and 295 deletions.
1 change: 1 addition & 0 deletions lightning_pose/__init__.py
@@ -0,0 +1 @@
__version__ = "1.3.0"
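With the version string now living in the package itself rather than in setup.py, it can be read at runtime. A minimal check, assuming the package is importable:

```python
import lightning_pose

# the package now exposes its own version string
print(lightning_pose.__version__)  # "1.3.0"
```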
52 changes: 32 additions & 20 deletions lightning_pose/apps/utils.py
@@ -16,7 +16,7 @@


@st.cache_resource
def update_labeled_file_list(model_preds_folders: list, use_ood: bool = False):
def update_labeled_file_list(model_preds_folders: List[str], use_ood: bool = False) -> List[list]:
per_model_preds = []
for model_pred_folder in model_preds_folders:
# pull labeled results from each model folder
@@ -40,15 +40,20 @@ def update_labeled_file_list(model_preds_folders: list, use_ood: bool = False):


@st.cache_resource
def update_vid_metric_files_list(video: str, model_preds_folders: list):
def update_vid_metric_files_list(
video: str,
model_preds_folders: List[str],
video_subdir: str = "video_preds",
) -> List[list]:
per_vid_preds = []
for model_preds_folder in model_preds_folders:
# pull each prediction file associated with a particular video
# wrap in Path so that it looks like an UploadedFile object
video_dir = os.path.join(model_preds_folder, video_subdir)
if not os.path.isdir(video_dir):
continue
model_preds = [
f
for f in os.listdir(os.path.join(model_preds_folder, "video_preds"))
if os.path.isfile(os.path.join(model_preds_folder, "video_preds", f))
f for f in os.listdir(video_dir) if os.path.isfile(os.path.join(video_dir, f))
]
ret_files = []
for file in model_preds:
@@ -59,16 +64,17 @@ def update_vid_metric_files_list(video: str, model_preds_folders: list):


@st.cache_resource
def get_all_videos(model_preds_folders: list):
def get_all_videos(model_preds_folders: List[str], video_subdir: str = "video_preds") -> list:
# find each video that is predicted on by the models
# wrap in Path so that it looks like an UploadedFile object
# returned by streamlit's file_uploader
ret_videos = set()
for model_preds_folder in model_preds_folders:
video_dir = os.path.join(model_preds_folder, video_subdir)
if not os.path.isdir(video_dir):
continue
model_preds = [
f
for f in os.listdir(os.path.join(model_preds_folder, "video_preds"))
if os.path.isfile(os.path.join(model_preds_folder, "video_preds", f))
f for f in os.listdir(video_dir) if os.path.isfile(os.path.join(video_dir, f))
]
for file in model_preds:
if "temporal" in file:
@@ -97,7 +103,7 @@ def concat_dfs(dframes: Dict[str, pd.DataFrame]) -> Tuple[pd.DataFrame, List[str]


@st.cache_data
def get_df_box(df_orig, keypoint_names, model_names):
def get_df_box(df_orig: pd.DataFrame, keypoint_names: list, model_names: list) -> pd.DataFrame:
df_boxes = []
for keypoint in keypoint_names:
for model_curr in model_names:
@@ -112,7 +118,13 @@ def get_df_box(df_orig, keypoint_names, model_names):


@st.cache_data
def get_df_scatter(df_0, df_1, data_type, model_names, keypoint_names):
def get_df_scatter(
df_0: pd.DataFrame,
df_1: pd.DataFrame,
data_type: str,
model_names: list,
keypoint_names: list
) -> pd.DataFrame:
df_scatters = []
for keypoint in keypoint_names:
df_scatters.append(
@@ -147,7 +159,7 @@ def get_full_name(keypoint: str, coordinate: str, model: str) -> str:
# ----------------------------------------------
@st.cache_data
def build_precomputed_metrics_df(
dframes: Dict[str, pd.DataFrame], keypoint_names: List[str], **kwargs
dframes: Dict[str, pd.DataFrame], keypoint_names: List[str], **kwargs,
) -> dict:
concat_dfs = defaultdict(list)
for model_name, df_dict in dframes.items():
@@ -179,7 +191,7 @@ def build_precomputed_metrics_df(

@st.cache_data
def get_precomputed_error(
df: pd.DataFrame, keypoint_names: List[str], model_name: str
df: pd.DataFrame, keypoint_names: List[str], model_name: str,
) -> pd.DataFrame:
# collect results
df_ = df
@@ -192,17 +204,17 @@

@st.cache_data
def compute_confidence(
df: pd.DataFrame, keypoint_names: List[str], model_name: str, **kwargs
df: pd.DataFrame, keypoint_names: List[str], model_name: str, **kwargs,
) -> pd.DataFrame:

if df.shape[1] % 3 == 1:
# get rid of "set" column if present
tmp = df.iloc[:, :-1].to_numpy().reshape(df.shape[0], -1, 3)
# collect "set" column if present
set = df.iloc[:, -1].to_numpy()
else:
tmp = df.to_numpy().reshape(df.shape[0], -1, 3)
set = None

results = tmp[:, :, 2]
mask = df.columns.get_level_values("coords").isin(["likelihood"])
results = df.loc[:, mask].to_numpy()

# collect results
df_ = pd.DataFrame(columns=keypoint_names)
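The hunk above is the "generalize method of accessing likelihood" change from the commit message: instead of reshaping the raw array and slicing a fixed third column, likelihood values are now selected by name from the DataFrame's column MultiIndex. A small standalone illustration of that selection on toy data with the same (keypoint, coords) column layout; the keypoint names here are made up:

```python
import numpy as np
import pandas as pd

# toy predictions with a (bodyparts, coords) column MultiIndex
cols = pd.MultiIndex.from_product(
    [["nose", "tail_base"], ["x", "y", "likelihood"]], names=["bodyparts", "coords"]
)
df = pd.DataFrame(np.random.rand(5, 6), columns=cols)

# select likelihood columns by name rather than assuming every third column
mask = df.columns.get_level_values("coords").isin(["likelihood"])
results = df.loc[:, mask].to_numpy()
print(results.shape)  # (5, 2): one likelihood column per keypoint
```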
@@ -219,7 +231,7 @@

# ------------ utils related to model finding in dir ---------
# write a function that finds all model folders in the model_dir
def get_model_folders(model_dir):
def get_model_folders(model_dir: str) -> List[str]:
# strip trailing slash if present
if model_dir[-1] == os.sep:
model_dir = model_dir[:-1]
@@ -232,7 +244,7 @@


# just to get the last two levels of the path
def get_model_folders_vis(model_folders):
def get_model_folders_vis(model_folders: List[str]) -> List[str]:
fs = []
for f in model_folders:
fs.append(f.split("/")[-2:])
18 changes: 7 additions & 11 deletions lightning_pose/apps/video_diagnostics.py
@@ -32,6 +32,7 @@


def run():

args = parser.parse_args()

st.title("Video Diagnostics")
@@ -53,23 +54,19 @@ def run():
# get the last two levels of each path to be presented to user
model_folders_vis = get_model_folders_vis(model_folders)

selected_models_vis = st.sidebar.multiselect(
"Select models", model_folders_vis, default=None
)
selected_models_vis = st.sidebar.multiselect("Select models", model_folders_vis, default=None)

# append this to full path
selected_models = [
"/" + os.path.join(args.model_dir, f) for f in selected_models_vis
]
selected_models = ["/" + os.path.join(args.model_dir, f) for f in selected_models_vis]

# ----- selecting videos to analyze -----
all_videos_: list = get_all_videos(selected_models)
all_videos_: list = get_all_videos(selected_models, video_subdir=args.video_subdir)

# choose from the different videos that were predicted
video_to_plot = st.sidebar.selectbox("Select a video:", [*all_videos_], key="video")

prediction_files = update_vid_metric_files_list(
video=video_to_plot, model_preds_folders=selected_models
video=video_to_plot, model_preds_folders=selected_models, video_subdir=args.video_subdir,
)

model_names = copy.copy(selected_models_vis)
@@ -100,9 +97,7 @@ def run():
dframe = pd.read_csv(model_pred_file_path, index_col=None)
dframes_metrics[model_name][str(model_pred_file)] = dframe
else:
dframe = pd.read_csv(
model_pred_file_path, header=[1, 2], index_col=0
)
dframe = pd.read_csv(model_pred_file_path, header=[1, 2], index_col=0)
dframes_traces[model_name] = dframe
dframes_metrics[model_name]["confidence"] = dframe
# data_types = dframe.iloc[:, -1].unique()
@@ -221,6 +216,7 @@ def run():
parser = argparse.ArgumentParser()

parser.add_argument("--model_dir", type=str, default=[])
parser.add_argument("--video_subdir", type=str, default="video_preds")
parser.add_argument("--make_dir", action="store_true", default=False)

run()
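The new --video_subdir argument keeps the previous behavior by default (video_preds) while letting the app read predictions from differently named subdirectories, which is the "generalize subdirectory names" bullet of the commit message. A minimal sketch of calling the underlying utilities directly with a non-default subdirectory; the folder paths and subdirectory name here are illustrative:

```python
from lightning_pose.apps.utils import get_all_videos, update_vid_metric_files_list

# illustrative model output folders; each is expected to contain the named subdir
model_folders = ["/data/models/2024-04-01/10-00-00", "/data/models/2024-04-02/11-00-00"]

videos = get_all_videos(model_folders, video_subdir="video_preds_new")
for video in videos:
    pred_files = update_vid_metric_files_list(
        video=video,
        model_preds_folders=model_folders,
        video_subdir="video_preds_new",
    )
```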