diff --git a/notebooks/aggregate_rlbench_results.ipynb b/notebooks/aggregate_rlbench_results.ipynb new file mode 100644 index 0000000..88da5fa --- /dev/null +++ b/notebooks/aggregate_rlbench_results.ipynb @@ -0,0 +1,149 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Aggregate the various rlbench evals into a single dataframe which can be easily copied." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import wandb\n", + "import pandas as pd\n", + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# For the following wandb run ids, download the results tables and present them as a df.\n", + "\n", + "# no colliison checking,\n", + "# run_ids = [\n", + "# \"wrf9hzpf\",\n", + "# \"me0cnlhq\",\n", + "# \"3a3l59af\",\n", + "# \"4vc2ogr4\",\n", + "# \"ca47vr4g\",\n", + "# \"6kfacxc2\",\n", + "# \"yz9f3xv7\",\n", + "# \"4cn8q3ch\",\n", + "# \"ieyeei8l\",\n", + "# \"jxl4v41h\",\n", + "# ]\n", + "\n", + "\n", + "# These are run ids for runs with action repeat, no collision-checking\n", + "run_ids = [\n", + " \"qw5uiwkh\",\n", + " \"g7eftjyc\",\n", + " \"40d2zf1f\",\n", + " \"1few52rl\",\n", + " \"4gt6apgc\",\n", + " \"7tnbl966\",\n", + " \"532p3esh\",\n", + " \"ztfm27yt\",\n", + " \"xuwwkznq\",\n", + " \"3qz1uzpj\",\n", + "]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the second run\n", + "def get_results_table(run_id):\n", + " api = wandb.Api()\n", + " json_file = api.artifact(f'r-pad/taxpose/run-{run_id}-results_table:v0').get_entry('results_table.table.json').download()\n", + " with open(json_file) as file:\n", + " json_dict = json.load(file)\n", + " return 
pd.DataFrame(json_dict[\"data\"], columns=json_dict[\"columns\"])\n", + "\n", + "# Get the config from the run.\n", + "def get_config(run_id):\n", + " api = wandb.Api()\n", + " run = api.run(f'r-pad/taxpose/{run_id}')\n", + " return run.config\n", + "\n", + "df = get_results_table(run_ids[1])\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for run_id in run_ids:\n", + " cfg = get_config(run_id)\n", + " print(f\"Run ID: {run_id}\")\n", + " print(f\"Task: {cfg['task']['name']}\")\n", + " try:\n", + " df = get_results_table(run_id)\n", + " display(df)\n", + " print(\"\\n\\n\")\n", + " except Exception as e:\n", + " print(\"did not complete\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "taxpose_repro", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/explore_rlbench_dataset.ipynb b/notebooks/explore_rlbench_dataset.ipynb new file mode 100644 index 0000000..80f5f16 --- /dev/null +++ b/notebooks/explore_rlbench_dataset.ipynb @@ -0,0 +1,127 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Explore the RLBench dataset (which we made)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], 
+ "source": [ + "import os\n", + "import torch\n", + "import numpy as np\n", + "\n", + "\n", + "from taxpose.datasets.rlbench import RLBenchPointCloudDataset, RLBenchPointCloudDatasetConfig" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dset = RLBenchPointCloudDataset(RLBenchPointCloudDatasetConfig(\n", + " dataset_root=os.path.expanduser(\"/data/rlbench10/\"),\n", + " task_name=\"stack_wine\",\n", + " episodes=list(range(1, 5)),\n", + " phase=\"all\",\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rpad.visualize_3d.plots import segmentation_fig\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = dset[19]\n", + "\n", + "segmentation_fig(\n", + " data=np.concatenate([data[\"points_action\"][0], data[\"points_anchor\"][0]], axis=0),\n", + " labels=np.concatenate([np.zeros(data[\"points_action\"].shape[1]), np.ones(data[\"points_anchor\"].shape[1])], axis=0).astype(np.int32),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.concatenate([data[\"points_action\"][0], data[\"points_anchor\"][0]], axis=0).shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.concatenate([np.zeros(data[\"points_action\"].shape[1]), np.ones(data[\"points_anchor\"].shape[1])], axis=0).shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[\"phase\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": 
"ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/inspect_real_world_dataset.ipynb b/notebooks/inspect_real_world_dataset.ipynb new file mode 100644 index 0000000..0896ecb --- /dev/null +++ b/notebooks/inspect_real_world_dataset.ipynb @@ -0,0 +1,150 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from taxpose.datasets.real_world_mug import RealWorldMugPointCloudDatasetConfig, RealWorldMugPointCloudDataset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dset = RealWorldMugPointCloudDataset(\n", + " RealWorldMugPointCloudDatasetConfig(\n", + " dataset_root=\"/home/beisner/code/multi_project/cam_ready_trainingdata\",\n", + " dataset_indices=list(range(20)),\n", + " start_anchor=False\n", + " )\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = dset[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data['points_action'].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data['action_symmetry_features'].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pickle as pkl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"/home/beisner/code/multi_project/cam_ready_trainingdata/0.pkl\", 
\"rb\") as f:\n", + " d = pkl.load(f)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create an open3d with 2 point clouds:\n", + "\n", + "import open3d as o3d\n", + "\n", + "points_anchor = data[\"points_anchor\"][0]\n", + "points_action = data[\"points_action\"][0]\n", + "\n", + "pcd1 = o3d.geometry.PointCloud()\n", + "pcd1.points = o3d.utility.Vector3dVector(points_anchor)\n", + "pcd2 = o3d.geometry.PointCloud()\n", + "pcd2.points = o3d.utility.Vector3dVector(points_action)\n", + "\n", + "# Visualize the point clouds:\n", + "o3d.visualization.draw_geometries([pcd1, pcd2])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "taxpose_repro", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/mlat_iclr_2024_rebuttal_tables.ipynb b/notebooks/mlat_iclr_2024_rebuttal_tables.ipynb new file mode 100644 index 0000000..36647a2 --- /dev/null +++ b/notebooks/mlat_iclr_2024_rebuttal_tables.ipynb @@ -0,0 +1,421 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import wandb\n", + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Function which downloads the results 
from the W&B server\n", + "def get_results_table(run_id, table_name):\n", + " api = wandb.Api()\n", + " json_file = api.artifact(f'r-pad/taxpose/run-{run_id}-{table_name}:v0').get_path(f'{table_name}.table.json').download()\n", + "\n", + " # Get the config from the run\n", + " run = api.run(f'r-pad/taxpose/{run_id}')\n", + " config = run.config\n", + " \n", + " table_name = config[\"object_class\"][\"name\"]\n", + " model_name = config[\"model\"][\"name\"]\n", + "\n", + " with open(json_file) as file:\n", + " json_dict = json.load(file)\n", + " df = pd.DataFrame(json_dict[\"data\"], columns=json_dict[\"columns\"])\n", + " \n", + " df.columns = pd.MultiIndex.from_product([[table_name], df.columns])\n", + "\n", + " # Set the row index as model_name\n", + " df.index = [model_name]\n", + "\n", + " return df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Function which takes a list of results tables, and concatenates them into a single table with a multi-index.\n", + "def concat_results(tables):\n", + " df = pd.concat(tables, axis=1)\n", + " return df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MLAT_IDS = [\n", + " \"jqyrs601\", # Stack Wine\n", + " \"h7a9oxtp\", # Put Toilet Roll on Stand\n", + " \"v075mup0\", # Place Hanger on Rack\n", + " \"fpu8sirp\", # Phone on Base\n", + " \"2rtpvbn3\", # Insert Onto Square Base\n", + "]\n", + "\n", + "TAXPOSE_IDS = [\n", + " \"xxecq5xe\", # Stack Wine\n", + " \"dhkc7eva\", # Put Toilet Roll on Stand\n", + " \"cs4gc0lg\", # Place Hanger on Rack\n", + " \"tp9wuqcw\", # Phone on Base\n", + " \"mae2i315\", # Insert Onto Square Base\n", + "\n", + "]\n", + "\n", + "mlat_dfs_train = []\n", + "mlat_dfs_val = []\n", + "for run_id in MLAT_IDS:\n", + " mlat_dfs_train.append(get_results_table(run_id, \"train_metrics\"))\n", + " mlat_dfs_val.append(get_results_table(run_id, \"val_metrics\"))\n", + 
"\n", + "mlat_train_table = concat_results(mlat_dfs_train)\n", + "mlat_val_table = concat_results(mlat_dfs_val)\n", + "\n", + "taxpose_dfs_train = []\n", + "taxpose_dfs_val = []\n", + "for run_id in TAXPOSE_IDS:\n", + " taxpose_dfs_train.append(get_results_table(run_id, \"train_metrics\"))\n", + " taxpose_dfs_val.append(get_results_table(run_id, \"val_metrics\"))\n", + "\n", + "taxpose_train_table = concat_results(taxpose_dfs_train)\n", + "taxpose_val_table = concat_results(taxpose_dfs_val)\n", + "\n", + "full_train_table = pd.concat([taxpose_train_table, mlat_train_table], axis=0)\n", + "full_train_table\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(full_train_table.style.format('{:.3f}').to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for df1, df2 in zip(taxpose_dfs_train, mlat_dfs_train):\n", + " # print(df1)\n", + " df = pd.concat([df1, df2], axis=0).droplevel(0, axis=1)\n", + " # print(df.style.format('{:.2f}').to_latex())\n", + " print(df.to_markdown())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(full_train_table.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "| | stack_wine | | put_toilet_roll_on_stand | | place_hanger_on_rack | | phone_on_base | | insert_onto_square_peg | |\n", + "| | angle_err | t_err | angle_err | t_err | angle_err | t_err | angle_err | t_err | angle_err | t_err |\n", + 
"|:--------------|------------------------------:|--------------------------:|--------------------------------------------:|----------------------------------------:|----------------------------------------:|------------------------------------:|---------------------------------:|-----------------------------:|------------------------------------------:|--------------------------------------:|\n", + "| taxpose | 1.48548 | 0.00308973 | 1.17297 | 0.001249 | 5.47136 | 0.0119683 | 4.14353 | 0.00543616 | 7.0977 | 0.00351971 |\n", + "| mlat_s256_vnn | 0.764146 | 0.00122502 | 1.14988 | 0.00134385 | 0.623557 | 0.00195536 | 0.803998 | 0.00106143 | 1.20883 | 0.00328621 |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "| | stack_wine\\\\ angle_err (°) | t_err (mm) | put_toilet_roll_on_stand\\\\ angle_err (°) | t_err (mm) | place_hanger_on_rack\\\\ angle_err (°) | t_err (mm) | phone_on_base\\\\ angle_err (°) | t_err (mm) | insert_onto_square_peg\\\\ angle_err (°) | t_err (mm) |\n", + "|:--------------|------------------------------:|--------------------------:|--------------------------------------------:|----------------------------------------:|----------------------------------------:|------------------------------------:|---------------------------------:|-----------------------------:|------------------------------------------:|--------------------------------------:|\n", + "| TAX-Pose | 1.47 | 3.09 | 1.17 | **1.25** | 5.47 | 12.0 | 4.14 | 5.43 | 7.10 | 3.52 |\n", + "| Ours (RelDist) | **0.76** | **1.02** | **1.15** | 1.34 | **0.62** | **2.00** | **0.80** | **1.06** | **1.21** | **3.29** |\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "full_val_table = pd.concat([taxpose_val_table, mlat_val_table], axis=0)\n", + "full_val_table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "concat_results(dfs_train)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create two sample DataFrames with MultiIndex columns\n", + "data1 = {'A': [1, 2, 3],\n", + " 'B': [4, 5, 6]}\n", + "index1 = pd.MultiIndex.from_tuples([('Group1', 'X'), ('Group1', 'Y'), ('Group1', 'Z')], names=['Group', 'Subgroup'])\n", + "df1 = pd.DataFrame(data1, index=index1)\n", + "\n", + "data2 = {'C': [7, 8, 9],\n", + " 'D': [10, 11, 12]}\n", + "index2 = pd.MultiIndex.from_tuples([('Group2', 'X'), ('Group2', 'Y'), ('Group2', 'Z')], names=['Group', 'Subgroup'])\n", + "df2 = pd.DataFrame(data2, index=index2)\n", + "\n", + "# Concatenate the DataFrames column-wise\n", + "result = pd.concat([df1, df2], axis=1)\n", + "\n", + "# Display the result\n", + "print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MLAT_ABLATION_IDS = [\n", + " \"zswyokhc\", # 1 demo\n", + " \"1hhy8jy8\", # 5 demos\n", + " \"ry1ggn0r\", # 10 demos\n", + "]\n", + "\n", + "TAXPOSE_ABLATION_IDS = [\n", + " \"5do9r1ft\", # 1 demo\n", + " \"awbr16hl\", # 5 demos\n", + " \"n9likyeo\", # 10 demos\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "taxpose_train_dfs = []\n", + "taxpose_val_dfs = []\n", + "\n", + "for run_id in TAXPOSE_ABLATION_IDS:\n", + " taxpose_train_dfs.append(get_results_table(run_id, \"train_metrics\"))\n", + " taxpose_val_dfs.append(get_results_table(run_id, \"val_metrics\"))\n", + "\n", + "mlat_train_dfs = []\n", + "mlat_val_dfs = []\n", + "\n", + "for run_id in MLAT_ABLATION_IDS:\n", + " mlat_train_dfs.append(get_results_table(run_id, \"train_metrics\"))\n", + " mlat_val_dfs.append(get_results_table(run_id, \"val_metrics\"))\n" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a table with only the angle error, where the index is the number of demonstrations.\n", + "taxpose_train_table = concat_results(taxpose_train_dfs)\n", + "taxpose_val_table = concat_results(taxpose_val_dfs)\n", + "\n", + "taxpose_train_table_angle = taxpose_train_table.xs(\"angle_err\", axis=1, level=1)\n", + "taxpose_val_table_angle = taxpose_val_table.xs(\"angle_err\", axis=1, level=1)\n", + "taxpose_train_table_t = taxpose_train_table.xs(\"t_err\", axis=1, level=1)\n", + "taxpose_val_table_t = taxpose_val_table.xs(\"t_err\", axis=1, level=1)\n", + "\n", + "# Rename the columns to be the number of demonstrations\n", + "taxpose_train_table_angle.columns = [1, 5, 10]\n", + "taxpose_val_table_angle.columns = [1, 5, 10]\n", + "taxpose_train_table_t.columns = [1, 5, 10]\n", + "taxpose_val_table_t.columns = [1, 5, 10]\n", + "\n", + "# MLAT\n", + "mlat_train_table = concat_results(mlat_train_dfs)\n", + "mlat_val_table = concat_results(mlat_val_dfs)\n", + "\n", + "mlat_train_table_angle = mlat_train_table.xs(\"angle_err\", axis=1, level=1)\n", + "mlat_val_table_angle = mlat_val_table.xs(\"angle_err\", axis=1, level=1)\n", + "mlat_train_table_t = mlat_train_table.xs(\"t_err\", axis=1, level=1)\n", + "mlat_val_table_t = mlat_val_table.xs(\"t_err\", axis=1, level=1)\n", + "\n", + "# Rename the columns to be the number of demonstrations\n", + "mlat_train_table_angle.columns = [1, 5, 10]\n", + "mlat_val_table_angle.columns = [1, 5, 10]\n", + "mlat_train_table_t.columns = [1, 5, 10]\n", + "mlat_val_table_t.columns = [1, 5, 10]\n", + "\n", + "# Concatenate the tables\n", + "full_train_table_angle = pd.concat([taxpose_train_table_angle, mlat_train_table_angle], axis=0)\n", + "full_val_table_angle = pd.concat([taxpose_val_table_angle, mlat_val_table_angle], axis=0)\n", + "full_train_table_t = pd.concat([taxpose_train_table_t, mlat_train_table_t], axis=0)\n", + 
"full_val_table_t = pd.concat([taxpose_val_table_t, mlat_val_table_t], axis=0)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "full_train_table_angle" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(full_train_table_angle.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "full_train_table_t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(full_train_table_t.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "full_val_table_angle" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "full_val_table_t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make two line plots of the angle error and translation error, with the number of demonstrations on the x-axis.\n", + "import matplotlib.pyplot as plt\n", + "fig = plt.figure(figsize=(10, 5))\n", + "\n", + "ax = fig.add_subplot(1, 2, 1)\n", + "full_train_table_angle.T.plot(ax=ax)\n", + "plt.title(\"Angle Error\")\n", + "plt.ylabel(\"Angle Error (°)\")\n", + "plt.xlabel(\"Number of Demonstrations\")\n", + "plt.ylim(0, 5.5)\n", + "\n", + "plt.xticks([1, 5, 10])\n", + "\n", + "# Rename the lines on the legend\n", + "lines = ax.get_lines()\n", + "lines[0].set_label(\"TAX-Pose\")\n", + "lines[1].set_label(\"Ours (RelDist)\")\n", + "\n", + "\n", + "ax = fig.add_subplot(1, 2, 2)\n", + "full_train_table_t.T.plot(ax=ax)\n", + "plt.title(\"Translation Error\")\n", + "plt.ylabel(\"Translation Error (mm)\")\n", + "plt.xlabel(\"Number of Demonstrations\")\n", + "plt.ylim(0, 0.0125)\n", + "\n", + "# Only plot ticks 1, 5, 
10\n", + "plt.xticks([1, 5, 10])\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "taxpose_repro", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/ndf_eval_results_table.ipynb b/notebooks/ndf_eval_results_table.ipynb new file mode 100644 index 0000000..8c2e05b --- /dev/null +++ b/notebooks/ndf_eval_results_table.ipynb @@ -0,0 +1,582 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NDF evaluation result aggregation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import wandb\n", + "import pandas as pd\n", + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "\n", + "\n", + "def get_results_table(run_id):\n", + " api = wandb.Api()\n", + " json_file = api.artifact(f'r-pad/taxpose/run-{run_id}-eval_results:v0').get_path('eval_results.table.json').download()\n", + " with open(json_file) as file:\n", + " json_dict = json.load(file)\n", + " return pd.DataFrame(json_dict[\"data\"], columns=json_dict[\"columns\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df = get_results_table(\"1lhn802x\")\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Function to select the following columns from the dataframe:\n", + "# Grasp, Place@0.00, 
Place@0.01, Place@0.02, Place@0.03 Place, Overall@0.00, Overall@0.01, Overall@0.02, Overall@0.03, Overall\n", + "def select_columns(df):\n", + " return df[['Grasp', 'Place@0.0', 'Place@0.01', 'Place@0.02', 'Place@0.03', 'Place', 'Overall@0.0', 'Overall@0.01', 'Overall@0.02', 'Overall@0.03', 'Overall']]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "select_columns(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "cols = ['Grasp', 'Place@0.01', 'Place@0.03', 'Place',\n", + " 'Overall@0.01', 'Overall@0.03',\n", + " 'Overall']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df.columns" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def merge_table(table1, table2):\n", + " \"\"\"Merge two tables by putting a slash between each cell.\n", + "\n", + " Args:\n", + " table1 (pd.DataFrame): A table.\n", + " table2 (pd.DataFrame): A table.\n", + "\n", + " Returns:\n", + " pd.DataFrame: A merged table.\n", + " \"\"\"\n", + " assert table1.shape == table2.shape\n", + " # create a new table to store the merged values\n", + " table1 = table1.copy()\n", + "\n", + " # Iterate over the rows.\n", + " for i in range(table1.shape[0]):\n", + " # Iterate over the columns.\n", + " for j in range(table1.shape[1]):\n", + " # Merge the two cells.\n", + " table1.iloc[i, j] = f\"{table1.iloc[i, j]}/{table2.iloc[i, j]}\"\n", + " return table1\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "merge_table(df, df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df" + ] + }, + { + "cell_type": "code", 
+ "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Mug - upright\n", + "ids = [\"1earfvy7\", \"6iwbbui3\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + "df_mug_upright = pd.concat(dfs, keys=methods)\n", + "\n", + "df_mug_upright\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df_mug = merge_table(df_mug_upright, df_mug_arbitrary)\n", + "df_mug" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "print(df_mug[cols].to_latex())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Mug - arbitrary\n", + "ids = [\"oua2prpu\", \"mv78haro\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + "df_mug_arbitrary = pd.concat(dfs, keys=methods)\n", + "\n", + "df_mug_arbitrary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Bottle - upright\n", + "ids = [\"922blg3t\", \"98k2uzwf\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + "df_bottle_upright = pd.concat(dfs, keys=methods)\n", + "\n", + "df_bottle_upright" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.style.format(precision=2).to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Bottle - arbitrary\n", + "ids = [\"86qjwjwa\", \"32dfgjg1\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + "df_bottle_arbitrary = pd.concat(dfs, keys=methods)\n", + "\n", + "df_bottle_arbitrary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df_bottle = merge_table(df_bottle_upright, df_bottle_arbitrary)\n", + "df_bottle" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "print(df_bottle[cols].to_latex())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.style.format(precision=2).to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Bowl - upright\n", + "ids = [\"2p4k9lts\", \"awjq5lwj\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + 
"df_bowl_upright = pd.concat(dfs, keys=methods)\n", + "\n", + "df_bowl_upright" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.style.format(precision=2).to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Bowl - arbitrary\n", + "ids = [\"ve3g0j19\", \"v0pzmw7i\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + "# Concatenate the dfs, and make methods the index.\n", + "df_bowl_arbitrary = pd.concat(dfs, keys=methods)\n", + "\n", + "df_bowl_arbitrary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "df_bowl = merge_table(df_bowl_upright, df_bowl_arbitrary)\n", + "df_bowl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "print(df_bowl[cols].to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.style.format(precision=2).to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# SE(3) comparison\n", + "ids = [\"uiemocd5\", \"ormuoxbj\"]\n", + "methods = [\"TAX-Pose\", \"Ours\"]\n", + "\n", + "# Get the dfs:\n", + "dfs = [get_results_table(id) for id in ids]\n", + "\n", + "# Prune the dfs:\n", + "dfs = [select_columns(df) for df in dfs]\n", + "\n", + 
"# Concatenate the dfs, and make methods the index.\n", + "df = pd.concat(dfs, keys=methods)\n", + "\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.style.format(precision=2).to_latex())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Write a function which takes a dataframe as input and returns a markdown table as output.\n", + "def to_markdown(df):\n", + " df.to_markdown()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "max_to_markdown(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Write a function which bolds the largest number in each column of a markdown table.\n", + "# def bold_largest_number(table):\n", + "# # Split the table into rows\n", + "# rows = table.split('\\n')\n", + "# # Split each row into columns\n", + "# columns = [row.split('|') for row in rows]\n", + "# # Remove the first and last columns\n", + "# columns = [column[1:-1] for column in columns]\n", + "# # Remove the first and last rows\n", + "# columns = columns[1:-1]\n", + "# # Convert each column to a list of integers\n", + "# columns = [[int(number) for number in column] for column in columns]\n", + "# # Find the largest number in each column\n", + "# largest_numbers = [max(column) for column in columns]\n", + "# # Replace the largest number in each column with a bold version of itself\n", + "# for i, largest_number in enumerate(largest_numbers):\n", + "# for j, number in enumerate(columns[i]):\n", + "# if number == largest_number:\n", + "# columns[i][j] = f'**{number}**'\n", + "# # Convert each column back to a list of strings\n", + "# columns = [[str(number) 
for number in column] for column in columns]\n", + "# # Join each column back into a row\n", + "# rows = ['|'.join(column) for column in columns]\n", + "# # Join each row back into a table\n", + "# table = '\\n'.join(rows)\n", + "# # Return the table\n", + "# return table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(df.to_markdown())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "to_markdown_with_largest_bolded(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/ndf_mug_grid.ipynb b/notebooks/ndf_mug_grid.ipynb new file mode 100644 index 0000000..e9f3efa --- /dev/null +++ b/notebooks/ndf_mug_grid.ipynb @@ -0,0 +1,104 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ls /home/beisner/code/rpad/taxpose/logs/eval_mug_upright_10/2023-08-15/15-31-03/teleport_imgs/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# There are jpg images in src_dir. 
Let's visualize them\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import os\n", + "\n", + "src_dir = \"/home/beisner/code/rpad/taxpose/logs/eval_mug_upright_10/2023-08-15/15-31-03/teleport_imgs/\"\n", + "\n", + "# Load images\n", + "imgs = []\n", + "for img_name in [f\"post_teleport_{i}.png\" for i in range(100)]:\n", + " img = plt.imread(os.path.join(src_dir, img_name))\n", + " imgs.append(img)\n", + "\n", + "# Plot images in a 10x10 grid\n", + "fig, axs = plt.subplots(10, 10, figsize=(10, 10))\n", + "for i in range(10):\n", + " for j in range(10):\n", + "\n", + "\n", + " axs[i, j].imshow(imgs[i * 10 + j])\n", + " axs[i, j].axis(\"off\")\n", + "\n", + " axs[i, j].set_xlim(350, 550)\n", + " axs[i, j].set_ylim(350, 150)\n", + "\n", + " # Add a title to each axis\n", + " axs[i, j].set_title(f\"Image {i * 10 + j}\")\n", + "plt.show()\n", + "\n", + "# Good Images:\n", + "imgs = [6, 17, 19, 21, 25, 27, 34, 37, 48, 74]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgs = [6, 17, 19, 21, 25, 27, 34, 37, 48, 74]\n", + "\n", + "from PIL import Image\n", + "\n", + "# make a 2x5 grid of images\n", + "fig, axs = plt.subplots(2, 5, figsize=(10, 4))\n", + "axs = axs.flatten()\n", + "\n", + "for i, img in enumerate(imgs):\n", + " axs[i].imshow(np.array(Image.open(f\"{src_dir}/post_teleport_{img}.png\")))\n", + " axs[i].axis(\"off\")\n", + " axs[i].set_xlim(350, 550)\n", + " axs[i].set_ylim(350, 150)\n", + "\n", + "plt.tight_layout()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "taxpose_repro", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/ndf_penetration_analysis.ipynb b/notebooks/ndf_penetration_analysis.ipynb new file mode 100644 index 0000000..f4f7f6c --- /dev/null +++ b/notebooks/ndf_penetration_analysis.ipynb @@ -0,0 +1,131 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "78d80778-5e18-494f-bc42-e553427eb4f3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29a6dfb0-a2da-4cd1-834d-4fc0eb68c5c7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49bd51eb-ed4e-40b6-8c73-4d42fb2eee5d", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "path = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_mlat_s100/2023-05-26_002259/10\"\n", + "res1 = np.load(os.path.join(path, \"trial_60/success_rate_eval_implicit.npz\"))\n", + "plt.hist(res1[\"penetration_list\"], bins=100)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e68321f-8acb-40ba-a5de-26d8c843faf7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "path = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_taxpose_(release)/2023-05-26_010902/10\"\n", + "res2 = np.load(os.path.join(path, \"trial_99/success_rate_eval_implicit.npz\"))\n", + "plt.hist(res2[\"penetration_list\"], bins=100)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48e18802-31be-419a-82cf-48f58e7b3ff5", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "x = np.linspace(0, max(max(res1[\"penetration_list\"]), max(res2[\"penetration_list\"])), 21)\n", + "y1 = 
np.stack([np.logical_and(res1[\"place_success_teleport_list\"], res1[\"penetration_list\"] < c ) for c in x]).mean(axis=-1)\n", + "y2 = np.stack([np.logical_and(res2[\"place_success_teleport_list\"], res2[\"penetration_list\"] < c ) for c in x]).mean(axis=-1)\n", + "plt.plot(x, y1, label=\"multilateration\")\n", + "plt.plot(x, y2, label=\"taxpose\")\n", + "plt.xlabel(\"threshold for maximum allowed amount of intersection (cm)\")\n", + "plt.ylabel(\"success rate at that threshold\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db9d2423-8c2d-4ac9-81fc-dbe3c8eefb12", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "y" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0fdc0bae-c089-46d5-b79f-46149334e602", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "x[1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b33d8ca2-8b5d-47df-a8d0-e9ce04ee2dbd", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/ndf_vis_symmetries.ipynb b/notebooks/ndf_vis_symmetries.ipynb new file mode 100644 index 0000000..9710d35 --- /dev/null +++ b/notebooks/ndf_vis_symmetries.ipynb @@ -0,0 +1,353 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "e8b706cc-2522-4a18-b19b-4524e46eba6c", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"e137e89c-9780-4e92-ad5c-5b55521a10a5", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "from rpad.visualize_3d.plots import pointcloud_fig, flow_fig\n", + "from rpad.visualize_3d.primitives import vector \n", + "from taxpose.datasets.ndf import compute_symmetry_features\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d3dc84d-a8de-48d3-ab61-0eb4459dfe63", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "d = \"/home/beisner/code/rpad/taxpose/notebooks/data\"\n", + "ndf_grasp_data = np.load(os.path.join(d, \"ndfeval_grasp_data.npz\"))\n", + "ndf_place_data = np.load(os.path.join(d, \"ndfeval_place_data.npz\"))\n", + "dm_data = np.load(os.path.join(d, \"raweval_data.npz\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6823803f-a80c-4710-94dd-f5fa08daa795", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "pointcloud_fig(\n", + " ndf_data[\"points_action_np\"],\n", + " downsample=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7460bf69-e35d-4425-8d42-e0f9539976aa", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "pointcloud_fig(\n", + " dm_data[\"points_action_np\"],\n", + " downsample=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b0ec435-2b66-487e-ac16-da691aa3d741", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "pointcloud_fig(\n", + " ndf_data[\"points_anchor_np\"],\n", + " downsample=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5477b4e6-8f34-4f6b-ad18-7a064f8b1749", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "pointcloud_fig(\n", + " dm_data[\"points_anchor_np\"],\n", + " downsample=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65870788-a0a9-4b81-a6fa-454f2b8a09e7", + "metadata": { + "tags": [] + }, 
+ "outputs": [], + "source": [ + "\n", + "dm_act_sym, dm_anc_sym, dm_act_rgb, dm_anc_rgb = compute_symmetry_features(\n", + " dm_data[\"points_action_np\"][None],\n", + " dm_data[\"points_anchor_np\"][None],\n", + " \"bottle\",\n", + " \"grasp\",\n", + " 2,\n", + " 0,\n", + " True,\n", + " skip_symmetry=False,\n", + ")\n", + "pointcloud_fig(\n", + " dm_data[\"points_action_np\"],\n", + " downsample=1,\n", + " colors=dm_act_rgb[0],\n", + ").show()\n", + "pointcloud_fig(\n", + " dm_data[\"points_anchor_np\"],\n", + " downsample=1,\n", + " colors=dm_anc_rgb[0],\n", + ").show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d0ebb77-88b6-4bb1-b1ff-012eb8a8c0c3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "ndf_act_sym, ndf_anc_sym, ndf_act_rgb, ndf_anc_rgb = compute_symmetry_features(\n", + " ndf_data[\"points_action_np\"][None],\n", + " ndf_data[\"points_anchor_np\"][None],\n", + " \"bottle\",\n", + " \"grasp\",\n", + " 2,\n", + " 0,\n", + " True,\n", + " skip_symmetry=False,\n", + ")\n", + "pointcloud_fig(\n", + " ndf_data[\"points_action_np\"],\n", + " downsample=1,\n", + " colors=ndf_act_rgb[0],\n", + ").show()\n", + "pointcloud_fig(\n", + " ndf_data[\"points_anchor_np\"],\n", + " downsample=1,\n", + " colors=ndf_anc_rgb[0],\n", + ").show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f669fa6-70bf-41ee-9899-27904e77c9eb", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c9b1904-02eb-4c7b-8f19-5fd2ed8c8e4a", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from taxpose.datasets.symmetry_utils import scalars_to_rgb, gripper_symmetry_labels, rotational_symmetry_labels" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "397689a7-2e37-4054-a6bf-0d913af890f9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "g_sym_labels, g_axis, g_centroid = 
gripper_symmetry_labels(ndf_data[\"points_action_np\"])\n", + "g_sym_colors = scalars_to_rgb(g_sym_labels)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c571db3-be86-49a9-983f-5e081cbb6df8", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "g_sym_labels, g_axis, g_centroid = gripper_symmetry_labels(ndf_data[\"points_action_np\"])\n", + "g_sym_colors = scalars_to_rgb(g_sym_labels)\n", + "\n", + "fig = pointcloud_fig(\n", + " ndf_data[\"points_action_np\"],\n", + " downsample=1,\n", + " colors=g_sym_colors,\n", + ")\n", + "\n", + "fig.add_trace(vector(*g_centroid, *g_axis, color=[0, 1, 0], scene=\"scene1\"))\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "852d5a64-0712-4167-88a9-85e7f35075d5", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "g_sym_labels, g_axis, g_centroid = gripper_symmetry_labels(dm_data[\"points_action_np\"])\n", + "g_sym_colors = scalars_to_rgb(g_sym_labels)\n", + "\n", + "fig = pointcloud_fig(\n", + " dm_data[\"points_action_np\"],\n", + " downsample=1,\n", + " colors=g_sym_colors,\n", + ")\n", + "\n", + "fig.add_trace(vector(*g_centroid, *g_axis, color=[0, 1, 0], scene=\"scene1\"))\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eabe6b22-5870-40e6-a3d9-7a88c066b4a8", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "g_centroid.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "006f5fd7-83e2-42f0-be6f-59f9a4227002", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "ndf_data[\"points_anchor_np\"].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6bd488b3-9955-4c94-9a44-2e0efd7f66a6", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "g_axis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4d8958e-a3c4-41ad-8d95-8a4788fd52cc", + "metadata": { + "tags": [] + }, + 
"outputs": [], + "source": [ + "ndf_data[\"points_action_np\"].mean(axis=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3b9f42d-a48a-425d-a126-badad5cee82e", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "for i in range(3):\n", + " b_sym_labels, b_axis1, b_axis2, b_centroid = rotational_symmetry_labels(\n", + " ndf_data[\"points_anchor_np\"],\n", + " \"bowl\",\n", + " # g_centroid,\n", + " )\n", + " b_sym_colors = scalars_to_rgb(b_sym_labels)\n", + "\n", + " fig = pointcloud_fig(\n", + " ndf_data[\"points_anchor_np\"],\n", + " downsample=1,\n", + " colors=b_sym_colors,\n", + " )\n", + "\n", + " fig.add_trace(vector(*b_centroid, *b_axis1, color=[0, 1, 0], scene=\"scene1\"))\n", + " fig.add_trace(vector(*b_centroid, *b_axis2, color=[1, 0, 0], scene=\"scene1\"))\n", + " fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6cfa949a-4916-4064-94f2-ccfff0006430", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/ndf_visualize_eval_imgs.ipynb b/notebooks/ndf_visualize_eval_imgs.ipynb new file mode 100644 index 0000000..2400878 --- /dev/null +++ b/notebooks/ndf_visualize_eval_imgs.ipynb @@ -0,0 +1,123 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "82cc4ffc-e076-44bc-894d-dd2850a884cc", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import PIL\n", + "from mpl_toolkits.axes_grid1 import ImageGrid\n" + ] + }, 
+ { + "cell_type": "code", + "execution_count": null, + "id": "28ec50e2-b902-4217-ac9c-19d4cdab568b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "\n", + "# Taxpose, upright\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_taxpose_(release)/2023-05-24_130144/10\"\n", + "\n", + "# MLat100, upright\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_mlat_s100/2023-05-24_191149/10\"\n", + "\n", + "# MLat+VN, upright\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_mlat_s256_vnn/2023-05-24_225144/10\"\n", + "\n", + "# Taxpose, arbitrary\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_arbitrary_taxpose_(release)/2023-05-24_132420/10\"\n", + "\n", + "# MLat100, arbitrary\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_arbitrary_mlat_s100/2023-05-24_210016/10\"\n", + "\n", + "# Mlat+VN, arbitrary\n", + "# eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_mlat_s256_vnn/2023-05-24_225144/10\"\n", + "\n", + "# MLat100, upright, eval on\n", + "eval_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_mlat_s100/2023-05-24_234831/10\"\n", + "\n", + "img_dir = os.path.join(eval_dir, \"teleport_imgs\")\n", + "res_file = os.path.join(eval_dir, \"trial_99/success_rate_eval_implicit.npz\")\n", + "results_dict = np.load(res_file)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f78d7c1b-04ec-4d29-b004-fa0649660235", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "np.asarray(PIL.Image.open(os.path.join(img_dir, f\"teleport_{i}.png\"))).shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "507cce75-32fd-41cf-bc60-d0e914c46170", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig = plt.figure(figsize=(32., 16.))\n", + "\n", + "grid = 
ImageGrid(fig, 111, nrows_ncols=(5, 20), share_all=True, axes_pad=0.3)\n", + "grid[0].get_yaxis().set_ticks([])\n", + "grid[0].get_xaxis().set_ticks([])\n", + "\n", + "for i, ax in enumerate(grid):\n", + " ax.imshow(PIL.Image.open(os.path.join(img_dir, f\"teleport_{i}.png\")))\n", + " ax.set_xlim(350, 550)\n", + " ax.set_ylim(350, 150)\n", + " \n", + " succ = results_dict[\"place_success_teleport_list\"][i]\n", + " ax.set_title(\"success\" if succ else \"fail\", color=\"blue\" if succ else \"red\")\n", + " \n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e72d7065-8c89-4af1-9520-08228a921c17", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/visualize_eval_predictions.ipynb b/notebooks/ndf_visualize_eval_predictions.ipynb similarity index 76% rename from notebooks/visualize_eval_predictions.ipynb rename to notebooks/ndf_visualize_eval_predictions.ipynb index cc5e22e..37bf4f1 100644 --- a/notebooks/visualize_eval_predictions.ipynb +++ b/notebooks/ndf_visualize_eval_predictions.ipynb @@ -34,8 +34,12 @@ "source": [ "# pred_dir = \"../results/ndf/mug/upright/release/2023-05-03_152512/predictions\"\n", "# pred_dir = \"/home/beisner/code/rpad/taxpose/mug_place\"\n", - "pred_dir = \"/home/beisner/code/multi_project/taxpose/results/ndf/mug/upright/release/2023-05-03_220706/repro_mug\"\n", - "result_ix = 0" + "# pred_dir = \"/home/beisner/code/multi_project/taxpose/results/ndf/mug/upright/release/2023-05-03_220706/repro_mug\"\n", + "# pred_dir = 
\"/home/beisner/code/multi_project/taxpose/results/ndf/mug/upright/multilateration/2023-05-04_000959/predictions/\"\n", + "# pred_dir = \"/home/beisner/code/multi_project/taxpose/results/ndf/mug/upright/multilateration/2023-05-04_001514/predictions/\"\n", + "# pred_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_upright_taxpose_(release)/2023-05-23_230436/10/pointclouds\"\n", + "pred_dir = \"/home/beisner/code/multi_project/taxpose/logs/ndf_evals/mug_arbitrary_taxpose_(release)/2023-05-24_110143/10/pointclouds\"\n", + "result_ix = 9" ] }, { @@ -72,7 +76,37 @@ "# pre_grasp_all_pts = load_and_print(f\"{pred_dir}/{result_ix}_pre_grasp_all_points.npz\")\n", "pre_grasp_obj_pts = load_and_print(f\"{pred_dir}/{result_ix}_pre_grasp_obj_points.npz\")\n", "# teleport_all_pts = load_and_print(f\"{pred_dir}/{result_ix}_teleport_all_points.npz\")\n", - "teleport_obj_pts = load_and_print(f\"{pred_dir}/{result_ix}_teleport_obj_points.npz\")" + "teleport_obj_pts = load_and_print(f\"{pred_dir}/{result_ix}_teleport_obj_points.npz\")\n", + "# pre_teleport_obj_pts = load_and_print(f\"{pred_dir}/{result_ix}_init_teleport_obj_points.npz\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "init_obj_pts[\"points_mug_raw\"].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "np.concatenate(\n", + " [\n", + " init_obj_pts[\"points_mug_raw\"],\n", + " init_obj_pts[\"points_gripper_raw\"],\n", + " init_obj_pts[\"points_rack_raw\"],\n", + " ],\n", + " axis=1,\n", + " ).squeeze().shape" ] }, { @@ -91,13 +125,14 @@ " cols=2, \n", " subplot_titles=[\n", " \"init_obj_points\",\n", + " \"pre_teleport_points\",\n", " \"pre_grasp_obj_points\",\n", " \"post_grasp_obj_points\",\n", " \"teleport_obj_points\",\n", " \"post_teleport_obj_points\",\n", " ],\n", " specs=[\n", - " [{\"type\": \"scene\"}, None],\n", + " 
[{\"type\": \"scene\"}, {\"type\": \"scene\"}],\n", " [{\"type\": \"scene\"}, {\"type\": \"scene\"}],\n", " [{\"type\": \"scene\"}, {\"type\": \"scene\"}],\n", " ],\n", @@ -135,6 +170,7 @@ " )\n", "\n", "# Input figure.\n", + "n = init_obj_pts[\"points_mug_raw\"].shape[1]\n", "add_seg_figure(\n", " np.concatenate(\n", " [\n", @@ -144,7 +180,7 @@ " ],\n", " axis=1,\n", " ).squeeze(),\n", - " labels=np.repeat([0, 1, 2], [1024, 1024, 1024]),\n", + " labels=np.repeat([0, 1, 2], [n, n, n]),\n", " labelmap={-1: \"background\", 0: \"mug\", 1: \"gripper\", 2: \"rack\"}, \n", " scene=\"scene1\",\n", " row=1,\n", @@ -152,7 +188,7 @@ ")\n", "\n", "\n", - "\n", + "add_inter_seg_figure(pre_grasp_obj_pts, \"scene6\", 1, 2)\n", "add_inter_seg_figure(pre_grasp_obj_pts, \"scene2\", 2, 1)\n", "add_inter_seg_figure(post_grasp_obj_pts, \"scene3\", 2, 2)\n", "add_inter_seg_figure(teleport_obj_pts, \"scene4\", 3, 1)\n", diff --git a/notebooks/original_taxpose_ndf_symmetry.ipynb b/notebooks/original_taxpose_ndf_symmetry.ipynb new file mode 100644 index 0000000..4221658 --- /dev/null +++ b/notebooks/original_taxpose_ndf_symmetry.ipynb @@ -0,0 +1,1541 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "4fc347fe", + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import os\n", + "import torch\n", + "\n", + "os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'\n", + "os.environ['CUDA_VISIBLE_DEVICES']='1'\n", + "\n", + "import numpy as np\n", + "import plotly.graph_objects as go" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a22f1aa6", + "metadata": {}, + "outputs": [], + "source": [ + "def toDisplay(x, target_dim = 2):\n", + " while(x.dim() > target_dim):\n", + " x = x[0]\n", + " return x.detach().cpu().numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea342299", + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "sys.path.insert(1, 
'/home/exx/Documents/equivariant_pose_graph/python')\n", + "from equivariant_pose_graph.models.transformer_flow import ResidualFlow, ResidualFlow_V1\n", + "from equivariant_pose_graph.training.flow_equivariance_training_module import EquivarianceTrainingModule\n", + "\n", + "# from ndf_robot.eval.test_trained_model_place import load_data\n", + "from pytorch3d.ops import sample_farthest_points\n", + "from pathlib import Path\n", + "import torch.nn.functional as F\n", + "from equivariant_pose_graph.utils.se3 import random_se3\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c764f281", + "metadata": {}, + "outputs": [], + "source": [ + "from equivariant_pose_graph.models.transformer_flow import ResidualFlow_DiffEmbTransformer\n", + "from equivariant_pose_graph.training.flow_equivariance_training_module_nocentering_eval_init_symmetry import EquivarianceTestingModule\n", + "checkpoint_file='/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow/residual_flow_occlusion_bottle_grasp_sym_breaking/2022-07-12_010420/residual_flow_occlusion_bottle_grasp_sym_breaking/3l22wmgi/checkpoints/epoch_431_global_step_54000.ckpt'\n", + " \n", + "network = ResidualFlow_DiffEmbTransformer(\n", + " emb_nn='dgcnn', return_flow_component=False, center_feature=True,\n", + " inital_sampling_ratio=1)\n", + "model = EquivarianceTestingModule(\n", + " network,\n", + " lr=1e-4,\n", + " image_log_period=100,\n", + " weight_normalize='l1',\n", + " softmax_temperature = 0.1,\n", + " loop=1, \n", + " object_type = 'bottle'\n", + ")\n", + "\n", + "model.cuda()\n", + "model.load_state_dict(torch.load(checkpoint_file)['state_dict'])\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6ee2b02", + "metadata": {}, + "outputs": [], + "source": [ + "def plot(points_action, points_anchor):\n", + " colors = [\n", + " '#1f77b4', # muted blue\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " 
'#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#8c564b', # chestnut brown\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf' # blue-teal\n", + "]\n", + " skip = 1\n", + " points_action_dp = toDisplay(points_action)\n", + " points_anchor_dp = toDisplay(points_anchor)\n", + " go_data=[\n", + " go.Scatter3d(x=points_action_dp[::skip,0], y=points_action_dp[::skip,1], z=points_action_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[0],\n", + " symbol='circle')),\n", + " go.Scatter3d(x=points_anchor_dp[::skip,0], y=points_anchor_dp[::skip,1], z=points_anchor_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[1],\n", + " symbol='circle')),\n", + " ]\n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + "\n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()\n", + "\n", + "def plot_multi(plist):\n", + " colors = [\n", + " '#1f77b4', # muted blue\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " '#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#8c564b', # chestnut brown\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf' # blue-teal\n", + "]\n", + " skip = 1\n", + " go_data = []\n", + " for i in range(len(plist)):\n", + " p_dp = toDisplay(plist[i])\n", + " plot = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[i],\n", + " symbol='circle'))\n", + " go_data.append(plot)\n", + " \n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + " \n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()\n", + "def plot_color(plist, color_list):\n", + " colors = [\n", + " '#8c564b', # chestnut brown\n", + " 
'#1f77b4', # muted blue\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " '#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf' # blue-teal\n", + "]\n", + " skip = 1\n", + " go_data = []\n", + " for i in range(len(plist)):\n", + " if color_list[i]==None:\n", + " p_dp = toDisplay(plist[i])\n", + " \n", + " plot = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[i],\n", + " symbol='circle'))\n", + " go_data.append(plot)\n", + " else:\n", + " p_dp = toDisplay(plist[i])\n", + " color_list[i] = toDisplay(color_list[i])\n", + " plot = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=color_list[i],\n", + " symbol='circle'))\n", + " go_data.append(plot)\n", + " \n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + " \n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()\n", + " \n", + "\n", + "\n", + "def xyz2homo(xyz):\n", + " \"\"\"\n", + " xyz:shape 1,num_points, 3\n", + " \"\"\"\n", + " num_points = xyz.shape[1]\n", + " homo = torch.cat([xyz.squeeze(0).detach().cpu(),torch.ones(num_points,1)],dim=-1)\n", + " return homo\n", + "def transform(T,points):\n", + " \"\"\"\n", + " points: num_points, 4\n", + " \"\"\"\n", + " points = torch.permute(points,(-1,-2)) # 4,1000\n", + " apply_here= torch.from_numpy(T).cuda()@points.cuda()\n", + " apply_here = torch.permute(apply_here, (-1, -2))\n", + " return apply_here[:,:3]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d94e3381", + "metadata": {}, + "outputs": [], + "source": [ + "def load_data(num_points, point_data, action_class, anchor_class):\n", + " clouds = point_data['clouds'] \n", + " classes = point_data['classes']\n", 
+ " print(\"clouds.shape:{}\".format(clouds.shape))\n", + " print(\"classes.shape:{}\".format(classes.shape))\n", + " points_raw_np = clouds\n", + " classes_raw_np = classes\n", + "\n", + " points_action_np = points_raw_np[classes_raw_np == action_class].copy()\n", + "# points_action_mean_np = points_action_np.mean(axis=0)\n", + "# points_action_np = points_action_np - points_action_mean_np\n", + " \n", + " points_anchor_np = points_raw_np[classes_raw_np == anchor_class].copy()\n", + "# points_anchor_np = points_anchor_np - points_action_mean_np\n", + "\n", + " points_action = torch.from_numpy(points_action_np).float().unsqueeze(0)\n", + " points_anchor = torch.from_numpy(points_anchor_np).float().unsqueeze(0)\n", + " points_action, points_anchor = subsample(num_points,points_action, points_anchor)\n", + " return points_action.cuda(), points_anchor.cuda()\n", + "\n", + "def load_data2(num_points, point_data, action_class, anchor_class):\n", + " clouds = point_data['clouds'] \n", + " classes = point_data['classes']\n", + " pred_T_action_transformed = point_data[\"pred_T_action_transformed\"]\n", + " points_raw_np = clouds\n", + " classes_raw_np = classes\n", + "\n", + " points_action_np = points_raw_np[classes_raw_np == action_class].copy()\n", + " points_action_mean_np = points_action_np.mean(axis=0)\n", + " pred_T_action_transformed = pred_T_action_transformed+ points_action_mean_np\n", + " print(\"points_action_mean_np.shape:{}\".format(points_action_mean_np.shape))\n", + " \n", + " points_anchor_np = points_raw_np[classes_raw_np == anchor_class].copy()\n", + "# points_anchor_np = points_anchor_np - points_action_mean_np\n", + "\n", + " points_action = torch.from_numpy(points_action_np).float().unsqueeze(0)\n", + " points_anchor = torch.from_numpy(points_anchor_np).float().unsqueeze(0)\n", + " points_action, points_anchor = subsample(num_points,points_action, points_anchor)\n", + " return torch.from_numpy(pred_T_action_transformed).cuda()\n", + "\n", + "def 
subsample(num_points,points_action,points_anchor):\n", + " if(points_action.shape[1] > num_points):\n", + " points_action, _ = sample_farthest_points(points_action, \n", + " K=num_points, random_start_point=True)\n", + " elif(points_action.shape[1] < num_points):\n", + " raise NotImplementedError(f'Action point cloud is smaller than cloud size ({points_action.shape[1]} < {num_points})')\n", + "\n", + " if(points_anchor.shape[1] > num_points):\n", + " points_anchor, _ = sample_farthest_points(points_anchor, \n", + " K=num_points, random_start_point=True)\n", + " elif(points_anchor.shape[1] < num_points):\n", + " raise NotImplementedError(f'Anchor point cloud is smaller than cloud size ({points_anchor.shape[1]} < {num_points})')\n", + " \n", + " return points_action, points_anchor\n", + "\n", + "# def get_sym_label(num_points, point_data, action_class, anchor_class):\n", + "# clouds = point_data['clouds'] \n", + "# classes = point_data['classes']\n", + "# print(\"clouds.shape:{}\".format(clouds.shape))\n", + "# print(\"classes.shape:{}\".format(classes.shape))\n", + "# points_raw_np = clouds\n", + "# classes_raw_np = classes\n", + "# assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + "# if action_class == 0:\n", + "# sym_breaking_class = action_class\n", + "# center_class = anchor_class\n", + "# elif anchor_class == 0:\n", + "# sym_breaking_class = anchor_class\n", + "# center_class = action_class\n", + "\n", + "# points_sym = points_raw_np[classes_raw_np == sym_breaking_class].copy()\n", + "# points_nonsym = points_raw_np[classes_raw_np == center_class].copy()\n", + "# non_sym_center = points_nonsym.mean(axis=0)\n", + "# sym_center = points_sym.mean(axis=0)\n", + "# sym2nonsym = torch.from_numpy(non_sym_center - sym_center) #(3,)\n", + "# sym_vec = torch.from_numpy(points_sym - sym_center)\n", + "# sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)).unsqueeze(-1) # num_points, 1\n", + " \n", 
+ "# return sym_cls \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16758500", + "metadata": {}, + "outputs": [], + "source": [ + "def project_to_xy(vector):\n", + " \"\"\"\n", + " vector: num_poins, 3\n", + " \"\"\"\n", + " if len(vector.shape)>1:\n", + " vector[:,-1] = 0\n", + " elif len(vector.shape)==1:\n", + " vector[-1] = 0\n", + " return vector\n", + "\n", + "def get_sym_label(action_cloud, anchor_cloud, action_class, anchor_class):\n", + "\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " sym_center = points_sym.mean(axis=0)\n", + " sym2nonsym = (non_sym_center - sym_center) \n", + " sym2nonsym = project_to_xy(sym2nonsym) # [a,b,0]\n", + " orth_sym2nonsym = torch.zeros(sym2nonsym.shape).to(sym2nonsym.device)\n", + " orth_sym2nonsym[0] = -orth_sym2nonsym[1]\n", + " orth_sym2nonsym[1] = orth_sym2nonsym[0]\n", + " \n", + " sym_vec = points_sym- sym_center\n", + " sym_vec = project_to_xy(sym_vec)\n", + " \n", + " sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)).unsqueeze(0) # num_points, 1\n", + " \n", + " return sym_cls \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a588fdec", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 0\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'teleport'\n", + "# data_path = 
Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_grasp_place_demo_12_new') \n", + "data_path = Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_bottle_upright_Seed2') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "sym_cls = get_sym_label(action_cloud = points_action, anchor_cloud = points_anchor, action_class= 2, anchor_class= 0) # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "points_anchor_left = points_anchor[sym_cls>0]\n", + "points_anchor_right = points_anchor[sym_cls<=0]\n", + "plot_multi([points_anchor_left,points_anchor_right,points_action])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b7e2ffc", + "metadata": {}, + "outputs": [], + "source": [ + "def plot_multi_pca(pcd, axis, index_list = [0,1,2]):\n", + " \n", + " \"\"\"\n", + " pcd: point cloud data of shape (1,num_points, 3)\n", + " axis: 3 principal component axis \n", + " \n", + " \"\"\"\n", + " colors = [\n", + " '#1f77b4', # muted blue\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " '#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#8c564b', # chestnut brown\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf' # blue-teal\n", + " ]\n", + " go_data = []\n", + " skip=1\n", + " p_dp = toDisplay(pcd)\n", + " points = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[-1],\n", + " symbol='circle'))\n", + " go_data.append(points)\n", + " points_mean = pcd[0].mean(axis=0).detach().cpu().numpy()\n", + " \n", + " for i in index_list:\n", + " vector = go.Scatter3d( x = [points_mean[0],points_mean[0]+axis[i,0]],\n", + " y = 
[points_mean[1],points_mean[1]+axis[i,1]],\n", + " z = [points_mean[2],points_mean[2]+axis[i,2]],\n", + " marker = dict( size = 1,\n", + " color = colors[i]),\n", + " line = dict(color = colors[i], width = 6))\n", + " go_data.append(vector)\n", + " \n", + " \n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + " \n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()\n", + "def plot_multi_axis(pcd, axis):\n", + " \n", + " \"\"\"\n", + " pcd: point cloud data of shape (1,num_points, 3)\n", + " axis: 3 principal component axis \n", + " \n", + " \"\"\"\n", + " colors = [\n", + " '#1f77b4', # muted blue\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " '#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#8c564b', # chestnut brown\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf' # blue-teal\n", + " ]\n", + " go_data = []\n", + " skip=1\n", + " p_dp = toDisplay(pcd)\n", + " points = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[-1],\n", + " symbol='circle'))\n", + " go_data.append(points)\n", + " points_mean = pcd[0].mean(axis=0).detach().cpu().numpy()\n", + " \n", + "\n", + " vector = go.Scatter3d( x = [points_mean[0],points_mean[0]+axis[0]],\n", + " y = [points_mean[1],points_mean[1]+axis[1]],\n", + " z = [points_mean[2],points_mean[2]+axis[2]],\n", + " marker = dict( size = 1,\n", + " color = colors[0]),\n", + " line = dict(color = colors[0], width = 6))\n", + " go_data.append(vector)\n", + " \n", + " \n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + " \n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()\n", + " \n", + "def plot_axis(plist, list_axis):\n", + " \n", + " \"\"\"\n", + " pcd: point cloud data of shape 
(1,num_points, 3)\n", + " list_axis: [[axis_start1, axis_end1],[axis_start2, axis_end2],...]\n", + " \n", + " \"\"\"\n", + " colors = [\n", + " '#ff7f0e', # safety orange\n", + " '#2ca02c', # cooked asparagus green\n", + " '#d62728', # brick red\n", + " '#9467bd', # muted purple\n", + " '#8c564b', # chestnut brown\n", + " '#e377c2', # raspberry yogurt pink\n", + " '#7f7f7f', # middle gray\n", + " '#bcbd22', # curry yellow-green\n", + " '#17becf', # blue-teal\n", + " '#1f77b4', # muted blue\n", + " ]\n", + " go_data = []\n", + " skip=1\n", + " \n", + " for j in range(len(plist)):\n", + " p_dp = toDisplay(plist[j])\n", + " plot = go.Scatter3d(x=p_dp[::skip,0], y=p_dp[::skip,1], z=p_dp[::skip,2], \n", + " mode='markers', marker=dict(size=1, color=colors[j],\n", + " symbol='circle'))\n", + " go_data.append(plot)\n", + " for i in range(len(list_axis)):\n", + " axis_start = list_axis[i][0]\n", + " axis_end = list_axis[i][1]\n", + " vector = go.Scatter3d( x = [axis_start[0],axis_end[0]],\n", + " y = [axis_start[1],axis_end[1]],\n", + " z = [axis_start[2],axis_end[2]],\n", + " marker = dict( size = 1,\n", + " color = colors[i+j]),\n", + " line = dict(color = colors[i+j], width = 6))\n", + " go_data.append(vector)\n", + "\n", + " \n", + " layout = go.Layout(\n", + " scene=dict(\n", + " aspectmode='data'\n", + " )\n", + " )\n", + " \n", + " fig = go.Figure(data=go_data, layout=layout)\n", + " fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c4d06aaf", + "metadata": {}, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "def to_np(tensor):\n", + " return tensor.detach().cpu().numpy()\n", + "\n", + "def to_torch(numpy, device_tensor = None):\n", + " tensor = torch.from_numpy(numpy).double()\n", + " if device_tensor != None:\n", + " tensor = tensor.to(device_tensor.device)\n", + " return tensor\n", + "\n", + "def vec_norm(vec):\n", + " \"\"\"\n", + " vec: (3) torch tensor\n", + " \"\"\"\n", + " return vec/torch.norm(vec)\n", + "\n", 
+ "def dot_product(x,y):\n", + " \"\"\"\n", + " x: (3) torch tensor\n", + " y: (3) torch tensor\n", + " \"\"\"\n", + " assert len(x.shape) ==1, \"x should be of shape (3), but got (.., ..)\"\n", + " assert len(y.shape) ==1, \"y should be of shape (3), but got (.., ..)\"\n", + " # norm vec\n", + " return vec_norm(x).T@vec_norm(y)\n", + "\n", + "def project_to_axis(vector,axis):\n", + " \"\"\"\n", + " vector: (3)\n", + " axis: 3, can be not unit-normed\n", + " \"\"\"\n", + " # make axis unit normed\n", + " axis = vec_norm(axis)\n", + " vector = vector.double()\n", + " projected_vec = (vector@axis)*axis\n", + " return projected_vec\n", + "\n", + "def get_sym_label_pca(action_cloud, anchor_cloud, action_class, anchor_class):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[0]\n", + " \n", + " major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " projected_point = project_to_axis(vector = non_sym_center-pcd_mean, axis = major_axis) + pcd_mean \n", + " \n", + " print(\"dot product should be 1\")\n", + " self_dot = dot_product(projected_point-pcd_mean, 
major_axis)\n", + " print(self_dot.item()) \n", + " \n", + " air_point = non_sym_center+(pcd_mean-projected_point)\n", + " sym_vec = points_sym - pcd_mean\n", + " sym2nonsym = air_point-pcd_mean\n", + " vec = to_np(projected_point - pcd_mean)\n", + " axes = [[to_np(pcd_mean), to_np(projected_point)],\n", + " [to_np(projected_point), to_np(non_sym_center)],\n", + " [to_np(pcd_mean), to_np(major_axis+pcd_mean)],\n", + " [to_np(pcd_mean), to_np(air_point)]]\n", + " # plot_axis([anchor_cloud, action_cloud], axes) \n", + "# print(torch.matmul(sym_vec, sym2nonsym))\n", + " \n", + " projected_point_0 = project_to_axis(vector = points_sym[0]-pcd_mean, axis = major_axis) + pcd_mean \n", + " \n", + " sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)).unsqueeze(0) # num_points, 1\n", + " return sym_cls \n", + "\n", + "def get_sym_label_pca_cts(action_cloud, anchor_cloud, action_class, anchor_class):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[0]\n", + " \n", + " major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " projected_point = project_to_axis(vector = 
non_sym_center-pcd_mean, axis = major_axis) + pcd_mean \n", + " \n", + " print(\"dot product should be 1\")\n", + " self_dot = dot_product(projected_point-pcd_mean, major_axis)\n", + " print(self_dot.item()) \n", + " \n", + " air_point = non_sym_center+(pcd_mean-projected_point)\n", + " sym_vec = points_sym - pcd_mean\n", + " sym2nonsym = air_point-pcd_mean\n", + " vec = to_np(projected_point - pcd_mean)\n", + " axes = [[to_np(pcd_mean), to_np(projected_point)],\n", + " [to_np(projected_point), to_np(non_sym_center)],\n", + " [to_np(pcd_mean), to_np(major_axis+pcd_mean)],\n", + " [to_np(pcd_mean), to_np(air_point)]]\n", + " # plot_axis([anchor_cloud, action_cloud], axes) \n", + "# print(torch.matmul(sym_vec, sym2nonsym))\n", + " mcenter_point = sym_vec[0]\n", + "# # radial distance from major axis\n", + "# projected_point_0 = project_to_axis(vector = mcenter_point, axis = major_axis) \n", + "# dist = torch.norm(projected_point_0 - mcenter_point) # radial distance from major axis\n", + "\n", + "# projected_point_sym2nonsym = project_to_axis(vector = mcenter_point, axis = sym2nonsym) \n", + "# dist_projected_point_sym2nonsym = torch.norm(projected_point_sym2nonsym) \n", + " axes0 = [\n", + " # [to_np(projected_point_0+pcd_mean), to_np(mcenter_point+pcd_mean)],\n", + " [to_np(projected_point_sym2nonsym+pcd_mean), to_np(pcd_mean)],\n", + " [to_np(sym2nonsym+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(mcenter_point+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(major_axis+pcd_mean),to_np(pcd_mean)]]\n", + " plot_axis([anchor_cloud, action_cloud],axes0)\n", + " dotproduct = torch.matmul(mcenter_point,sym2nonsym)/torch.norm(sym2nonsym)\n", + " \n", + " project_to_axis(vector = points_sym[0]-pcd_mean, axis = major_axis)\n", + " sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)).unsqueeze(0) # num_points, 1\n", + "\n", + " return sym_cls " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5546af53", + "metadata": {}, + "outputs": [], + "source": [ + 
"def get_sym_label_pca_bowl_grasp(action_cloud, anchor_cloud, action_class, anchor_class, object_type = 'bowl'):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[-1]\n", + " if object_type == 'bottle':\n", + " major_axis = evecs[0]\n", + " \n", + " plot_multi_pca(points_sym.unsqueeze(0), axis = evecs)\n", + " \n", + " major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " projected_point = project_to_axis(vector = non_sym_center-pcd_mean, axis = major_axis) + pcd_mean \n", + " \n", + " print(\"dot product should be 1\")\n", + " self_dot = dot_product(projected_point-pcd_mean, major_axis)\n", + " print(self_dot.item()) \n", + " \n", + " air_point = non_sym_center+(pcd_mean-projected_point)\n", + " sym_vec = points_sym - pcd_mean\n", + " sym2nonsym = air_point-pcd_mean\n", + " if object_type == 'bottle':\n", + " sym2nonsym = torch.cross(sym2nonsym, major_axis)\n", + " vec = to_np(projected_point - pcd_mean)\n", + " axes = [[to_np(pcd_mean), to_np(projected_point)],\n", + " [to_np(projected_point), to_np(non_sym_center)],\n", + " [to_np(pcd_mean), 
to_np(major_axis+pcd_mean)],\n", + " [to_np(pcd_mean), to_np(air_point)]]\n", + " # plot_axis([anchor_cloud, action_cloud], axes) \n", + "# print(torch.matmul(sym_vec, sym2nonsym))\n", + " mcenter_point = sym_vec[0]\n", + " # radial distance from major axis\n", + " projected_point_0 = project_to_axis(vector = mcenter_point, axis = major_axis) \n", + " dist = torch.norm(projected_point_0 - mcenter_point) # radial distance from major axis\n", + "\n", + " projected_point_sym2nonsym = project_to_axis(vector = mcenter_point, axis = sym2nonsym) \n", + " dist_projected_point_sym2nonsym = torch.norm(projected_point_sym2nonsym) \n", + " axes0 = [\n", + " [to_np(non_sym_center), to_np(pcd_mean)],\n", + " [to_np(projected_point_sym2nonsym+pcd_mean), to_np(pcd_mean)],\n", + " [to_np(sym2nonsym+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(mcenter_point+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(major_axis+pcd_mean),to_np(pcd_mean)]]\n", + " plot_axis([anchor_cloud, action_cloud],axes0)\n", + " dotproduct = torch.matmul(mcenter_point,sym2nonsym)/torch.norm(sym2nonsym)\n", + " \n", + " project_to_axis(vector = points_sym[0]-pcd_mean, axis = major_axis)\n", + " sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)) # 1, num_points \n", + " cts_cls = torch.matmul(sym_vec,sym2nonsym)/torch.norm(sym2nonsym) \n", + " cts_cls_norm = cts_cls/torch.max(torch.abs(cts_cls))\n", + " print(\"cts_cls_norm\", torch.max(cts_cls_norm), torch.min(cts_cls_norm), cts_cls_norm.shape)\n", + " print(\"cts_cls.shape\",cts_cls.shape,points_sym.shape)\n", + " # coloring cts_cls for vis (+: red, -: blue)\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*255)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[cts_cls>=0,0] = color[cts_cls>=0]\n", + " color_cts[cts_cls<0,2] = torch.abs(color[cts_cls<0])\n", + " color_cts[cts_cls>=0]\n", + " \n", + " # another color scheme (+: red, -: green, 0: yellow )\n", + " # 
https://stackoverflow.com/questions/6394304/algorithm-how-do-i-fade-from-red-to-green-via-yellow-using-rgb-values\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*(255/2))+(255/2)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[:,0] = torch.minimum(torch.ones(color.shape).to(color.device)*255,color*2)\n", + " color_cts[:,1] = torch.minimum(torch.ones(color.shape).to(color.device)*255,(255-color)*2)\n", + " plot_color([points_sym,points_nonsym],[color_cts,None])\n", + " return sym_cls.unsqueeze(0), cts_cls.unsqueeze(0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "952fc50c", + "metadata": {}, + "outputs": [], + "source": [ + "def get_sym_label_pca_bowl_place(action_cloud, anchor_cloud, action_class, anchor_class, object_type = 'bowl'):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[0]\n", + " major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " radial_axis = torch.cross(major_axis,to_torch(evecs[1],device_tensor = major_axis))\n", + " \n", + " \n", + " \n", + " plot_multi_pca(points_sym.unsqueeze(0), axis = evecs)\n", + " \n", + " 
points_nonsym_np = to_np(points_nonsym)\n", + " pcd_nonsym = o3d.geometry.PointCloud()\n", + " pcd_nonsym.points = o3d.utility.Vector3dVector(points_nonsym_np)\n", + " pcd_nonsym_mean, pcd_nonsym_cov = pcd_nonsym.compute_mean_and_covariance()\n", + " nonsym_evals, nonsym_evecs = np.linalg.eig(pcd_nonsym_cov) \n", + " nonsym_evecs = np.transpose(nonsym_evecs)\n", + " nonsym_major_axis = nonsym_evecs[0]\n", + " nonsym_major_axis = to_torch(nonsym_major_axis, device_tensor = non_sym_center)\n", + " \n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " sym2nonsym = pcd_mean-non_sym_center\n", + " \n", + " symmetry_plane_normal = nonsym_major_axis \n", + " sym_vec = points_sym-pcd_mean\n", + " \n", + " # make sure the z-axis always point towards the opening of the bowl\n", + " \n", + " if object_type == 'bowl':\n", + " radial_dist = torch.matmul(sym_vec,radial_axis)/torch.norm(radial_axis)\n", + " max_radial_idx = torch.argmax(radial_dist)\n", + " max_radial_vec = sym_vec[max_radial_idx]\n", + " sign = torch.sign(torch.matmul(max_radial_vec,major_axis))\n", + " print('sign',sign)\n", + "\n", + "\n", + " if object_type == 'bottle':\n", + " vertial_dist = torch.matmul(sym_vec,major_axis)/torch.norm(major_axis)\n", + " max_radial_idx = torch.argmax(vertial_dist)\n", + " max_radial_vec = sym_vec[max_radial_idx]\n", + " min_radial_idx = torch.argmin(vertial_dist)\n", + " min_radial_vec = sym_vec[min_radial_idx]\n", + " radial_dist = torch.matmul(sym_vec,radial_axis)/torch.norm(radial_axis)\n", + " if torch.abs(radial_dist[max_radial_idx]) < torch.abs(radial_dist[min_radial_idx]):\n", + " sign = 1\n", + " else:\n", + " sign = -1\n", + " if sign <0:\n", + " major_axis*=-1\n", + " \n", + " evecs[0] = to_np(major_axis)\n", + " plot_multi_pca(points_sym.unsqueeze(0), axis = evecs)\n", + " \n", + "\n", + " \n", + " mcenter_point = sym_vec[0]\n", + " # radial distance from major axis\n", + " projected_point_0 = project_to_axis(vector = mcenter_point, axis 
= symmetry_plane_normal) \n", + " dist = torch.norm(projected_point_0) # radial distance from major axis\n", + "\n", + " projected_point_sym2nonsym = project_to_axis(vector = mcenter_point, axis = sym2nonsym) \n", + " dist_projected_point_sym2nonsym = torch.norm(projected_point_sym2nonsym) \n", + " axes0 = [\n", + " \n", + " [to_np(nonsym_major_axis+non_sym_center),to_np(non_sym_center)],\n", + " [to_np(symmetry_plane_normal+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(non_sym_center),to_np(pcd_mean)],\n", + " [to_np(projected_point_0+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(pcd_mean), to_np(mcenter_point+pcd_mean)]\n", + " ]\n", + " plot_axis([anchor_cloud, action_cloud],axes0)\n", + " dotproduct = torch.matmul(mcenter_point,symmetry_plane_normal)/torch.norm(symmetry_plane_normal)\n", + " \n", + " \n", + " sym_cls = torch.sign(torch.matmul(sym_vec, symmetry_plane_normal)) # 1, num_points \n", + " cts_cls = torch.matmul(sym_vec,symmetry_plane_normal)/torch.norm(symmetry_plane_normal) \n", + " # coloring cts_cls for vis (+: red, -: blue)\n", + " print(\"cts_cls.shape\",cts_cls.shape,points_sym.shape)\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*255)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[cts_cls>=0,0] = color[cts_cls>=0]\n", + " color_cts[cts_cls<0,2] = torch.abs(color[cts_cls<0])\n", + " color_cts[cts_cls>=0]\n", + " \n", + " # another color scheme (+: red, -: green, 0: yellow )\n", + " # https://stackoverflow.com/questions/6394304/algorithm-how-do-i-fade-from-red-to-green-via-yellow-using-rgb-values\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*(255/2))+(255/2)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[:,0] = torch.minimum(torch.ones(color.shape).to(color.device)*255,color*2)\n", + " color_cts[:,1] = torch.minimum(torch.ones(color.shape).to(color.device)*255,(255-color)*2)\n", + " 
plot_color([points_sym,points_nonsym],[color_cts,None])\n", + " return sym_cls.unsqueeze(0), cts_cls.unsqueeze(0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e680586", + "metadata": {}, + "outputs": [], + "source": [ + "def get_sym_label_pca_gripper(action_cloud, anchor_cloud, action_class, anchor_class, object_type = 'bottle'):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[-1]\n", + " if object_type == 'bottle':\n", + " major_axis = evecs[0]\n", + "\n", + " points_nonsym_np = to_np(points_nonsym)\n", + " pcd_nonsym = o3d.geometry.PointCloud()\n", + " pcd_nonsym.points = o3d.utility.Vector3dVector(points_nonsym_np)\n", + " pcd_nonsym_mean, pcd_nonsym_cov = pcd_nonsym.compute_mean_and_covariance()\n", + " nonsym_evals, nonsym_evecs = np.linalg.eig(pcd_nonsym_cov) \n", + " nonsym_evecs = np.transpose(nonsym_evecs)\n", + " nonsym_major_axis = nonsym_evecs[0]\n", + " nonsym_major_axis = to_torch(nonsym_major_axis, device_tensor = non_sym_center).float()\n", + " \n", + " \n", + " plot_multi_pca(points_sym.unsqueeze(0), axis = evecs)\n", + " plot_multi_pca(points_nonsym.unsqueeze(0), axis = nonsym_evecs)\n", + " \n", + " 
major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " projected_point = project_to_axis(vector = non_sym_center-pcd_mean, axis = major_axis) + pcd_mean \n", + " \n", + " \n", + " air_point = non_sym_center+(pcd_mean-projected_point)\n", + " sym_vec = points_sym - pcd_mean\n", + " sym2nonsym = air_point-pcd_mean\n", + " if object_type == 'bottle':\n", + " sym2nonsym = torch.cross(sym2nonsym, major_axis)\n", + " if torch.matmul(sym2nonsym.float(),nonsym_major_axis)<0:\n", + " sym2nonsym*=-1\n", + " \n", + " vec = to_np(projected_point - pcd_mean)\n", + " axes = [[to_np(pcd_mean), to_np(projected_point)],\n", + " [to_np(projected_point), to_np(non_sym_center)],\n", + " [to_np(pcd_mean), to_np(major_axis+pcd_mean)],\n", + " [to_np(pcd_mean), to_np(air_point)]]\n", + " # plot_axis([anchor_cloud, action_cloud], axes) \n", + "# print(torch.matmul(sym_vec, sym2nonsym))\n", + " mcenter_point = sym_vec[0]\n", + " # radial distance from major axis\n", + " projected_point_0 = project_to_axis(vector = mcenter_point, axis = major_axis) \n", + " dist = torch.norm(projected_point_0 - mcenter_point) # radial distance from major axis\n", + "\n", + " projected_point_sym2nonsym = project_to_axis(vector = mcenter_point, axis = sym2nonsym) \n", + " dist_projected_point_sym2nonsym = torch.norm(projected_point_sym2nonsym) \n", + " axes0 = [\n", + " [to_np(non_sym_center), to_np(pcd_mean)],\n", + " [to_np(projected_point_sym2nonsym+pcd_mean), to_np(pcd_mean)],\n", + " [to_np(sym2nonsym+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(mcenter_point+pcd_mean),to_np(pcd_mean)],\n", + " [to_np(major_axis+pcd_mean),to_np(pcd_mean)]]\n", + " plot_axis([anchor_cloud, action_cloud],axes0)\n", + " dotproduct = torch.matmul(mcenter_point,sym2nonsym)/torch.norm(sym2nonsym)\n", + " \n", + " project_to_axis(vector = points_sym[0]-pcd_mean, axis = major_axis)\n", + " sym_cls = torch.sign(torch.matmul(sym_vec, 
sym2nonsym)) # 1, num_points \n", + " cts_cls = torch.matmul(sym_vec,sym2nonsym)/torch.norm(sym2nonsym) \n", + " cts_cls_norm = cts_cls/torch.max(torch.abs(cts_cls))\n", + " nonsym_vec = points_nonsym - non_sym_center\n", + " print(\"nonsym_vec.type\", nonsym_vec.dtype, nonsym_major_axis.dtype)\n", + " cts_cls_nonsym = torch.matmul(nonsym_vec,nonsym_major_axis)/torch.norm(nonsym_major_axis) \n", + " \n", + " # coloring cts_cls for vis (+: red, -: blue)\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*255)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[cts_cls>=0,0] = color[cts_cls>=0]\n", + " color_cts[cts_cls<0,2] = torch.abs(color[cts_cls<0])\n", + " color_cts[cts_cls>=0]\n", + " \n", + " # another color scheme (+: red, -: green, 0: yellow )\n", + " # https://stackoverflow.com/questions/6394304/algorithm-how-do-i-fade-from-red-to-green-via-yellow-using-rgb-values\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*(255/2))+(255/2)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[:,0] = torch.minimum(torch.ones(color.shape).to(color.device)*255,color*2)\n", + " color_cts[:,1] = torch.minimum(torch.ones(color.shape).to(color.device)*255,(255-color)*2)\n", + " \n", + "# plot_color([points_sym,points_nonsym],[color_cts,None])\n", + " \n", + " color_nonsym = (cts_cls_nonsym/torch.abs(torch.max(cts_cls_nonsym))*(255/2))+(255/2)\n", + " color_cts_nonsym = torch.zeros(points_nonsym.shape).to(points_nonsym.device).double()\n", + " \n", + " color_cts_nonsym[:,0] = torch.minimum(torch.ones(color_nonsym.shape).to(color_nonsym.device)*255,color_nonsym*2)\n", + " color_cts_nonsym[:,1] = torch.minimum(torch.ones(color_nonsym.shape).to(color_nonsym.device)*255,(255-color_nonsym)*2)\n", + " \n", + " plot_color([points_sym,points_nonsym],[color_cts,color_cts_nonsym])\n", + " return cts_cls_nonsym.unsqueeze(0), cts_cls.unsqueeze(0)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "f7e25752", + "metadata": {}, + "outputs": [], + "source": [ + "def get_sym_label_pca_test(rack, action_cloud, anchor_cloud, action_class, anchor_class, object_type = 'bowl'):\n", + " assert 0 in [action_class, anchor_class], \"class 0 must be here somewhere as the manipulation object of interest\"\n", + " if action_class == 0:\n", + " sym_breaking_class = action_class\n", + " center_class = anchor_class\n", + " points_sym = action_cloud[0]\n", + " points_nonsym = anchor_cloud[0]\n", + " elif anchor_class == 0:\n", + " sym_breaking_class = anchor_class\n", + " center_class = action_class\n", + " points_sym = anchor_cloud[0]\n", + " points_nonsym = action_cloud[0]\n", + " points_ref = rack[0]\n", + "\n", + " non_sym_center = points_nonsym.mean(axis=0)\n", + " points_sym_np = to_np(points_sym)\n", + " \n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + " pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, evecs = np.linalg.eig(pcd_cov) \n", + " evecs = np.transpose(evecs)\n", + " major_axis = evecs[0]\n", + " \n", + " plot_multi_pca(points_sym.unsqueeze(0), axis = evecs,index_list = [0,1,2]) \n", + " \n", + " points_ref_center = points_ref.mean(axis=0)\n", + " points_ref_np = to_np(points_ref)\n", + " pcd = o3d.geometry.PointCloud()\n", + " pcd.points = o3d.utility.Vector3dVector(points_ref_np)\n", + " rack_pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + " evals, rack_evecs = np.linalg.eig(pcd_cov) \n", + " rack_evecs = np.transpose(rack_evecs)\n", + " rack_major_axis = rack_evecs[0]\n", + " \n", + " major_axis = to_torch(major_axis, device_tensor = non_sym_center)\n", + " rack_major_axis = to_torch(rack_major_axis,device_tensor = non_sym_center)\n", + " pcd_mean = to_torch(pcd_mean, device_tensor = non_sym_center) \n", + " projected_point = project_to_axis(vector = non_sym_center-pcd_mean, axis = major_axis) + pcd_mean \n", + 
" \n", + " sym_vec = points_sym - pcd_mean\n", + " \n", + " sym2nonsym = torch.cross(rack_major_axis,major_axis)\n", + " sym2nonsym = rack_major_axis\n", + " \n", + " axes = [[to_np(pcd_mean), to_np(sym2nonsym+pcd_mean)],\n", + " [to_np(pcd_mean), to_np(major_axis+pcd_mean)],\n", + "# [to_np(pcd_mean), to_np(major_axis+pcd_mean)],\n", + "# [to_np(pcd_mean), to_np(air_point)]\n", + " ]\n", + " plot_axis([anchor_cloud, action_cloud], axes) \n", + " \n", + " sym_cls = torch.sign(torch.matmul(sym_vec, sym2nonsym)) # 1, num_points \n", + " cts_cls = torch.matmul(sym_vec,sym2nonsym)/torch.norm(sym2nonsym) \n", + " cts_cls_norm = cts_cls/torch.max(torch.abs(cts_cls))\n", + " \n", + " \n", + " # coloring cts_cls for vis (+: red, -: blue)\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*255)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[cts_cls>=0,0] = color[cts_cls>=0]\n", + " color_cts[cts_cls<0,2] = torch.abs(color[cts_cls<0])\n", + " color_cts[cts_cls>=0]\n", + " \n", + " # another color scheme (+: red, -: green, 0: yellow )\n", + " # https://stackoverflow.com/questions/6394304/algorithm-how-do-i-fade-from-red-to-green-via-yellow-using-rgb-values\n", + " color = (cts_cls/torch.abs(torch.max(cts_cls))*(255/2))+(255/2)\n", + " color_cts = torch.zeros(points_sym.shape).to(points_sym.device).double()\n", + " \n", + " color_cts[:,0] = torch.minimum(torch.ones(color.shape).to(color.device)*255,color*2)\n", + " color_cts[:,1] = torch.minimum(torch.ones(color.shape).to(color.device)*255,(255-color)*2)\n", + " \n", + " plot_color([points_sym, points_nonsym, points_ref],[color_cts,None, None])\n", + " return cts_cls_nonsym.unsqueeze(0), cts_cls.unsqueeze(0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2af11397", + "metadata": {}, + "outputs": [], + "source": [ + "from equivariant_pose_graph.utils.symmetry_utils import get_sym_label_pca_test_bottle_graspable\n", + "num_classes = 3\n", + 
"data_idx = 1\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'plan2'\n", + "timestamp = '2022-07-13_093105/2022-07-13-09-31-05'\n", + "data_path = Path(f'/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/{timestamp}/renders/')\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders') \n", + "action_class, anchor_class = 2,0\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_test_new_3_pregrasp/renders') \n", + "# action_class, anchor_class = 2,0\n", + "\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "# action_class, anchor_class = 0,1\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= action_class, anchor_class= anchor_class)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= anchor_class)\n", + "ans_dict = get_sym_label_pca_test_bottle_graspable(points_rack, \n", + " action_cloud = points_action, \n", + " anchor_cloud = points_anchor, \n", + " action_class= action_class, \n", + " anchor_class= anchor_class, \n", + " normalize_dist= True,\n", + " object_type = 'bowl')\n", + "plot_multi([points_action, points_anchor, points_rack])\n", + "# [ans_dict['cts_cls'][0], ans_dict['cts_cls_nonsym'][0], None])\n", + "print(\"cts_cls.shape\", ans_dict['cts_cls'].shape)\n", + "plot_color([points_anchor[0], points_action[0], points_rack[0]],\n", + " [ans_dict['cts_cls'][0], ans_dict['cts_cls_nonsym'][0], None])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf9a0958", + "metadata": {}, + "outputs": [], + "source": [ + "from equivariant_pose_graph.utils.symmetry_utils import 
get_sym_label_pca_test_bottle_graspable\n", + "num_classes = 3\n", + "data_idx = 4\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'init'\n", + "timestamp = '2022-07-12_142311/2022-07-12-14-23-11'\n", + "data_path = Path(f'/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/{timestamp}/renders/')\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders') \n", + "action_class, anchor_class = 2,0\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_test_new_3_pregrasp/renders') \n", + "# action_class, anchor_class = 2,0\n", + "\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "# action_class, anchor_class = 0,1\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= action_class, anchor_class= anchor_class)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= anchor_class)\n", + "ans_dict = get_sym_label_pca_test_bottle_graspable(points_rack, \n", + " action_cloud = points_action, \n", + " anchor_cloud = points_anchor, \n", + " action_class= action_class, \n", + " anchor_class= anchor_class, \n", + " normalize_dist= True,\n", + " object_type = 'bowl')\n", + "print(\"cts_cls.shape\", ans_dict['cts_cls'].shape)\n", + "plot_color([points_anchor[0], points_action[0], points_rack[0]],\n", + " [ans_dict['cts_cls'][0], ans_dict['cts_cls_nonsym'][0], None])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb7fa659", + "metadata": {}, + "outputs": [], + "source": [ + "from equivariant_pose_graph.utils.symmetry_utils import get_sym_label_pca_test_bottle_graspable\n", + "num_classes = 3\n", + "data_idx = 1\n", + 
"cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders') \n", + "action_class, anchor_class = 2,0\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_test_new_3_pregrasp/renders') \n", + "# action_class, anchor_class = 2,0\n", + "\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "# action_class, anchor_class = 0,1\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= action_class, anchor_class= anchor_class)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= anchor_class)\n", + "#v cts_cls_nonsym, cts_cls = get_sym_label_pca_test(action_cloud = points_action, anchor_cloud = points_anchor, action_class= action_class, anchor_class= anchor_class, object_type = 'bowl') # num_points, 1\n", + "ans_dict = get_sym_label_pca_test_bottle_graspable(points_rack, action_cloud = points_action, anchor_cloud = points_anchor, action_class= action_class, anchor_class= anchor_class, object_type = 'bowl')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9bca1c7a", + "metadata": {}, + "outputs": [], + "source": [ + "data_idx = 5\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "timestamp = '2022-07-12_142311/2022-07-12-14-23-11'\n", + "data_path = Path(f'/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/{timestamp}/renders/')\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = 
load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= 0)\n", + "plot_multi([points_action,points_anchor, points_rack])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2388ea9d", + "metadata": {}, + "outputs": [], + "source": [ + " \n", + "data_idx = 1\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders')\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= 0)\n", + "plot_multi([points_action,points_anchor, points_rack])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "487cf099", + "metadata": {}, + "outputs": [], + "source": [ + "data_idx = 5\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "timestamp = '2022-07-12_142311/2022-07-12-14-23-11'\n", + "data_path = Path(f'/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/{timestamp}/renders/')\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "points_rack, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 1, anchor_class= 0)\n", + "plot_multi([points_action,points_anchor, points_rack])" + ] + }, + { + "cell_type": "markdown", + 
"id": "aa0967e2", + "metadata": {}, + "source": [ + "data_idx = 5\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "timestamp = '2022-07-12_142311/2022-07-12-14-23-11'\n", + "data_path = Path(f'/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/{timestamp}/renders/')\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= , anchor_class= 0)\n", + "plot_multi([points_action,points_anchor])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a909d6dc", + "metadata": {}, + "outputs": [], + "source": [ + "cloud_type = 'post_grasp'\n", + "data_path = Path('/home/exx/media/DataDrive/singularity_chuerp/epg_results/residual_flow_test/residual_flow_test_partial_cloud_initpose/2022-07-12_134501/2022-07-12-13-45-01/renders/')\n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "plot_multi([points_action,points_anchor])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa7b5c62", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 0\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'init'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_new_3_pregrasp/renders') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = 
load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "ans_dict = model.get_transform(\n", + " points_action, points_anchor, action_class=2, anchor_class=0)\n", + "pred_T_action = ans_dict['pred_T_action']\n", + "points_action_pred = pred_T_action.transform_points(points_action)\n", + " \n", + "\n", + "plot_multi([points_action,points_anchor,points_action_pred])\n", + "print(points_action_pred.shape)\n", + "\n", + "gripper_pred_center = points_action_pred[0].mean(axis=0) # 1,3\n", + "anchor = points_anchor[0]\n", + "anchor_center = anchor.mean(axis=0)\n", + "points_sym_np = to_np(anchor)\n", + "\n", + "pcd = o3d.geometry.PointCloud()\n", + "pcd.points = o3d.utility.Vector3dVector(points_sym_np)\n", + "pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + "evals, evecs = np.linalg.eig(pcd_cov) \n", + "evecs = np.transpose(evecs)\n", + "major_axis = evecs[0]\n", + "major_axis = to_torch(major_axis, device_tensor= gripper_pred_center)\n", + "plot_multi_pca(points_anchor, axis = evecs)\n", + "\n", + "bottle2gripper_center = gripper_pred_center - anchor_center\n", + "projected_point_on_anchor_axis = torch.matmul(bottle2gripper_center.float(), major_axis.float())/torch.norm(major_axis.float())*major_axis+anchor_center.float()\n", + "radial_vector_towards_gripper = gripper_pred_center-projected_point_on_anchor_axis\n", + "points_anchor-=radial_vector_towards_gripper*1\n", + "axes0 = [\n", + " [to_np(gripper_pred_center), to_np(projected_point_on_anchor_axis)],\n", + " [to_np(major_axis+anchor_center), to_np(anchor_center)],\n", + " [to_np(gripper_pred_center),to_np(anchor_center)],\n", + " ]\n", + "plot_axis([points_action, points_anchor, points_action_pred],axes0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07f46567", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 1\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + 
"cloud_type = 'init'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_new_3_pregrasp/renders') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "sym_cls, cts_cls = get_sym_label_pca_gripper(action_cloud = points_action, anchor_cloud = points_anchor, \\\n", + " action_class= 2, anchor_class= 0) # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "\n", + "points_anchor_left = points_anchor[sym_cls>0]\n", + "points_anchor_right = points_anchor[sym_cls<=0]\n", + "\n", + "plot_multi([points_anchor_left,points_anchor_right,points_action])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7fd97b46", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 1\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_bowl_arbitrary') \n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 0, anchor_class= 1)\n", + "sym_cls, cts_cls = get_sym_label_pca_bowl_place(action_cloud = points_action, anchor_cloud = points_anchor, \\\n", + " action_class= 0, anchor_class= 1) # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "\n", + "points_action_left = points_action[sym_cls>0]\n", + "points_action_right = points_action[sym_cls<=0]\n", + "\n", + "plot_multi([points_action_left,points_action_right,points_anchor])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8dbfab7", + "metadata": {}, + "outputs": [], + 
"source": [ + "num_classes = 3\n", + "data_idx = 1\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders') \n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 0, anchor_class= 1)\n", + "sym_cls, cts_cls = get_sym_label_pca_bowl_place(action_cloud = points_action, anchor_cloud = points_anchor, \\\n", + " action_class= 0, anchor_class= 1, object_type = 'bottle') # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "\n", + "points_action_left = points_action[sym_cls>0]\n", + "points_action_right = points_action[sym_cls<=0]\n", + "\n", + "plot_multi([points_action_left,points_action_right,points_anchor])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38712760", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 2\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'teleport'\n", + "# data_path = Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_bottle_arbitrary_Seed2') \n", + "data_path = Path('/home/exx/Documents/ndf_robot/bottle_train_data_ndf_cons_3/renders') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 0, anchor_class= 1)\n", + "sym_cls, cts_cls = get_sym_label_pca_bowl_place(action_cloud = points_action, anchor_cloud = points_anchor, \\\n", + " action_class= 0, anchor_class= 1) # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "\n", + "points_action_left = 
points_action[sym_cls>0]\n", + "points_action_right = points_action[sym_cls<=0]\n", + "\n", + "plot_multi([points_action_left,points_action_right,points_anchor])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d270f26", + "metadata": {}, + "outputs": [], + "source": [ + "num_classes = 3\n", + "data_idx = 2\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_bottle_arbitrary_Seed2') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "\n", + "points_action, points_anchor = load_data(num_points=1000, point_data=point_data, action_class= 2, anchor_class= 0)\n", + "sym_cls = get_sym_label_pca_bottle_playground(action_cloud = points_action, anchor_cloud = points_anchor, action_class= 2, anchor_class= 0) # num_points, 1\n", + "print((sym_cls>0).shape)\n", + "\n", + "points_anchor_left = points_anchor[sym_cls>0]\n", + "points_anchor_right = points_anchor[sym_cls<=0]\n", + "\n", + "plot_multi([points_anchor_left,points_anchor_right,points_action])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6336c143", + "metadata": {}, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import numpy as np\n", + "num_classes = 3\n", + "data_idx = 2\n", + "cloud_types = ['init', 'pre_grasp', 'post_grasp', 'teleport', 'post_place', 'final']\n", + "cloud_type = 'pre_grasp'\n", + "data_path = Path('/home/exx/Documents/ndf_robot/bowl_test_new_3_pregrasp/renders') \n", + "# data_path = Path('/home/exx/Documents/ndf_robot/bowl_train_data_ndf_cons_3/renders') \n", + "# data_path = Path('/home/exx/Documents/ndf_robot/src/ndf_robot/test_bowl_upright') \n", + "point_data = np.load(data_path / f'{data_idx}_{cloud_type}_obj_points.npz', allow_pickle = True)\n", + "points_action, points_anchor = load_data(num_points=1000, 
point_data=point_data, action_class= 2, anchor_class= 0)\n", + "points_anchor_np = points_anchor[0].detach().cpu().numpy()\n", + "points_action_np = points_action[0].detach().cpu().numpy()\n", + "pcd = o3d.geometry.PointCloud()\n", + "pcd.points = o3d.utility.Vector3dVector(points_action_np)\n", + "pcd_mean, pcd_cov = pcd.compute_mean_and_covariance()\n", + "evals, evecs = np.linalg.eig(pcd_cov) \n", + "evecs = np.transpose(evecs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1085f61a", + "metadata": {}, + "outputs": [], + "source": [ + "plot_multi_pca(points_action, axis = evecs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1bb51b8d", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83e1e1d6", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cbf87efc", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ndf_new", + "language": "python", + "name": "ndf_new" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/rlbench_make_demo_grid.ipynb b/notebooks/rlbench_make_demo_grid.ipynb new file mode 100644 index 0000000..745073f --- /dev/null +++ b/notebooks/rlbench_make_demo_grid.ipynb @@ -0,0 +1,248 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# make a grid of 2x5 images of pngs, read 
from a folder\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import numpy as np\n", + "import os\n", + "\n", + "from PIL import Image\n", + "\n", + "# path to folder with pngs\n", + "path = os.path.expanduser(\"~/datasets/rlbench\")\n", + "\n", + "# task = \"put_toilet_roll_on_stand\"\n", + "task = \"stack_wine\"\n", + "# task = \"phone_on_base\"\n", + "# task = \"insert_onto_square_peg\"\n", + "# task = \"place_hanger_on_rack\"\n", + "# task = \"solve_puzzle\"\n", + "\n", + "dirs = [os.path.join(path, task, \"variation0\", \"episodes\", f\"episode{i}\", \"overhead_rgb\") for i in range(10)]\n", + "start_pngs = []\n", + "end_pngs = []\n", + "for d in dirs:\n", + " # The files have the format 1.png, 2.png, 3.png, etc.\n", + " # We want the last one, so we need a sort which is not lexicographic\n", + " files = os.listdir(d)\n", + "\n", + " files = sorted(files, key=lambda x: int(x.split(\".\")[0]))\n", + " start_pngs.append(os.path.join(d, files[0]))\n", + " end_pngs.append(os.path.join(d, files[-1]))\n", + "\n", + " # sort the files by \n", + " print(files[-1])\n", + "\n", + "# create a grid of 2x5 images\n", + "fig, axs = plt.subplots(2, 5, figsize=(20, 10))\n", + "axs = axs.flatten()\n", + "\n", + "for i, png in enumerate(start_pngs):\n", + " img = Image.open(png)\n", + " axs[i].imshow(img)\n", + " axs[i].axis(\"off\")\n", + "\n", + " # Add a title to the image\n", + " axs[i].set_title(f\"Episode {i}\")\n", + "\n", + "# create a grid of 2x5 images\n", + "fig, axs = plt.subplots(2, 5, figsize=(20, 10))\n", + "axs = axs.flatten()\n", + "\n", + "for i, png in enumerate(end_pngs):\n", + " img = Image.open(png)\n", + " axs[i].imshow(img)\n", + " axs[i].axis(\"off\")\n", + "\n", + " # Add a title to the image\n", + " axs[i].set_title(f\"Episode {i}\")\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { 
+ "tags": [] + }, + "outputs": [], + "source": [ + "from taxpose.datasets.rlbench import RLBenchPointCloudDataset, RLBenchPointCloudDatasetConfig\n", + "\n", + "\n", + "dset = RLBenchPointCloudDataset(cfg=RLBenchPointCloudDatasetConfig(\n", + " dataset_root=os.path.expanduser(\"~/datasets/rlbench\"),\n", + " # task_name=\"insert_onto_square_peg\",\n", + " # task_name=\"stack_wine\",\n", + " # task_name=\"put_toilet_roll_on_stand\",\n", + " # task_name=\"phone_on_base\",\n", + " # task_name=\"place_hanger_on_rack\",\n", + " task_name=\"solve_puzzle\",\n", + " episodes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n", + " phase=\"place\",\n", + ")) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(10):\n", + " print(dset[i]['points_action'].shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import open3d as o3d\n", + "import open3d.web_visualizer as w3d\n", + "\n", + "# for i in range(len(dset)):\n", + "data = dset[0]\n", + "\n", + "pcd = o3d.geometry.PointCloud()\n", + "\n", + "\n", + "\n", + "# Yellow points\n", + "pcd.points = o3d.utility.Vector3dVector(data[\"points_action\"][0])\n", + "pcd.colors = o3d.utility.Vector3dVector(data[\"action_symmetry_rgb\"][0] / 255.0)\n", + "\n", + "pcd1 = o3d.geometry.PointCloud()\n", + "\n", + "# Red points\n", + "pcd1.points = o3d.utility.Vector3dVector(data[\"points_anchor\"][0])\n", + "pcd1.colors = o3d.utility.Vector3dVector(data[\"anchor_symmetry_rgb\"][0] / 255.0)\n", + "\n", + "# Draw the point clouds\n", + "# o3d.visualization.draw_geometries([\n", + "# # pcd,\n", + "# pcd1,\n", + "# ])\n", + "w3d.draw(pcd)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Iterate through the dataset, and plot 2D renders of 3D point clouds in matplotlib.\n", + "# We want to plot on 3D matplotlib axes.\n", + "# Make a 
grid of 2 x 5\n", + "\n", + "import matplotlib.pyplot as plt\n", + "from mpl_toolkits.mplot3d import Axes3D\n", + "\n", + "fig = plt.figure(figsize=(20, 10))\n", + "axs = fig.subplots(2, 5, subplot_kw={\"projection\": \"3d\"})\n", + "axs = axs.flatten()\n", + "\n", + "for i, data in enumerate(dset):\n", + " axs[i].scatter(data[\"points_anchor\"][0][:, 0], data[\"points_anchor\"][0][:, 1], data[\"points_anchor\"][0][:, 2], c=data[\"anchor_symmetry_rgb\"][0] / 255.0)\n", + " axs[i].scatter(data[\"points_action\"][0][:, 0], data[\"points_action\"][0][:, 1], data[\"points_action\"][0][:, 2], c=data[\"action_symmetry_rgb\"][0] / 255.0)\n", + "\n", + " axs[i].set_title(f\"Episode {i}\")\n", + "\n", + " # Get the combined points\n", + " points = np.concatenate([data[\"points_action\"][0], data[\"points_anchor\"][0]], axis=0)\n", + "\n", + " # Set the axes limits\n", + " axs[i].set_xlim3d(points[:, 0].min(), points[:, 0].max())\n", + " axs[i].set_ylim3d(points[:, 1].min(), points[:, 1].max())\n", + " axs[i].set_zlim3d(points[:, 2].min(), points[:, 2].max())\n", + "\n", + " # Remove the ticks\n", + " axs[i].set_xticks([])\n", + " axs[i].set_yticks([])\n", + " axs[i].set_zticks([])\n", + "\n", + " # Remove the axes\n", + " axs[i].set_axis_off()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[\"points_action\"][0][:, 0].min()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data[\"action_symmetry_features\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": 
"python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/scripts/README.md b/scripts/README.md index deed6b0..5ed77af 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,3 +1,108 @@ -# Scripts for your porject +# Scripts for your project If you write some scripts which are meant to be run stand-alone, and not imported as part of the library, put them in this directory. + +Here are some scratch commands for now. + +# New RLBench. + +# Cool thing I'm trying... + + +mkdir -p configs/checkpoints/rlbench/pick_and_lift/pretraining && touch configs/checkpoints/rlbench/pick_and_lift/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 0 python scripts/train_residual_flow.py --config-name commands/rlbench/pick_and_lift/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_pick_and_lift resources.num_workers=10" \ + "./launch.sh autobot 0 ./configs/commands/rlbench/pick_and_lift/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/pick_up_cup/pretraining && touch configs/checkpoints/rlbench/pick_up_cup/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 1 python scripts/train_residual_flow.py --config-name commands/rlbench/pick_up_cup/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_pick_up_cup resources.num_workers=10" \ + "./launch.sh autobot 1 ./configs/commands/rlbench/pick_up_cup/taxpose_all/precision_eval/precision_eval.sh 
dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/put_knife_on_chopping_board/pretraining && touch configs/checkpoints/rlbench/put_knife_on_chopping_board/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 2 python scripts/train_residual_flow.py --config-name commands/rlbench/put_knife_on_chopping_board/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_put_knife_on_chopping_board resources.num_workers=10" \ + "./launch.sh autobot 2 ./configs/commands/rlbench/put_knife_on_chopping_board/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/put_money_in_safe/pretraining && touch configs/checkpoints/rlbench/put_money_in_safe/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 3 python scripts/train_residual_flow.py --config-name commands/rlbench/put_money_in_safe/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_put_money_in_safe resources.num_workers=10" \ + "./launch.sh autobot 3 ./configs/commands/rlbench/put_money_in_safe/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/push_button/pretraining && touch configs/checkpoints/rlbench/push_button/pretraining/all.yaml +./scripts/train_eval.sh \ 
+ "./launch.sh autobot 4 python scripts/train_residual_flow.py --config-name commands/rlbench/push_button/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_push_button resources.num_workers=10" \ + "./launch.sh autobot 4 ./configs/commands/rlbench/push_button/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/reach_target/pretraining && touch configs/checkpoints/rlbench/reach_target/pretraining/all.yaml +EXTRA_PARAMS="dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" + ./scripts/train_eval.sh \ + "./launch.sh local-docker 0 python scripts/train_residual_flow.py --config-name commands/rlbench/reach_target/train_taxpose_all.yaml wandb.group=rlbench_reach_target resources.num_workers=10 ${EXTRA_PARAMS}" \ + "./launch.sh local-docker 0 ./configs/commands/rlbench/reach_target/taxpose_all/precision_eval/precision_eval.sh ${EXTRA_PARAMS}" \ + "./launch.sh local-docker 0 python scripts/eval_rlbench.py --config-name commands/rlbench/reach_target/taxpose_all/eval_rlbench.yaml num_trials=100 resources.num_workers=10 wandb.group=rlbench_reach_target headless=True ${EXTRA_PARAMS}" + +EXTRA_PARAMS="dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" + ./scripts/train_eval.sh \ + "./launch.sh autobot 0 python scripts/train_residual_flow.py --config-name commands/rlbench/reach_target/train_taxpose_all.yaml wandb.group=rlbench_reach_target resources.num_workers=10 ${EXTRA_PARAMS}" \ + "./launch.sh autobot 0 
./configs/commands/rlbench/reach_target/taxpose_all/precision_eval/precision_eval.sh ${EXTRA_PARAMS}" \ + "./launch.sh autobot 0 python scripts/eval_rlbench.py --config-name commands/rlbench/reach_target/taxpose_all/eval_rlbench.yaml num_trials=100 resources.num_workers=10 wandb.group=rlbench_reach_target headless=True ${EXTRA_PARAMS}" + + +mkdir -p configs/checkpoints/rlbench/slide_block_to_target/pretraining && touch configs/checkpoints/rlbench/slide_block_to_target/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 6 python scripts/train_residual_flow.py --config-name commands/rlbench/slide_block_to_target/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_slide_block_to_target resources.num_workers=10" \ + "./launch.sh autobot 6 ./configs/commands/rlbench/slide_block_to_target/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p configs/checkpoints/rlbench/stack_wine/pretraining && touch configs/checkpoints/rlbench/stack_wine/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 7 python scripts/train_residual_flow.py --config-name commands/rlbench/stack_wine/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_stack_wine resources.num_workers=10" \ + "./launch.sh autobot 7 ./configs/commands/rlbench/stack_wine/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512" \ + echo + + +mkdir -p 
configs/checkpoints/rlbench/take_money_out_safe/pretraining && touch configs/checkpoints/rlbench/take_money_out_safe/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 0 python scripts/train_residual_flow.py --config-name commands/rlbench/take_money_out_safe/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_take_money_out_safe resources.num_workers=10 data_root=/home/beisner/datasets" \ + "./launch.sh autobot 0 ./configs/commands/rlbench/take_money_out_safe/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 data_root=/home/beisner/datasets" \ + echo + + +mkdir -p configs/checkpoints/rlbench/take_umbrella_out_of_umbrella_stand/pretraining && touch configs/checkpoints/rlbench/take_umbrella_out_of_umbrella_stand/pretraining/all.yaml +./scripts/train_eval.sh \ + "./launch.sh autobot 1 python scripts/train_residual_flow.py --config-name commands/rlbench/take_umbrella_out_of_umbrella_stand/train_taxpose_all.yaml dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 wandb.group=rlbench_take_umbrella_out_of_umbrella_stand resources.num_workers=10 data_root=/home/beisner/datasets" \ + "./launch.sh autobot 1 ./configs/commands/rlbench/take_umbrella_out_of_umbrella_stand/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 data_root=/home/beisner/datasets" \ + echo + + +### Redo the evals... 
+./launch.sh autobot 0 ./configs/commands/rlbench/pick_and_lift/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-a8vhdn5b:v0 + +./launch.sh autobot 1 ./configs/commands/rlbench/pick_up_cup/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-nay0eueg:v0 + +./launch.sh autobot 2 ./configs/commands/rlbench/put_knife_on_chopping_board/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-opjzx3mi:v0 + +./launch.sh autobot 3 ./configs/commands/rlbench/put_money_in_safe/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-m2eb9u1s:v0 + +./launch.sh autobot 4 ./configs/commands/rlbench/push_button/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-gjzsgkkh:v0 + +./launch.sh autobot 5 ./configs/commands/rlbench/reach_target/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-yz9e7iz8:v0 + +./launch.sh autobot 6 
./configs/commands/rlbench/slide_block_to_target/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-kpbx8qf4:v0 + +./launch.sh autobot 7 ./configs/commands/rlbench/stack_wine/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-nixx3qii:v0 + +./launch.sh autobot 8 ./configs/commands/rlbench/take_money_out_safe/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-1gmi342v:v0 + +./launch.sh local 1 ./configs/commands/rlbench/take_umbrella_out_of_umbrella_stand/taxpose_all/precision_eval/precision_eval.sh dm.train_dset.demo_dset.anchor_mode=background_robot_removed dm.train_dset.demo_dset.action_mode=gripper_and_object dm.train_dset.demo_dset.num_points=512 checkpoints.ckpt_file=r-pad/taxpose/model-1y7t5g4o:v0 data_root=/home/beisner/datasets diff --git a/scripts/get_results.py b/scripts/get_results.py index d411f39..943d502 100644 --- a/scripts/get_results.py +++ b/scripts/get_results.py @@ -1,9 +1,10 @@ import os -import numpy as np import pandas as pd import typer +from taxpose.utils.compile_result import get_result_df + def main(res_dir: str, n_trials: int = 100): # Get the name of every directory in res_dir @@ -20,42 +21,7 @@ def main(res_dir: str, n_trials: int = 100): # Get the path to the result file result_path = os.path.join(res_dir, seed, result_file) - res = np.load(result_path) - # Set the index of the dataframe to be the seed - - succ = np.logical_and( - res["grasp_success_list"], 
res["place_success_teleport_list"] - ) - mp_succ = np.logical_and(res["grasp_success_list"], res["place_success_list"]) - - ress = { - "seed": seed, - "Grasp": res["grasp_success_list"].astype(float).mean(), - "Place": res["place_success_teleport_list"].astype(float).mean(), - "Overall": succ.astype(float).mean(), - "Overall_mp": mp_succ.astype(float).mean(), - } - - for thresh in [0.0, 0.01, 0.02, 0.03, 0.04, 0.05]: - ps_at_t = np.logical_and( - res["place_success_teleport_list"], res["penetration_list"] < thresh - ).mean(axis=-1) - s_at_t = np.logical_and(succ, res["penetration_list"] < thresh).mean( - axis=-1 - ) - - ress[f"Place@{thresh}"] = ps_at_t.mean() - ress[f"Overall@{thresh}"] = s_at_t.mean() - - results = pd.DataFrame( - ress, - columns=["seed", "Grasp", "Place", "Overall", "Overall_mp"] - + [f"Place@{thresh}" for thresh in [0.0, 0.01, 0.02, 0.03, 0.04, 0.05]] - + [f"Overall@{thresh}" for thresh in [0.0, 0.01, 0.02, 0.03, 0.04, 0.05]], - index=[0], - ) - # set the seed column as the index - results.set_index("seed", inplace=True) + results = get_result_df(result_path, seed) dfs.append(results) # Concatenate all the results @@ -64,6 +30,7 @@ def main(res_dir: str, n_trials: int = 100): # Save the results df.to_csv(os.path.join(res_dir, "results.csv")) print(df) + return df if __name__ == "__main__": diff --git a/scripts/merge_table.py b/scripts/merge_table.py new file mode 100644 index 0000000..fd9d899 --- /dev/null +++ b/scripts/merge_table.py @@ -0,0 +1,40 @@ +# function which takes two tables with the same shape, and merges each cell +# by putting a slash between the two values. + + +def merge_table(table1, table2): + """Merge two tables by putting a slash between each cell. + + Args: + table1 (pd.DataFrame): A table. + table2 (pd.DataFrame): A table. + + Returns: + pd.DataFrame: A merged table. + """ + assert table1.shape == table2.shape + # create a new table to store the merged values + table1 = table1.copy() + + # Iterate over the rows. 
+ for i in range(table1.shape[0]): + # Iterate over the columns. + for j in range(table1.shape[1]): + # Merge the two cells. + table1.iloc[i, j] = f"{table1.iloc[i, j]}/{table2.iloc[i, j]}" + return table1 + + +# A function which takes a list of columns and a table, and returns a new table +# with only the specified columns. +def filter_table(columns, table): + """Return a new table containing only the given columns, in the given order. + + Args: + columns (List[str]): A list of columns to keep. + table (pd.DataFrame): A table. + + Returns: + pd.DataFrame: A filtered table. + """ + return table[columns]