diff --git a/docs/generate_task_docs.py b/docs/generate_task_docs.py
index aaf84952c..856ad5082 100644
--- a/docs/generate_task_docs.py
+++ b/docs/generate_task_docs.py
@@ -2,12 +2,13 @@
 TASK_CATEGORIES_TO_INCLUDE = [
     "tabletop",
     "humanoid",
-    "mobile_manipulation", "quadruped", "control", "drawing"
+    "mobile_manipulation",
+    "quadruped",
+    "control",
+    "drawing",
 ]
-TASK_CATEGORIES_NAME_MAP = {
-    "tabletop": "table_top_gripper"
-}
+TASK_CATEGORIES_NAME_MAP = {"tabletop": "table_top_gripper"}
 GENERATED_TASKS_DOCS_FOLDER = "tasks"
 GLOBAL_TASK_HEADER = """
 [asset-badge]: https://img.shields.io/badge/download%20asset-yes-blue.svg
@@ -25,25 +26,20 @@
     "tabletop": """# Table-Top 2 Finger Gripper Tasks
 These are tasks situated on table and involve a two-finger gripper arm robot manipulating objects on the surface.""",
-
     "humanoid": """# Humanoid Tasks
 Both real-world humanoids and the Mujoco humanoid are supported in ManiSkill, and we are still in the process of adding more tasks. Humanoid category of tasks generally considers control of robots with two legs and two arms.""",
-
     "mobile_manipulation": """# Mobile Manipulation Tasks
 These are tasks where a mobile manipulator is used to manipulate objects. This cateogry primarily uses robots with mobile bases like Fetch or Stretch robots.
-For additional tasks, including scene-level mobile manipulation, please check out the [community contributions page](../community_contributions/index.md).
+For additional tasks, including scene-level mobile manipulation, please check out the [external benchmarks/tasks page](../external/index.md).
 """,
-
     "quadruped": """# Quadruped Tasks
 These are tasks where a quadruped robot is used for locomotion and/or manipulation. This cateogry primarily uses robots with four legs like the ANYmal or Unitree go robots.""",
-
     "control": """# Control Tasks
 These are classic control tasks where the objective is to control a robot to reach a particular state, similar to the [DM Control suite](https://github.com/deepmind/dm_control) but with GPU parallelized simulation and rendering.""",
-
     "drawing": """# Drawing Tasks
 These are tasks where the robot is controlled to draw a specific shape or pattern.""",
@@ -59,8 +55,8 @@
 import cv2
 import tempfile
 
-def main():
+def main():
     # Get the path to mani_skill/envs/tasks
     tasks_dir = Path(mani_skill.envs.__file__).parent / "tasks"
 
@@ -71,10 +67,10 @@ def main():
     # Walk through all subfolders in tasks directory
     for root, dirs, files in os.walk(tasks_dir):
         for file in files:
-            if file.endswith('.py') and not file.startswith('__'):
+            if file.endswith(".py") and not file.startswith("__"):
                 # Get relative import path
                 rel_path = os.path.relpath(os.path.join(root, file), tasks_dir.parent)
-                module_path = rel_path.replace(os.sep, '.')[:-3] # Remove .py
+                module_path = rel_path.replace(os.sep, ".")[:-3]  # Remove .py
 
                 # Import the module
                 try:
@@ -84,8 +80,11 @@ def main():
                     classes = inspect.getmembers(module, inspect.isclass)
 
                     # Store classes that are defined in this module (not imported)
-                    local_classes = [cls for name, cls in classes
-                        if cls.__module__ == f"mani_skill.envs.{module_path}"]
+                    local_classes = [
+                        cls
+                        for name, cls in classes
+                        if cls.__module__ == f"mani_skill.envs.{module_path}"
+                    ]
                     if local_classes:
                         task_info[module_path] = local_classes
@@ -110,8 +109,8 @@ def main():
     categorized_tasks = {k: [] for k in TASK_CATEGORIES_TO_INCLUDE}
     for module_path in task_info.keys():
-        parts = module_path.split('.')
-        if parts[0] == 'tasks':
+        parts = module_path.split(".")
+        if parts[0] == "tasks":
"tasks": category = parts[1] if category in categorized_tasks: categorized_tasks[category].append(module_path) @@ -122,31 +121,41 @@ def main(): print(f"\n{category}:") # Create directory if it doesn't exist category_name = TASK_CATEGORIES_NAME_MAP.get(category, category) - os.makedirs(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}", exist_ok=True) + os.makedirs( + f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}", exist_ok=True + ) # Delete existing index.md file for this category - if os.path.exists(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md"): + if os.path.exists( + f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md" + ): os.remove(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md") if category in TASK_CATEGORIES_HEADERS: - with open(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", "w") as f: + with open( + f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", "w" + ) as f: f.write(GLOBAL_TASK_HEADER) f.write(TASK_CATEGORIES_HEADERS[category]) f.write(GLOBAL_TASK_POST_HEADER) # Generate the short TLDR table of tasks env_id_to_thumbnail_path = {} - with open(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", "a") as f: + with open( + f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", "a" + ) as f: f.write("\n## Task Table\n") - f.write("Table of all tasks/environments in this category. Task column is the environment ID, Preview is a thumbnail pair of the first and last frames of an example success demonstration. Max steps is the task's default max episode steps, generally tuned for RL workflows.") - f.write("\n") + f.write( + "Table of all tasks/environments in this category. Task column is the environment ID, Preview is a thumbnail pair of the first and last frames of an example success demonstration. Max steps is the task's default max episode steps, generally tuned for RL workflows." + ) + f.write('\n
') f.write("\n") - f.write("\n") - f.write("\n") - f.write("\n") - f.write("\n") - f.write("\n") - f.write("\n") - f.write("\n") + f.write('\n') + f.write('\n') + f.write('\n') + f.write('\n') + f.write('\n') + f.write('\n') + f.write('\n') f.write("\n") f.write("\n") f.write("\n") @@ -158,16 +167,33 @@ def main(): # Add row for each environment for row_idx, (cls, env_id) in enumerate(zip(classes, env_ids)): # Get reward mode info - dense = "✅" if hasattr(cls, 'SUPPORTED_REWARD_MODES') and "dense" in cls.SUPPORTED_REWARD_MODES else "❌" - sparse = "✅" if hasattr(cls, 'SUPPORTED_REWARD_MODES') and "sparse" in cls.SUPPORTED_REWARD_MODES else "❌" - max_eps_steps = REGISTERED_ENVS[env_id].max_episode_steps if REGISTERED_ENVS[env_id].max_episode_steps is not None else "N/A" + dense = ( + "✅" + if hasattr(cls, "SUPPORTED_REWARD_MODES") + and "dense" in cls.SUPPORTED_REWARD_MODES + else "❌" + ) + sparse = ( + "✅" + if hasattr(cls, "SUPPORTED_REWARD_MODES") + and "sparse" in cls.SUPPORTED_REWARD_MODES + else "❌" + ) + max_eps_steps = ( + REGISTERED_ENVS[env_id].max_episode_steps + if REGISTERED_ENVS[env_id].max_episode_steps is not None + else "N/A" + ) demos = "✅" if env_id in DATASET_SOURCES else "❌" # Get video thumbnail if available thumbnail = "" thumbnail_last = "" - if hasattr(cls, '_sample_video_link') and cls._sample_video_link: + if hasattr(cls, "_sample_video_link") and cls._sample_video_link: video_url = cls._sample_video_link - thumbnail_paths = [video_url.replace('.mp4', '_thumb_first.png'), video_url.replace('.mp4', '_thumb_last.png')] + thumbnail_paths = [ + video_url.replace(".mp4", "_thumb_first.png"), + video_url.replace(".mp4", "_thumb_last.png"), + ] # Check if thumbnail already exists online thumbnails_exist = False for thumbnail_path in thumbnail_paths: @@ -179,17 +205,32 @@ def main(): thumbnails_exist = False break # Also check locally in figures/env_demos - local_thumbnail_paths = [os.path.join(os.path.dirname(__file__), "source/_static/env_thumbnails", os.path.basename(thumbnail_path)) for thumbnail_path in thumbnail_paths] - if os.path.exists(local_thumbnail_paths[0]) and os.path.exists(local_thumbnail_paths[1]): + local_thumbnail_paths = [ + os.path.join( + os.path.dirname(__file__), + "source/_static/env_thumbnails", + os.path.basename(thumbnail_path), + ) + for thumbnail_path in thumbnail_paths + ] + if os.path.exists(local_thumbnail_paths[0]) and os.path.exists( + local_thumbnail_paths[1] + ): thumbnails_exist = True if not thumbnails_exist: # Create temp file to store video # with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_video: - # urllib.request.urlretrieve(video_url, tmp_video.name) + # urllib.request.urlretrieve(video_url, tmp_video.name) # Extract first frame and resize maintaining aspect ratio - cap = cv2.VideoCapture(os.path.join(os.path.dirname(__file__), "../figures/environment_demos", os.path.basename(video_url))) + cap = cv2.VideoCapture( + os.path.join( + os.path.dirname(__file__), + "../figures/environment_demos", + os.path.basename(video_url), + ) + ) # Get first frame ret, first_frame = cap.read() @@ -200,35 +241,71 @@ def main(): cap.release() # Process both frames - for frame, output_path in [(first_frame, thumbnail_paths[0]), (last_frame, thumbnail_paths[1])]: + for frame, output_path in [ + (first_frame, thumbnail_paths[0]), + (last_frame, thumbnail_paths[1]), + ]: height, width = frame.shape[:2] if height > width: # Scale height to 256, maintain aspect ratio scale = 256.0 / height new_width = int(width * scale) - frame = 
cv2.resize(frame, (new_width, 256), interpolation=cv2.INTER_AREA) + frame = cv2.resize( + frame, + (new_width, 256), + interpolation=cv2.INTER_AREA, + ) else: # Scale width to 256, maintain aspect ratio scale = 256.0 / width new_height = int(height * scale) - frame = cv2.resize(frame, (256, new_height), interpolation=cv2.INTER_AREA) + frame = cv2.resize( + frame, + (256, new_height), + interpolation=cv2.INTER_AREA, + ) # Save as compressed PNG - output_path = os.path.join(os.path.dirname(__file__), "source/_static/env_thumbnails", os.path.basename(output_path)) - cv2.imwrite(output_path, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9]) + output_path = os.path.join( + os.path.dirname(__file__), + "source/_static/env_thumbnails", + os.path.basename(output_path), + ) + cv2.imwrite( + output_path, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9] + ) # # Clean up temp file # os.unlink(tmp_video.name) - thumbnail_first_path = os.path.join('../../_static/env_thumbnails', os.path.basename(video_url.replace('.mp4', '_thumb_first.png'))) - thumbnail_last_path = os.path.join('../../_static/env_thumbnails', os.path.basename(video_url.replace('.mp4', '_thumb_last.png'))) + thumbnail_first_path = os.path.join( + "../../_static/env_thumbnails", + os.path.basename( + video_url.replace(".mp4", "_thumb_first.png") + ), + ) + thumbnail_last_path = os.path.join( + "../../_static/env_thumbnails", + os.path.basename( + video_url.replace(".mp4", "_thumb_last.png") + ), + ) thumbnail = f"{env_id}" thumbnail_last = f"{env_id}" - env_id_to_thumbnail_path[env_id] = [thumbnail_first_path, thumbnail_last_path] + env_id_to_thumbnail_path[env_id] = [ + thumbnail_first_path, + thumbnail_last_path, + ] # f.write(f"| {env_id} |
{thumbnail} {thumbnail_last}
| {dense} | {sparse} |") - f.write(f"\n") - f.write(f"\n") - f.write(f"\n") + f.write( + f"\n" + ) + f.write( + f'\n' + ) + f.write( + f"\n" + ) f.write(f"\n") f.write(f"\n") f.write(f"\n") @@ -248,22 +325,33 @@ def main(): for cls, env_id in zip(classes, env_ids): # Check if dense reward function is overridden from mani_skill.envs.sapien_env import BaseEnv - has_custom_dense = ( - hasattr(cls, "compute_dense_reward") and - (cls.compute_dense_reward != BaseEnv.compute_dense_reward or cls.compute_normalized_dense_reward != BaseEnv.compute_normalized_dense_reward) + + has_custom_dense = hasattr(cls, "compute_dense_reward") and ( + cls.compute_dense_reward != BaseEnv.compute_dense_reward + or cls.compute_normalized_dense_reward + != BaseEnv.compute_normalized_dense_reward ) if has_custom_dense and "dense" not in cls.SUPPORTED_REWARD_MODES: - print(f"Warning: {cls.__name__}, {env_id} has custom dense reward but dense not in SUPPORTED_REWARD_MODES") - does_not_have_custom_dense = not has_custom_dense and "dense" in cls.SUPPORTED_REWARD_MODES + print( + f"Warning: {cls.__name__}, {env_id} has custom dense reward but dense not in SUPPORTED_REWARD_MODES" + ) + does_not_have_custom_dense = ( + not has_custom_dense and "dense" in cls.SUPPORTED_REWARD_MODES + ) if does_not_have_custom_dense: - print(f"Warning: {cls.__name__}, {env_id} does not have custom dense reward but dense is in SUPPORTED_REWARD_MODES") + print( + f"Warning: {cls.__name__}, {env_id} does not have custom dense reward but dense is in SUPPORTED_REWARD_MODES" + ) # Extract docstring if cls.__doc__: - with open(f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", "a") as f: + with open( + f"source/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}/index.md", + "a", + ) as f: f.write(f"\n## {env_id}\n\n") # Write reward modes if available - if hasattr(cls, 'SUPPORTED_REWARD_MODES'): + if hasattr(cls, "SUPPORTED_REWARD_MODES"): if "dense" in cls.SUPPORTED_REWARD_MODES: f.write("![dense-reward][dense-reward-badge]\n") else: @@ -274,12 +362,17 @@ def main(): f.write("![no-sparse-reward][no-sparse-reward-badge]\n") if env_id in DATASET_SOURCES: f.write("![demos][demos-badge]\n") - if REGISTERED_ENVS[env_id].asset_download_ids is not None and len(REGISTERED_ENVS[env_id].asset_download_ids) > 0: + if ( + REGISTERED_ENVS[env_id].asset_download_ids is not None + and len(REGISTERED_ENVS[env_id].asset_download_ids) > 0 + ): f.write("![asset-badge][asset-badge]\n") """:::{dropdown} Task Card\n:icon: note\n:color: primary""" # Clean up docstring and write to file - f.write(":::{dropdown} Task Card\n:icon: note\n:color: primary\n\n") - doc_lines = [line.strip() for line in cls.__doc__.split('\n')] + f.write( + ":::{dropdown} Task Card\n:icon: note\n:color: primary\n\n" + ) + doc_lines = [line.strip() for line in cls.__doc__.split("\n")] while doc_lines and not doc_lines[0]: doc_lines.pop(0) while doc_lines and not doc_lines[-1]: @@ -289,16 +382,28 @@ def main(): f.write("\n") f.write(":::\n") # Add video link if available - if hasattr(cls, '_sample_video_link') and cls._sample_video_link is not None: - f.write("\n
\n") - f.write(f"

[HTML table markup written by the code above — header cells: Task, Preview, Dense Reward, Success/Fail Conditions, Demos, Max Episode Steps; per-row cells: {env_id}, {thumbnail if thumbnail != '' else ''} {thumbnail_last if thumbnail_last != '' else ''}, {dense}, {sparse}, {demos}]
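
Note on the thumbnail logic reformatted in the @@ -179 and @@ -200 hunks: the script reads the first and last frames of a demo video with OpenCV, scales the longer side to 256 px while keeping the aspect ratio, and saves maximally compressed PNGs. A minimal standalone sketch of that idea, assuming a hypothetical demo.mp4 and thumbs/ output directory (not the repository's real paths) and using a CAP_PROP_POS_FRAMES seek for the last frame, which the hunk context does not show:

import os

import cv2


def write_thumbnails(video_path: str, out_dir: str, max_dim: int = 256) -> None:
    # Grab the first frame, then seek to the final frame and grab that too.
    cap = cv2.VideoCapture(video_path)
    ok_first, first = cap.read()
    cap.set(cv2.CAP_PROP_POS_FRAMES, cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)
    ok_last, last = cap.read()
    cap.release()
    if not (ok_first and ok_last):
        raise RuntimeError(f"could not read frames from {video_path}")
    os.makedirs(out_dir, exist_ok=True)
    for frame, name in [(first, "thumb_first.png"), (last, "thumb_last.png")]:
        h, w = frame.shape[:2]
        # Scale the longer side to max_dim while keeping the aspect ratio.
        scale = max_dim / max(h, w)
        frame = cv2.resize(
            frame, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA
        )
        # Maximum PNG compression, matching the imwrite flags used in the patch.
        cv2.imwrite(
            os.path.join(out_dir, name), frame, [cv2.IMWRITE_PNG_COMPRESSION, 9]
        )


write_thumbnails("demo.mp4", "thumbs")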
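
Note on the has_custom_dense check in the @@ -248 hunk: it flags task classes that override compute_dense_reward (or its normalized variant) inherited from BaseEnv by comparing the attributes at class level. A small self-contained illustration of why that comparison works, using toy classes rather than ManiSkill's BaseEnv:

class Base:
    def compute_dense_reward(self, obs):
        return 0.0


class Inherits(Base):
    pass


class Overrides(Base):
    def compute_dense_reward(self, obs):
        return 1.0


# An inherited method resolves to the same function object as on the base class,
# while an override is a different function, so inequality means "custom reward".
assert Inherits.compute_dense_reward == Base.compute_dense_reward
assert Overrides.compute_dense_reward != Base.compute_dense_reward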