Commit

work
StoneT2000 committed Jan 29, 2024
1 parent 0937fcd commit e15615e
Showing 3 changed files with 54 additions and 33 deletions.
53 changes: 27 additions & 26 deletions mani_skill2/envs/sapien_env.py
@@ -987,29 +987,30 @@ def render(self):
         else:
             raise NotImplementedError(f"Unsupported render mode {self.render_mode}.")
 
-    # ---------------------------------------------------------------------------- #
-    # Advanced
-    # ---------------------------------------------------------------------------- #
-    def gen_scene_pcd(self, num_points: int = int(1e5)) -> np.ndarray:
-        """Generate scene point cloud for motion planning, excluding the robot"""
-        meshes = []
-        articulations = self._scene.get_all_articulations()
-        if self.agent is not None:
-            articulations.pop(articulations.index(self.agent.robot))
-        for articulation in articulations:
-            articulation_mesh = merge_meshes(get_articulation_meshes(articulation))
-            if articulation_mesh:
-                meshes.append(articulation_mesh)
-
-        for actor in self._scene.get_all_actors():
-            actor_mesh = merge_meshes(get_component_meshes(actor))
-            if actor_mesh:
-                meshes.append(
-                    actor_mesh.apply_transform(
-                        actor.get_pose().to_transformation_matrix()
-                    )
-                )
-
-        scene_mesh = merge_meshes(meshes)
-        scene_pcd = scene_mesh.sample(num_points)
-        return scene_pcd
+    # TODO (stao): re implement later
+    # # ---------------------------------------------------------------------------- #
+    # # Advanced
+    # # ---------------------------------------------------------------------------- #
+    # def gen_scene_pcd(self, num_points: int = int(1e5)) -> np.ndarray:
+    #     """Generate scene point cloud for motion planning, excluding the robot"""
+    #     meshes = []
+    #     articulations = self._scene.get_all_articulations()
+    #     if self.agent is not None:
+    #         articulations.pop(articulations.index(self.agent.robot))
+    #     for articulation in articulations:
+    #         articulation_mesh = merge_meshes(get_articulation_meshes(articulation))
+    #         if articulation_mesh:
+    #             meshes.append(articulation_mesh)
+
+    #     for actor in self._scene.get_all_actors():
+    #         actor_mesh = merge_meshes(get_component_meshes(actor))
+    #         if actor_mesh:
+    #             meshes.append(
+    #                 actor_mesh.apply_transform(
+    #                     actor.get_pose().to_transformation_matrix()
+    #                 )
+    #             )
+
+    #     scene_mesh = merge_meshes(meshes)
+    #     scene_pcd = scene_mesh.sample(num_points)
+    #     return scene_pcd
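
The commented-out `gen_scene_pcd` above merged the collision meshes of every non-robot articulation and actor into a single scene mesh, then sampled points from its surface for motion planning. For reference, a minimal standalone sketch of that pattern, written against trimesh directly and using hypothetical placeholder boxes and poses instead of the SAPIEN scene helpers, could look like this:

```python
# Minimal sketch of the gen_scene_pcd pattern using trimesh alone.
# The boxes and poses below are hypothetical stand-ins for the meshes that
# merge_meshes/get_component_meshes would produce from the SAPIEN scene.
import numpy as np
import trimesh

def sample_scene_pcd(meshes_with_poses, num_points: int = 100_000) -> np.ndarray:
    """Transform each mesh to the world frame, merge, and sample a point cloud."""
    world_meshes = []
    for mesh, pose_matrix in meshes_with_poses:
        m = mesh.copy()
        m.apply_transform(pose_matrix)  # 4x4 homogeneous transform to world frame
        world_meshes.append(m)
    scene_mesh = trimesh.util.concatenate(world_meshes)
    return scene_mesh.sample(num_points)  # (num_points, 3) surface samples

# Usage with two placeholder boxes at different poses
pose_a = np.eye(4)
pose_b = np.eye(4)
pose_b[:3, 3] = [0.5, 0.0, 0.1]
pcd = sample_scene_pcd(
    [
        (trimesh.creation.box(extents=[0.2, 0.2, 0.2]), pose_a),
        (trimesh.creation.box(extents=[0.1, 0.3, 0.1]), pose_b),
    ],
    num_points=2048,
)
print(pcd.shape)  # (2048, 3)
```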
3 changes: 1 addition & 2 deletions mani_skill2/envs/tasks/open_cabinet_drawer.py
@@ -138,12 +138,11 @@ def _initialize_actors(self):

         stime = time.time()
         # this is not pure uniform but for faster initialization to deal with different cabinet DOFs we just sample 0 to 10000 and take the modulo which is close enough
-        link_indices = torch.randint(0, 10000, size=(len(self.handle_links),)) * 0
+        link_indices = torch.randint(0, 10000, size=(len(self.handle_links),))
         self.handle_link = Link.merge(
             [x[link_indices[i] % len(x)] for i, x in enumerate(self.handle_links)],
             self.cabinet,
         )
-        print([link_indices[i] % len(x) for i, x in enumerate(self.handle_links)])
         handle_link_positions = to_tensor(
             np.array(
                 [
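The inline comment in this hunk explains the shortcut: instead of sampling a different uniform range per cabinet (each cabinet has a different number of handle links), one fixed range [0, 10000) is sampled and reduced modulo the per-cabinet link count; the stray `* 0` that pinned every index to 0 and the debug `print` are removed. A quick illustrative check of why the modulo is "close enough" to uniform (not part of the commit; `num_links` is a made-up value):

```python
import torch

num_links = 7  # hypothetical handle-link count for one cabinet
samples = torch.randint(0, 10000, size=(100_000,)) % num_links
counts = torch.bincount(samples, minlength=num_links)
# Each index appears with probability within 1/10000 of exact uniform:
# 10000 % 7 = 4, so indices 0-3 get 1429/10000 and 4-6 get 1428/10000.
print(counts.float() / counts.sum())  # every entry is roughly 1/7 ≈ 0.143
```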
31 changes: 26 additions & 5 deletions mani_skill2/utils/structs/articulation.py
@@ -9,7 +9,9 @@
 import sapien
 import sapien.physx as physx
 import torch
+import trimesh
 
+from mani_skill2.utils.geometry.trimesh_utils import get_component_meshes, merge_meshes
 from mani_skill2.utils.sapien_utils import to_numpy, to_tensor
 from mani_skill2.utils.structs.base import BaseStruct
 from mani_skill2.utils.structs.joint import Joint
@@ -192,6 +194,28 @@ def max_dof(self) -> int:
     # g0, g1, g2, g3 = s.get_collision_groups()
     # s.set_collision_groups([g0, g1, g2 | (1 << 29), g3])
 
+    def get_collision_mesh(
+        self, to_world_frame: bool = True, first_only: bool = True
+    ) -> trimesh.Trimesh:
+        """
+        Returns the collision mesh of each managed articulation object. Results of this are cached
+        TODO (stao): Can we have a batched version of trimesh?
+        """
+        meshes = []
+        mat = self.pose.to_transformation_matrix()
+        for i, art in enumerate(self._objs):
+            art_meshes = []
+            for link in art.links:
+                art_meshes += get_component_meshes(link)
+            mesh = merge_meshes(art_meshes)
+            if to_world_frame:
+                mesh.apply_transform(mat[i])
+            meshes.append(mesh)
+            if first_only:
+                break
+        return meshes
+
     # -------------------------------------------------------------------------- #
     # Functions from physx.PhysxArticulation
     # -------------------------------------------------------------------------- #
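
One way the new `get_collision_mesh` could be consumed, sketched under the assumption that `cabinet` is an `Articulation` struct already built by a task (the caching mentioned in the docstring is still a TODO, and note the method returns a list of meshes even though the annotation says `trimesh.Trimesh`):

```python
# Sketch only: `cabinet` is assumed to be a mani_skill2 Articulation struct
# created elsewhere, e.g. by a task's scene-loading code.
meshes = cabinet.get_collision_mesh(to_world_frame=True, first_only=True)
collision_mesh = meshes[0]             # a trimesh.Trimesh in the world frame
points = collision_mesh.sample(4096)   # (4096, 3) surface point cloud
print(collision_mesh.bounds, points.shape)
```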
@@ -326,14 +350,11 @@ def dof(self) -> int:
     # def name(self, arg1: str) -> None:
     #     pass
     @property
-    def pose(self) -> sapien.Pose:
-        """
-        :type: sapien.pysapien.Pose
-        """
+    def pose(self) -> Pose:
         return self.root_pose
 
     @pose.setter
-    def pose(self, arg1: sapien.Pose) -> None:
+    def pose(self, arg1: Union[Pose, sapien.Pose]) -> None:
         self.root_pose = arg1
 
     # @property
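With this change, `Articulation.pose` is a plain alias for `root_pose` typed as the batched `Pose` struct, and the setter accepts either that wrapper or a raw `sapien.Pose`. A brief usage sketch, again assuming an existing `cabinet` struct and that the `root_pose` setter handles any needed conversion:

```python
import sapien

# Sketch only: `cabinet` is assumed to be an Articulation struct from a loaded scene.
current = cabinet.pose                          # same value as cabinet.root_pose
cabinet.pose = sapien.Pose(p=[0.0, 0.0, 0.5])   # accepted via Union[Pose, sapien.Pose]
```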
