From 3f8152cccb613a6749c18a41ec9480a4a24b35a6 Mon Sep 17 00:00:00 2001 From: Shuhei Kozasa Date: Tue, 26 Dec 2023 18:35:18 +0900 Subject: [PATCH] Adds English comments (Still need to edit preset_pid_gain_example.py and waist_joint_trajectory_example.py) --- .../scripts/chest_camera_tracking.py | 34 +++++++++- .../scripts/depth_camera_tracking.py | 39 ++++++++++++ .../scripts/head_camera_tracking.py | 44 ++++++++++++- .../scripts/pick_and_place_left_arm_demo.py | 19 ++++++ .../scripts/pick_and_place_right_arm_demo.py | 19 ++++++ .../scripts/pick_and_place_two_arm_demo.py | 62 +++++++++++++++++++ sciurus17_examples/src/object_detection.cpp | 15 +++++ 7 files changed, 230 insertions(+), 2 deletions(-) diff --git a/sciurus17_examples/scripts/chest_camera_tracking.py b/sciurus17_examples/scripts/chest_camera_tracking.py index 52570290..190a85aa 100755 --- a/sciurus17_examples/scripts/chest_camera_tracking.py +++ b/sciurus17_examples/scripts/chest_camera_tracking.py @@ -52,12 +52,14 @@ def _image_callback(self, ros_image): input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8") except CvBridgeError as e: rospy.logerr(e) - + # 画像のwidth, heightを取得 + # Obtain the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] # 特定色のオブジェクトを検出 + # Detect an object with a certain color output_image = self._detect_orange_object(input_image) # output_image = self._detect_blue_object(input_image) @@ -67,6 +69,8 @@ def _image_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinate is normalized into -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -74,11 +78,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Convert the coordinate where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -93,22 +99,27 @@ def object_detected(self): def _detect_color_object(self, bgr_image, lower_color, upper_color): # 画像から指定された色の物体を検出する + # Detect the specified colored object from the image MIN_OBJECT_SIZE = 7000 # px * px # BGR画像をHSV色空間に変換 + # Convert the BGR image to HSV color space hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV) # 色を抽出するマスクを生成 + # Creates a mask to extract the color mask = cv2.inRange(hsv, lower_color, upper_color) # マスクから輪郭を抽出 + # Extract the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # 輪郭を長方形に変換し、配列に格納 + # Convert the contour to a rectangle and store it in a vector rects = [] for contour in contours: approx = cv2.convexHull(contour) @@ -118,11 +129,14 @@ def _detect_color_object(self, bgr_image, lower_color, upper_color): self._object_detected = False if len(rects) > 0: # 最も大きい長方形を抽出 + # Extract the largest rectangle rect = max(rects, key=(lambda x: x[2] * x[3])) # 長方形が小さければ検出判定にしない + # Updates the detection result to false if the rectangle is small if rect[2] * rect[3] > MIN_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draw the rectangle on the image cv2.rectangle(bgr_image,
(rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -172,6 +186,7 @@ def __init__(self): def _state_callback(self, state): # 腰の現在角度を取得 + # Returns the current angle of the waist self._state_received = True yaw_radian = state.actual.positions[0] @@ -188,6 +203,7 @@ def get_current_yaw(self): def set_angle(self, yaw_angle, goal_secs=1.0e-9): # 腰を指定角度に動かす + # Move the waist to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["waist_yaw_joint"] @@ -203,6 +219,7 @@ def set_angle(self, yaw_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the waist angle to 0 deg when shutting down waist_yaw.set_angle(math.radians(0), 3.0) @@ -213,24 +230,33 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold for object tracking + # in the normalized coordinate system (px, px) THRESH_X = 0.05 # 腰の初期角度 Degree + # The initial angle of the waist in degrees INITIAL_YAW_ANGLE = 0 # 腰の制御角度リミット値 Degree + # The waist angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 # 腰の制御量 # 値が大きいほど腰を大きく動かす + # The control amount of the waist + # The waist moves more if the gain is bigger OPERATION_GAIN_X = 5.0 # 初期角度に戻る時の制御角度 Degree + # The control angle in degrees when returning to the initial pose RESET_OPERATION_ANGLE = 1 # 現在の腰角度を取得する # ここで現在の腰角度を取得することで、ゆっくり初期角度へ戻る + # Receives the current waist angle + # By receiving the waist angle here, it moves to the initial pose slowly while not waist_yaw.state_received(): pass yaw_angle = waist_yaw.get_current_yaw() @@ -240,6 +266,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Receives the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -248,15 +275,19 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X # 腰の角度を制限する + # Limits the waist angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: yaw_angle = MIN_YAW_ANGLE else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/depth_camera_tracking.py b/sciurus17_examples/scripts/depth_camera_tracking.py index 90a6383d..d576d175 100755 --- a/sciurus17_examples/scripts/depth_camera_tracking.py +++ b/sciurus17_examples/scripts/depth_camera_tracking.py @@ -41,9 +41,12 @@ def __init__(self): self._bridge = CvBridge() # 頭カメラのカラー画像 + # The color image from the head camera self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1) # 頭カメラの深度画像 # カラー画像と視界が一致するように補正されている + # The depth image from the head camera + # It is aligned to the color image to match the view self._depth_sub = rospy.Subscriber("/sciurus17/camera/aligned_depth_to_color/image_raw", Image, self._depth_callback, queue_size=1) self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1) @@ -77,6 +80,7 @@ def _depth_callback(self, ros_image): return # 画像のwidth, heightを取得 + # Obtains
the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] @@ -90,6 +94,8 @@ def _depth_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinate is normalized into -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -97,11 +103,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Convert the coordinate where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -116,17 +124,21 @@ def object_detected(self): def _detect_object(self, input_depth_image): # 検出するオブジェクトの大きさを制限する + # Limits the size of the object to be detected MIN_OBJECT_SIZE = 10000 # px * px MAX_OBJECT_SIZE = 80000 # px * px # 検出範囲を4段階設ける # 単位はmm + # Sets 4 layers of detection range + # Unit is in mm DETECTION_DEPTH = [ (500, 700), (600, 800), (700, 900), (800, 1000)] # 検出したオブジェクトを囲う長方形の色 + # The color of the rectangle around the detected object RECT_COLOR = [ (0, 0, 255), (0, 255, 0), @@ -135,6 +147,7 @@ def _detect_object(self, input_depth_image): # カメラのカラー画像を加工して出力する # カラー画像を受け取ってない場合はFalseを返す + # Outputs an edited color image output_image = self._color_image if output_image is None: return False @@ -143,9 +156,11 @@ def _detect_object(self, input_depth_image): self._median_depth = 0 for i, depth in enumerate(DETECTION_DEPTH): # depth[0] ~ depth[1]の範囲でオブジェクトを抽出する + # Extracts an object between depth[0] and depth[1] mask = cv2.inRange(input_depth_image, depth[0], depth[1]) # マスクから輪郭を抽出 + # Extract the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: @@ -160,13 +175,16 @@ def _detect_object(self, input_depth_image): if len(rects) > 0: # 最も大きい長方形を抽出 + # Extract the largest rectangle rect = max(rects, key=(lambda x: x[2] * x[3])) rect_size = rect[2] * rect[3] # 長方形の大きさが指定範囲内であるかチェック + # Check if the size of the rectangle is within the limit if rect_size > MIN_OBJECT_SIZE and rect_size < MAX_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draw the rectangle on the image cv2.rectangle(output_image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -176,13 +194,16 @@ def _detect_object(self, input_depth_image): self._object_detected = True # 深度画像から長方形の領域を切り取る + # Clips the range of the rectangle from the depth image object_depth = input_depth_image[ rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]] # 深度の中央値を求める + # Calculates the median of the depth self._median_depth = int(np.median(object_depth)) # 中央値のテキストを出力画像に描画する + # Write the median on the output image cv2.putText(output_image, str(self._median_depth), (rect[0], rect[1]+30), cv2.FONT_HERSHEY_SIMPLEX, @@ -212,6 +233,7 @@ def __init__(self): def _state_callback(self, state): # 首の現在角度を取得 + # Returns the current neck angle self._state_received = True yaw_radian = state.actual.positions[0] @@ -235,6 +257,7 @@ def get_current_pitch(self): def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): # 首を指定角度に動かす + # Move the neck to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = 
["neck_yaw_joint", "neck_pitch_joint"] @@ -251,6 +274,7 @@ def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the neck angle to 0 deg when shutting down neck.set_angle(math.radians(0), math.radians(0), 3.0) @@ -261,14 +285,18 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold of the object tracking + # Normalized coordinates is (px, px) THRESH_X = 0.05 THRESH_Y = 0.05 # 首の初期角度 Degree + # The inital angle of the neck in degrees INITIAL_YAW_ANGLE = 0 INITIAL_PITCH_ANGLE = 0 # 首の制御角度リミット値 Degree + # The neck angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 MAX_PITCH_ANGLE = 50 @@ -276,14 +304,19 @@ def main(): # 首の制御量 # 値が大きいほど首を大きく動かす + # The control amount of the neck + # The neck moves more if the gain is bigger OPERATION_GAIN_X = 5.0 OPERATION_GAIN_Y = 5.0 # 初期角度に戻る時の制御角度 Degree + # The degree when returning to the initial pose RESET_OPERATION_ANGLE = 3 # 現在の首角度を取得する # ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る + # Recieves the current neck angle + # By recieving the neck angle here, it moves to the initial pose slowly while not neck.state_received(): pass yaw_angle = neck.get_current_yaw() @@ -294,6 +327,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Recieves the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -302,11 +336,14 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X @@ -314,6 +351,7 @@ def main(): pitch_angle += object_position.y * OPERATION_GAIN_Y # 首の制御角度を制限する + # Limits the neck angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: @@ -326,6 +364,7 @@ def main(): else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/head_camera_tracking.py b/sciurus17_examples/scripts/head_camera_tracking.py index 93f5a314..01aa406d 100755 --- a/sciurus17_examples/scripts/head_camera_tracking.py +++ b/sciurus17_examples/scripts/head_camera_tracking.py @@ -47,7 +47,8 @@ def __init__(self): self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.') # カスケードファイルの読み込み - # 例 + # Reads the cascade file + # 例 | Examples # self._face_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_frontalface_alt2.xml") # self._eyes_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_eye.xml") self._face_cascade = "" @@ -61,10 +62,12 @@ def _image_callback(self, ros_image): rospy.logerr(e) # 画像のwidth, heightを取得 + # Obtains the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] # オブジェクト(特定色 or 顔) の検出 + # Detecting the object (a specific color or a face) output_image = self._detect_orange_object(input_image) # output_image = self._detect_blue_object(input_image) # output_image = 
self._detect_face(input_image) @@ -75,6 +78,8 @@ def _image_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinate is normalized into -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -82,11 +87,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Convert the coordinate where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -101,22 +108,27 @@ def object_detected(self): def _detect_color_object(self, bgr_image, lower_color, upper_color): # 画像から指定された色の物体を検出する + # Detect an object with the specified color from the image MIN_OBJECT_SIZE = 1000 # px * px # BGR画像をHSV色空間に変換 + # Converts the BGR image to HSV color space hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV) # 色を抽出するマスクを生成 + # Creates a mask to extract the color mask = cv2.inRange(hsv, lower_color, upper_color) # マスクから輪郭を抽出 + # Extract the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # 輪郭を長方形に変換し、配列に格納 + # Convert the contour to a rectangle and store it in a vector rects = [] for contour in contours: approx = cv2.convexHull(contour) @@ -126,11 +138,14 @@ def _detect_color_object(self, bgr_image, lower_color, upper_color): self._object_detected = False if len(rects) > 0: # 最も大きい長方形を抽出 + # Extract the largest rectangle rect = max(rects, key=(lambda x: x[2] * x[3])) # 長方形が小さければ検出判定にしない + # Updates the detection result to false if the rectangle is small if rect[2] * rect[3] > MIN_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draw the rectangle on the image cv2.rectangle(bgr_image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -164,38 +179,47 @@ def _detect_blue_object(self, bgr_image): def _detect_face(self, bgr_image): # 画像から顔(正面)を検出する + # Detects a face (front) from the image SCALE = 4 # カスケードファイルがセットされているかを確認 + # Checks if a cascade file is set if self._face_cascade == "" or self._eyes_cascade == "": rospy.logerr("cascade file does not set") return bgr_image # BGR画像をグレー画像に変換 + # Converts the BGR image to a gray scaled image gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY) # 処理時間短縮のため画像を縮小 + # Compress the image to shorten the process height, width = gray.shape[:2] small_gray = cv2.resize(gray, (width/SCALE, height/SCALE)) # カスケードファイルを使って顔認識 + # Use the cascade file for face detection small_faces = self._face_cascade.detectMultiScale(small_gray) self._object_detected = False for small_face in small_faces: # 顔の領域を元のサイズに戻す + # Resize the face area to the original size face = small_face*SCALE # グレー画像から顔部分を抽出 + # Find the face area from the gray scaled image roi_gray = gray[ face[1]:face[1]+face[3], face[0]:face[0]+face[2]] # 顔の中から目を検知 + # Detect the eyes from the face eyes = self._eyes_cascade.detectMultiScale(roi_gray) # 目を検出したら、対象のrect(座標と大きさ)を記録する + # If the eyes are detected, record the size and coordinates if len(eyes) > 0: cv2.rectangle(bgr_image, (face[0],face[1]), @@ -229,6 +253,7 @@ def __init__(self): def _state_callback(self, state): # 
首の現在角度を取得 + # Returns the current angle of the neck self._state_received = True yaw_radian = state.actual.positions[0] @@ -252,6 +277,7 @@ def get_current_pitch(self): def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): # 首を指定角度に動かす + # Moves the neck to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"] @@ -268,6 +294,7 @@ def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the neck angle to 0 deg when shutting down neck.set_angle(math.radians(0), math.radians(0), 3.0) @@ -278,14 +305,18 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold for object tracking + # in the normalized coordinate system (px, px) THRESH_X = 0.05 THRESH_Y = 0.05 # 首の初期角度 Degree + # The initial angle of the neck in degrees INITIAL_YAW_ANGLE = 0 INITIAL_PITCH_ANGLE = 0 # 首の制御角度リミット値 Degree + # The neck angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 MAX_PITCH_ANGLE = 50 @@ -293,14 +324,19 @@ def main(): # 首の制御量 # 値が大きいほど首を大きく動かす + # The control amount of the neck + # The neck moves more if the gain is bigger OPERATION_GAIN_X = 5.0 OPERATION_GAIN_Y = 5.0 # 初期角度に戻る時の制御角度 Degree + # The control angle in degrees when returning to the initial pose RESET_OPERATION_ANGLE = 3 # 現在の首角度を取得する # ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る + # Receives the current neck angle + # By receiving the neck angle here, it moves to the initial pose slowly while not neck.state_received(): pass yaw_angle = neck.get_current_yaw() @@ -311,6 +347,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Receives the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -319,11 +356,14 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X @@ -331,6 +371,7 @@ def main(): pitch_angle += object_position.y * OPERATION_GAIN_Y # 首の制御角度を制限する + # Limits the neck angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: yaw_angle = MIN_YAW_ANGLE @@ -343,6 +384,7 @@ def main(): else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py index 9a42bb5b..e3a601f3 100755 --- a/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py @@ -43,16 +43,19 @@ def main(): print(robot.get_current_state()) # アーム初期ポーズを表示 + # Display the arm's initial pose arm_initial_pose = arm.get_current_pose().pose print("Arm initial pose:") print(arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Open the hand in case it's holding on to something gripper_goal.command.position = -0.9 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("l_arm_init_pose") arm.go()
gripper_goal.command.position = 0.0 @@ -60,10 +63,12 @@ def main(): gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepare for grasping target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -73,15 +78,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand gripper_goal.command.position = -0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く + # Grasp! target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -91,15 +99,18 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Close the hand gripper_goal.command.position = -0.4 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 持ち上げる + # Pick up target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -109,10 +120,12 @@ def main(): arm.go() # 実行 # 移動する + # Moves to a different position target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -122,10 +135,12 @@ def main(): arm.go() # 実行 # 下ろす + # Lower the arm target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -135,15 +150,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand gripper_goal.command.position = -0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 少しだけハンドを持ち上げる + # Raise the hand a little target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.2 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -153,6 +171,7 @@ def main(): arm.go() # 実行 # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("l_arm_init_pose") arm.go() diff --git a/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py index 6a0d4a05..3b814dcf 100755 --- a/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py @@ -43,16 +43,19 @@ def main(): print(robot.get_current_state()) # アーム初期ポーズを表示 + # Display the arm's initial pose arm_initial_pose = arm.get_current_pose().pose print("Arm initial pose:") print(arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Open the hand in case it's holding on to something gripper_goal.command.position = 0.9 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 
SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("r_arm_waist_init_pose") arm.go() gripper_goal.command.position = 0.0 @@ -60,10 +63,12 @@ def main(): gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepare for grasping target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -73,15 +78,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand gripper_goal.command.position = 0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く + # Grasp! target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -91,15 +99,18 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Close the hand gripper_goal.command.position = 0.4 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 持ち上げる + # Pick up target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -109,10 +120,12 @@ def main(): arm.go() # 実行 # 移動する + # Move to a different position target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -122,10 +135,12 @@ def main(): arm.go() # 実行 # 下ろす + # Lower the arm target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -135,15 +150,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand gripper_goal.command.position = 0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 少しだけハンドを持ち上げる + # Raise the hand a little target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.2 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -153,6 +171,7 @@ def main(): arm.go() # 実行 # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("r_arm_waist_init_pose") arm.go() diff --git a/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py index df3c131e..682ecea4 100755 --- a/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py @@ -33,16 +33,20 @@ def main(): r_eef_oc = OrientationConstraint() # 両腕グループの制御 + # Controls both arms as one MoveGroup arm = moveit_commander.MoveGroupCommander("two_arm_group") # 速度と加速度をゆっくりにする + # Lower the velocity and acceleration arm.set_max_velocity_scaling_factor(0.2)
arm.set_max_acceleration_scaling_factor(0.2) # 右ハンド初期化 + # Initialize the right hand r_gripper = actionlib.SimpleActionClient("/sciurus17/controller1/right_hand_controller/gripper_cmd", GripperCommandAction) r_gripper.wait_for_server() r_gripper_goal = GripperCommandGoal() r_gripper_goal.command.max_effort = 2.0 # 左ハンド初期化 + # Initialize the left hand l_gripper = actionlib.SimpleActionClient("/sciurus17/controller2/left_hand_controller/gripper_cmd", GripperCommandAction) l_gripper.wait_for_server() l_gripper_goal = GripperCommandGoal() @@ -51,60 +55,75 @@ def main(): rospy.sleep(1.0) # グループリスト表示 + # Display the group names print("Group names:") print(robot.get_group_names()) # ステータス表示 + # Display the current status print("Current state:") print(robot.get_current_state()) # 右アーム初期ポーズを表示 + # Display the initial pose of the right arm r_arm_initial_pose = arm.get_current_pose("r_link7").pose print("Right arm initial pose:") print(r_arm_initial_pose) # 左アーム初期ポーズを表示 + # Display the initial pose of the left arm l_arm_initial_pose = arm.get_current_pose("l_link7").pose print("Left arm initial pose:") print(l_arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Open the hand in case it's holding on to something # 右手を開く + # Open the right hand r_gripper_goal.command.position = 0.9 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Open the left hand l_gripper_goal.command.position = -0.9 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("two_arm_init_pose") arm.go() # 右手を閉じる + # Close the right hand r_gripper_goal.command.position = 0.0 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Close the left hand l_gripper_goal.command.position = 0.0 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepare for grasping # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.25 r_target_pose.position.z = 0.2 + # Point the hand down q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # まずは手を下に向ける r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.25 l_target_pose.position.z = 0.2 + # Point the hand down q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # まずは手を下に向ける l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] @@ -116,31 +135,39 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand # 右手を開く + # Open the right hand r_gripper_goal.command.position = 0.7 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Open the left hand l_gripper_goal.command.position = -0.7 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 手を内向きにする + # Turn the hands inward # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.25 r_target_pose.position.z = 0.08 + # Turn the hand inward q = quaternion_from_euler(3.14, 0.0, 0.0) # 手を内向きにする r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose()
l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.25 l_target_pose.position.z = 0.08 + # Turn the hand inward q = quaternion_from_euler(-3.14, 0.0, 0.0) # 手を内向きにする l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] @@ -152,17 +179,22 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand # 右手を開く + # Open the right hand r_gripper_goal.command.position = 1.0 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Open the left hand l_gripper_goal.command.position = -1.0 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く準備 + # Prepare to grasp # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.10 @@ -173,6 +205,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.10 @@ -183,7 +216,9 @@ def main(): l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 軌道制約の追加 + # Adds a path constraint # 右手を平行に寄せる + # Move the right hand towards the center r_eef_pose = arm.get_current_pose("r_link7").pose r_eef_oc.header.stamp = rospy.Time.now() r_eef_oc.header.frame_id = "/base_link" @@ -194,6 +229,7 @@ def main(): r_eef_oc.absolute_z_axis_tolerance = 0.7 r_eef_oc.weight = 1 # 左手を平行に寄せる + # Move the left hand towards the center l_eef_pose = arm.get_current_pose("l_link7").pose l_eef_oc.header.stamp = rospy.Time.now() l_eef_oc.header.frame_id = "/base_link" @@ -212,7 +248,9 @@ def main(): arm.go() # 実行 # 掴みに行く + # Grasp! # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.08 @@ -223,6 +261,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.08 @@ -238,17 +277,22 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Close the hand # 右手を閉じる + # Close the right hand r_gripper_goal.command.position = 0.4 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Close the left hand l_gripper_goal.command.position = -0.4 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 前へ押し出す + # Push forward # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.35 r_target_pose.position.y = -0.09 @@ -259,6 +303,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.35 l_target_pose.position.y = 0.09 @@ -274,37 +319,47 @@ def main(): arm.go() # 実行 # ハンドを開く + # Open the hand # 右手を開く + # Open the right hand r_gripper_goal.command.position = 1.5 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Open the left hand l_gripper_goal.command.position = -1.5 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 軌道制約の解除 + # Reset the path constraints arm.set_path_constraints(None) # 1秒待つ + # Wait one second rospy.sleep(1.0) # 離す + # Release # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.28 r_target_pose.position.y = -0.20 r_target_pose.position.z = 0.12 + # Point the hand down q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 手を下向きにして逃がす 
r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.28 l_target_pose.position.y = 0.20 l_target_pose.position.z = 0.12 + # Point the hand down q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 手を下向きにして逃がす l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] @@ -316,17 +371,22 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Close the hand # 右手を閉じる + # Close the right hand r_gripper_goal.command.position = 0.4 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Close the left hand l_gripper_goal.command.position = -0.4 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 腕を持ち上げてターゲットから離れる + # Raise the arm to move away from the target # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.32 r_target_pose.position.y = -0.25 @@ -337,6 +397,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.32 l_target_pose.position.y = 0.25 @@ -352,6 +413,7 @@ def main(): arm.go() # 実行 # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("two_arm_init_pose") arm.go() diff --git a/sciurus17_examples/src/object_detection.cpp b/sciurus17_examples/src/object_detection.cpp index 3eb34f53..66b10a0c 100644 --- a/sciurus17_examples/src/object_detection.cpp +++ b/sciurus17_examples/src/object_detection.cpp @@ -39,6 +39,7 @@ void convert_to_marker(visualization_msgs::Marker *marker, const int marker_id, const pcl::PointXYZRGB &min_pt, const pcl::PointXYZRGB &max_pt) { // pcl::Pointの最大最小値をBox型のマーカに変換する + // Convert the max/min of the pcl::Point to a Box-type marker marker->header.frame_id = frame_id; marker->header.stamp = ros::Time(); @@ -88,6 +89,8 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) // point cloudの座標を変換 // base_linkとカメラ間のTFを取得する + // Converts the point cloud coordinates + // Receives the TF between base_link and camera while(true){ try{ listener.lookupTransform(FRAME_ID, cloud_msg->header.frame_id, ros::Time(0), transform); @@ -101,10 +104,12 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) pcl_ros::transformPointCloud(FRAME_ID, transform, *cloud_msg, cloud_transformed); // sensor_msgs/PointCloud2からpcl/PointCloudに変換 + // Converts sensor_msgs/PointCloud2 to pcl/PointCloud pcl::PointCloud::Ptr cloud (new pcl::PointCloud()); pcl::fromROSMsg(cloud_transformed, *cloud); // Z方向の範囲でフィルタリング + // Filters by the Z-axis range pcl::PointCloud::Ptr cloud_passthrough (new pcl::PointCloud()); pcl::PassThrough pass; pass.setInputCloud (cloud); @@ -113,6 +118,7 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) pass.filter (*cloud_passthrough); // voxelgridでダウンサンプリング + // Downsamples with voxelgrid pcl::PointCloud::Ptr cloud_voxelgrid (new pcl::PointCloud()); pcl::VoxelGrid voxelgrid; voxelgrid.setInputCloud (cloud_passthrough); @@ -120,11 +126,13 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) voxelgrid.filter (*cloud_voxelgrid); // pointがなければ処理を抜ける + // Exit the process if there are no points if(cloud_voxelgrid->size() <= 0){ return; } // クラスタ抽出のためKdTreeオブジェクトを作成 + // Creates a KdTree object for cluster extraction pcl::search::KdTree::Ptr tree (new pcl::search::KdTree()); tree->setInputCloud
(cloud_voxelgrid); @@ -142,12 +150,15 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) // クラスタごとにPointのRGB値を変更する // クラスタをもとにMarkerを生成する + // Changes the RGB values of the points in each cluster + // Creates the Marker based on the cluster pcl::PointCloud::Ptr cloud_output (new pcl::PointCloud()); for (std::vector::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it) { pcl::PointCloud::Ptr cloud_cluster (new pcl::PointCloud()); // クラスタ内のPointRGB値を変更 + // Changes the RGB values of the points within the cluster for (std::vector::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit){ cloud_voxelgrid->points[*pit].r = CLUSTER_COLOR[cluster_i][RED]; @@ -156,13 +167,16 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) cloud_cluster->points.push_back (cloud_voxelgrid->points[*pit]); //* } // Unorganized datasetsとしてwidth, heightを入力 + // Sets the width and height as an unorganized dataset cloud_cluster->width = cloud_cluster->points.size (); cloud_cluster->height = 1; // 無効なpointがないのでis_denseはtrue + // is_dense is true since there are no invalid points cloud_cluster->is_dense = true; *cloud_output += *cloud_cluster; // Markerの作成 + // Create the marker visualization_msgs::Marker marker; pcl::PointXYZRGB min_pt, max_pt; pcl::getMinMax3D(*cloud_cluster, min_pt, max_pt); @@ -176,6 +190,7 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) } // pcl/PointCloudからsensor_msgs/PointCloud2に変換 + // Converts the pcl/PointCloud to sensor_msgs/PointCloud2 sensor_msgs::PointCloud2 output; // pcl::toROSMsg(*cloud, output); // pcl::toROSMsg(*cloud_passthrough, output);