diff --git a/sciurus17_examples/scripts/box_stacking_example.py b/sciurus17_examples/scripts/box_stacking_example.py index ace61050..503220fc 100755 --- a/sciurus17_examples/scripts/box_stacking_example.py +++ b/sciurus17_examples/scripts/box_stacking_example.py @@ -51,7 +51,7 @@ def __init__(self): def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): # 首を指定角度に動かす - # Move the neck to the designated angle. + # Moves the neck to the designated angle. goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"] @@ -94,7 +94,7 @@ def __init__(self): self._gripper_goal.command.max_effort = 2.0 # アームとグリッパー姿勢の初期化 - # Initialize arm and gripper pose + # Initializes arm and gripper pose self.initialize_arms() self._current_arm = None @@ -108,7 +108,7 @@ def get_num_of_markers(self): def _get_lowest_object_pose(self): # 一番高さが低いオブジェクトのPoseを返す - # Return the Pose of the object with the lowest height. + # Returns the Pose of the object with the lowest height. lowest_z = 1000 lowest_pose = Pose() @@ -124,7 +124,7 @@ def _get_lowest_object_pose(self): def _get_highest_object_pose(self): # 一番高さが高いオブジェクトのPoseを返す - # Return the Pose of the object with the highest height + # Returns the Pose of the object with the highest height highest_z = 0 highest_pose = Pose() @@ -145,7 +145,7 @@ def _get_highest_object_pose(self): def _move_arm(self, current_arm, target_pose): if current_arm == self._RIGHT_ARM: # 手先を下に向ける - # Pointing the hand tip downwards. + # Points the hand tip downwards. q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -155,7 +155,7 @@ def _move_arm(self, current_arm, target_pose): return self._right_arm.go() elif current_arm == self._LEFT_ARM: # 手先を下に向ける - # Pointing the hand tip downwards. + # Points the hand tip downwards. q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -211,7 +211,7 @@ def initialize_arms(self): def pick_up(self, check_result): # 一番高さが低いオブジェクトを持ち上げる - # Lift the object with the lowest height. + # Lifts the object with the lowest height. rospy.loginfo("PICK UP") result = True # 制御対象を初期化 @@ -219,7 +219,7 @@ def pick_up(self, check_result): self._current_arm = None # オブジェクトがなければ終了 - # Exit if no object + # Exits if no object rospy.sleep(1.0) if self.get_num_of_markers() == 0: rospy.logwarn("NO OBJECTS") @@ -228,7 +228,7 @@ def pick_up(self, check_result): object_pose = self._get_lowest_object_pose() # オブジェクトの位置によって左右のどちらの手で取るかを判定する - # Determine whether the object should be taken with the left or right hand depending on its position. + # Determines whether the object should be taken with the left or right hand depending on its position. 
if object_pose.position.y < 0: self._current_arm = self._RIGHT_ARM rospy.loginfo("Set right arm") @@ -239,20 +239,20 @@ def pick_up(self, check_result): self._open_gripper(self._current_arm) # Z軸方向のオフセット meters - # Offset in Z-axis (meters) + # Offset in Z-axis (meters) APPROACH_OFFSET = 0.10 PREPARE_OFFSET = 0.06 LEAVE_OFFSET = 0.10 # 目標手先姿勢の生成 - # Generate target hand pose + # Generates target hand pose target_pose = Pose() target_pose.position.x = object_pose.position.x target_pose.position.y = object_pose.position.y target_pose.position.z = object_pose.position.z # 掴む準備をする - # Prepare to grasp + # Prepares to grasp target_pose.position.z = object_pose.position.z + APPROACH_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Approach failed") @@ -261,7 +261,7 @@ def pick_up(self, check_result): else: rospy.sleep(1.0) # ハンドを下げる - # Lower the hand + # Lowers the hand target_pose.position.z = object_pose.position.z + PREPARE_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Preparation grasping failed") @@ -270,14 +270,14 @@ def pick_up(self, check_result): else: rospy.sleep(1.0) # つかむ - # Grasp + # Grasps if self._close_gripper(self._current_arm) is False and check_result: rospy.logwarn("Grasping failed") result = False rospy.sleep(1.0) # ハンドを上げる - # Raise the hand + # Raises the hand target_pose.position.z = object_pose.position.z + LEAVE_OFFSET self._move_arm(self._current_arm, target_pose) @@ -285,12 +285,12 @@ def pick_up(self, check_result): if result is False: rospy.sleep(1.0) # 失敗したときは安全のため手を広げる - # Open the hand for safety if it fails + # Opens the hand for safety if it fails self._open_gripper(self._current_arm) rospy.sleep(1.0) # 初期位置に戻る - # Return to initial position + # Returns to initial position self._move_arm_to_init_pose(self._current_arm) return result @@ -298,12 +298,12 @@ def pick_up(self, check_result): def place_on(self, check_result, target_x=0.3, target_y=0): # 座標x,yにオブジェクトを配置する - # Place the object at coordinates x,y + # Places the object at coordinates x,y rospy.loginfo("PLACE on :" + str(target_x) + ", " + str(target_y)) result = True # 制御アームが設定されているかチェック - # Check if the control arm is set + # Checks if the control arm is set if self._current_arm is None: rospy.logwarn("NO ARM SELECTED") return False @@ -315,14 +315,14 @@ def place_on(self, check_result, target_x=0.3, target_y=0): LEAVE_OFFSET = 0.14 # 目標手先姿勢の生成 - # Generate target hand pose + # Generates target hand pose target_pose = Pose() target_pose.position.x = target_x target_pose.position.y = target_y target_pose.position.z = 0.0 # 置く準備をする - # Prepare to place + # Prepares to place target_pose.position.z = APPROACH_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Approach failed") @@ -330,7 +330,7 @@ def place_on(self, check_result, target_x=0.3, target_y=0): else: rospy.sleep(1.0) # ハンドを下げる - # Lower the hand + # Lowers the hand target_pose.position.z = PREPARE_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Preparation release failed") @@ -341,19 +341,19 @@ def place_on(self, check_result, target_x=0.3, target_y=0): # Release self._open_gripper(self._current_arm) # ハンドを上げる - # Raise the hand + # Raises the hand target_pose.position.z = LEAVE_OFFSET self._move_arm(self._current_arm, target_pose) if result is False: rospy.sleep(1.0) # 失敗したときは安全のため手を広げる - # Open the hand for safety if it fails to move the arm. 
+ # Opens the hand for safety if it fails to move the arm. self._open_gripper(self._current_arm) rospy.sleep(1.0) # 初期位置に戻る - # Return to initial position + # Returns to initial position self._move_arm_to_init_pose(self._current_arm) return result @@ -361,18 +361,18 @@ def place_on(self, check_result, target_x=0.3, target_y=0): def place_on_highest_object(self, check_result): # 一番高さが高いオブジェクトの上に配置する - # Place on top of the highest object + # Places on top of the highest object rospy.loginfo("PLACE ON HIGHEST OBJECT") result = True # 制御アームが設定されているかチェック - # Check if the control arm is set + # Checks if the control arm is set if self._current_arm is None: rospy.logwarn("NO ARM SELECTED") return False # オブジェクトが他になければデフォルト位置に置く - # Place the object in the default position if no other object exists + # Places the object in the default position if no other object exists rospy.sleep(1.0) if self.get_num_of_markers() == 0: rospy.logwarn("NO OBJECTS") @@ -387,14 +387,14 @@ def place_on_highest_object(self, check_result): LEAVE_OFFSET = 0.15 # 目標手先姿勢の生成 - # Generate target hand pose + # Generates target hand pose target_pose = Pose() target_pose.position.x = object_pose.position.x target_pose.position.y = object_pose.position.y target_pose.position.z = object_pose.position.z # 置く準備をする - # Prepare to place + # Prepares to place target_pose.position.z = object_pose.position.z + APPROACH_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Approach failed") @@ -402,7 +402,7 @@ def place_on_highest_object(self, check_result): else: rospy.sleep(1.0) # ハンドを下げる - # Lower the hand + # Lowers the hand target_pose.position.z = object_pose.position.z + PREPARE_OFFSET if self._move_arm(self._current_arm, target_pose) is False and check_result: rospy.logwarn("Preparation release failed") @@ -413,19 +413,19 @@ def place_on_highest_object(self, check_result): # Release self._open_gripper(self._current_arm) # ハンドを上げる - # Raise the hand + # Raises the hand target_pose.position.z = object_pose.position.z + LEAVE_OFFSET self._move_arm(self._current_arm, target_pose) if result is False: rospy.sleep(1.0) # 失敗したときは安全のため手を広げる - # Open the hand for safety if it fails to move the arm. + # Opens the hand for safety if it fails to move the arm. self._open_gripper(self._current_arm) rospy.sleep(1.0) # 初期位置に戻る - # Return to initial position + # Returns to initial position self._move_arm_to_init_pose(self._current_arm) return result @@ -433,10 +433,10 @@ def place_on_highest_object(self, check_result): def hook_shutdown(): # 首の角度を戻す - # Move the neck to 0 degrees + # Moves the neck to 0 degrees neck.set_angle(math.radians(0), math.radians(0), 3.0) # 両腕を初期位置に戻す - # Move both arms to initial position + # Moves both arms to initial position stacker.initialize_arms() @@ -446,11 +446,11 @@ def main(): rospy.on_shutdown(hook_shutdown) # 首の初期角度 degree - # Initial angle of neck. degree + # Initial angle of the neck in degrees INITIAL_YAW_ANGLE = 0 INITIAL_PITCH_ANGLE = -70 # 首を下に傾けてテーブル上のものを見る - # Tilt neck down to see objects on the table + # Tilts neck down to see objects on the table neck.set_angle(math.radians(INITIAL_YAW_ANGLE), math.radians(INITIAL_PITCH_ANGLE)) PICKING_MODE = 0 @@ -458,19 +458,19 @@ def main(): current_mode = PICKING_MODE # 箱の配置位置 meter - # Box placement position. 
meter + # Box placement position in meters PLACE_X = 0.3 PLACE_Y = 0.0 # アームとグリッパが正しく動いたかチェックする - # Check if arm and gripper moved correctly + # Checks if arm and gripper moved correctly CHECK_RESULT = True while not rospy.is_shutdown(): if current_mode == PICKING_MODE: if stacker.get_num_of_markers() > 0: # マーカがあれば、1番高さが低いオブジェクトを掴み上げる - # If there is a marker, pick up the object with the lowest height. + # If there is a marker, picks up the object with the lowest height. if stacker.pick_up(CHECK_RESULT) is False: rospy.logwarn("PickUp Failed") else: @@ -483,7 +483,7 @@ def main(): elif current_mode == PLACE_MODE: if stacker.get_num_of_markers() == 0: # マーカがなければ、指定位置に配置 - # If there is no marker, place it at the specified position + # If there is no marker, places it at the specified position if stacker.place_on(CHECK_RESULT, PLACE_X, PLACE_Y) is False: rospy.logwarn("Place Failed") else: @@ -491,7 +491,7 @@ def main(): else: # マーカがあれば、一番高い位置にあるオブジェクトの上に配置 - # If there is a marker, place it on top of the highest object + # If there is a marker, places it on top of the highest object if stacker.place_on_highest_object(CHECK_RESULT) is False: rospy.logwarn("Place Failed") else: diff --git a/sciurus17_examples/scripts/chest_camera_tracking.py b/sciurus17_examples/scripts/chest_camera_tracking.py index 52570290..16bf7f3e 100755 --- a/sciurus17_examples/scripts/chest_camera_tracking.py +++ b/sciurus17_examples/scripts/chest_camera_tracking.py @@ -52,12 +52,14 @@ def _image_callback(self, ros_image): input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8") except CvBridgeError as e: rospy.logerr(e) - + # 画像のwidth, heightを取得 + # Obtains the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] # 特定色のオブジェクトを検出 + # Detects an object with a certain color output_image = self._detect_orange_object(input_image) # output_image = self._detect_blue_object(input_image) @@ -67,6 +69,8 @@ def _image_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinates are normalized to a range of -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -74,11 +78,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Converts to a coordinate system where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -93,22 +99,27 @@ def object_detected(self): def _detect_color_object(self, bgr_image, lower_color, upper_color): # 画像から指定された色の物体を検出する + # Detects the specified colored object from the image MIN_OBJECT_SIZE = 7000 # px * px # BGR画像をHSV色空間に変換 + # Converts the BGR image to the HSV color space hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV) # 色を抽出するマスクを生成 + # Creates a mask to extract the color mask = cv2.inRange(hsv, lower_color, upper_color) # マスクから輪郭を抽出 + # Extracts the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # 輪郭を長方形に変換し、配列に格納 + # Converts the contours to rectangles and stores them in a list rects = [] for contour 
in contours: approx = cv2.convexHull(contour) @@ -118,11 +129,14 @@ def _detect_color_object(self, bgr_image, lower_color, upper_color): self._object_detected = False if len(rects) > 0: # 最も大きい長方形を抽出 + # Extracts the largest rectangle rect = max(rects, key=(lambda x: x[2] * x[3])) # 長方形が小さければ検出判定にしない + # Updates the detection result to false if the rectangle is small if rect[2] * rect[3] > MIN_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draws the rectangle on the image cv2.rectangle(bgr_image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -172,6 +186,7 @@ def __init__(self): def _state_callback(self, state): # 腰の現在角度を取得 + # Obtains the current angle of the waist self._state_received = True yaw_radian = state.actual.positions[0] @@ -188,6 +203,7 @@ def get_current_yaw(self): def set_angle(self, yaw_angle, goal_secs=1.0e-9): # 腰を指定角度に動かす + # Moves the waist to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["waist_yaw_joint"] @@ -203,6 +219,7 @@ def set_angle(self, yaw_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the waist angle to 0 deg when shutting down waist_yaw.set_angle(math.radians(0), 3.0) @@ -213,24 +230,33 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold of the object tracking + # Normalized coordinate system (px, px) THRESH_X = 0.05 # 腰の初期角度 Degree + # The initial angle of the waist in degrees INITIAL_YAW_ANGLE = 0 # 腰の制御角度リミット値 Degree + # The waist angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 # 腰の制御量 # 値が大きいほど腰を大きく動かす + # The control amount of the waist + # The waist moves more if the gain is bigger OPERATION_GAIN_X = 5.0 # 初期角度に戻る時の制御角度 Degree + # The control angle in degrees when returning to the initial pose RESET_OPERATION_ANGLE = 1 # 現在の腰角度を取得する # ここで現在の腰角度を取得することで、ゆっくり初期角度へ戻る + # Receives the current waist angle + # By receiving the waist angle here, it moves to the initial pose slowly while not waist_yaw.state_received(): pass yaw_angle = waist_yaw.get_current_yaw() @@ -240,6 +266,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Receives the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -248,15 +275,19 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X # 腰の角度を制限する + # Limits the waist angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: yaw_angle = MIN_YAW_ANGLE @@ -264,6 +295,7 @@ def main(): else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/control_effort_wrist.py b/sciurus17_examples/scripts/control_effort_wrist.py index 58e13baa..c79f2230 100755 --- a/sciurus17_examples/scripts/control_effort_wrist.py +++ b/sciurus17_examples/scripts/control_effort_wrist.py @@ -59,7 +59,7 @@ def joint_state_callback(msg): def main(): global joint_state - # Set P and D gain to avoid the wrist oscillation. 
+ # Sets the P and D gains to avoid wrist oscillation. pid_controller = PIDController(0.5, 0.0, 3.0) r = rospy.Rate(60) @@ -71,7 +71,7 @@ def main(): wrist_angle = joint_state.position[6] - # Switch the target angle every 5 seconds. + # Switches the target angle every 5 seconds. present_time = rospy.Time.now().secs if present_time - start_time > 5: start_time = present_time diff --git a/sciurus17_examples/scripts/depth_camera_tracking.py b/sciurus17_examples/scripts/depth_camera_tracking.py index 90a6383d..a12e1ed8 100755 --- a/sciurus17_examples/scripts/depth_camera_tracking.py +++ b/sciurus17_examples/scripts/depth_camera_tracking.py @@ -41,9 +41,12 @@ def __init__(self): self._bridge = CvBridge() # 頭カメラのカラー画像 + # The color image from the head camera self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1) # 頭カメラの深度画像 # カラー画像と視界が一致するように補正されている + # The depth image from the head camera + # It is aligned to the color image to match the view self._depth_sub = rospy.Subscriber("/sciurus17/camera/aligned_depth_to_color/image_raw", Image, self._depth_callback, queue_size=1) self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1) @@ -77,6 +80,7 @@ def _depth_callback(self, ros_image): return # 画像のwidth, heightを取得 + # Obtains the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] @@ -90,6 +94,8 @@ def _depth_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinates are normalized to a range of -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -97,11 +103,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Converts to a coordinate system where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -116,17 +124,21 @@ def object_detected(self): def _detect_object(self, input_depth_image): # 検出するオブジェクトの大きさを制限する + # Limits the size of the object to be detected MIN_OBJECT_SIZE = 10000 # px * px MAX_OBJECT_SIZE = 80000 # px * px # 検出範囲を4段階設ける # 単位はmm + # Sets 4 layers of detection range + # The unit is mm DETECTION_DEPTH = [ (500, 700), (600, 800), (700, 900), (800, 1000)] # 検出したオブジェクトを囲う長方形の色 + # The color of the rectangle around the detected object RECT_COLOR = [ (0, 0, 255), (0, 255, 0), @@ -135,6 +147,7 @@ def _detect_object(self, input_depth_image): # カメラのカラー画像を加工して出力する # カラー画像を受け取ってない場合はFalseを返す + # Outputs an edited color image + # Returns False if the color image has not been received output_image = self._color_image if output_image is None: return False @@ -143,9 +156,11 @@ def _detect_object(self, input_depth_image): self._median_depth = 0 for i, depth in enumerate(DETECTION_DEPTH): # depth[0] ~ depth[1]の範囲でオブジェクトを抽出する + # Extracts an object between depth[0] and depth[1] mask = cv2.inRange(input_depth_image, depth[0], depth[1]) # マスクから輪郭を抽出 + # Extracts the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) @@ -160,13 +175,16 @@ def _detect_object(self, input_depth_image): if len(rects) > 0: # 最も大きい長方形を抽出 + # Extracts the largest rectangle rect = 
max(rects, key=(lambda x: x[2] * x[3])) rect_size = rect[2] * rect[3] # 長方形の大きさが指定範囲内であるかチェック + # Checks if the size of the rectangle is within the limit if rect_size > MIN_OBJECT_SIZE and rect_size < MAX_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draws the rectangle on the image cv2.rectangle(output_image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -176,13 +194,16 @@ def _detect_object(self, input_depth_image): self._object_detected = True # 深度画像から長方形の領域を切り取る + # Clips the range of the rectangle from the depth image object_depth = input_depth_image[ rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]] # 深度の中央値を求める + # Calculates the median of the depth self._median_depth = int(np.median(object_depth)) # 中央値のテキストを出力画像に描画する + # Writes the median on the output image cv2.putText(output_image, str(self._median_depth), (rect[0], rect[1]+30), cv2.FONT_HERSHEY_SIMPLEX, @@ -212,6 +233,7 @@ def __init__(self): def _state_callback(self, state): # 首の現在角度を取得 + # Obtains the current neck angle self._state_received = True yaw_radian = state.actual.positions[0] @@ -235,6 +257,7 @@ def get_current_pitch(self): def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): # 首を指定角度に動かす + # Moves the neck to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"] @@ -251,6 +274,7 @@ def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the neck angle to 0 deg when shutting down neck.set_angle(math.radians(0), math.radians(0), 3.0) @@ -261,14 +285,18 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold of the object tracking + # Normalized coordinate system (px, px) THRESH_X = 0.05 THRESH_Y = 0.05 # 首の初期角度 Degree + # The initial angle of the neck in degrees INITIAL_YAW_ANGLE = 0 INITIAL_PITCH_ANGLE = 0 # 首の制御角度リミット値 Degree + # The neck angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 MAX_PITCH_ANGLE = 50 @@ -276,14 +304,19 @@ def main(): # 首の制御量 # 値が大きいほど首を大きく動かす + # The control amount of the neck + # The neck moves more if the gain is bigger OPERATION_GAIN_X = 5.0 OPERATION_GAIN_Y = 5.0 # 初期角度に戻る時の制御角度 Degree + # The control angle in degrees when returning to the initial pose RESET_OPERATION_ANGLE = 3 # 現在の首角度を取得する # ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る + # Receives the current neck angle + # By receiving the neck angle here, it moves to the initial pose slowly while not neck.state_received(): pass yaw_angle = neck.get_current_yaw() pitch_angle = neck.get_current_pitch() @@ -294,6 +327,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Receives the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -302,11 +336,14 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X @@ -314,6 +351,7 @@ def main(): pitch_angle += object_position.y * OPERATION_GAIN_Y # 首の制御角度を制限する + # Limits the neck angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: yaw_angle = MIN_YAW_ANGLE @@ -326,6 +364,7 @@ def main(): else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - 
INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/gripper_action_example.py b/sciurus17_examples/scripts/gripper_action_example.py index 9c000109..05e2465b 100755 --- a/sciurus17_examples/scripts/gripper_action_example.py +++ b/sciurus17_examples/scripts/gripper_action_example.py @@ -37,7 +37,7 @@ def __init__(self): self._goalR = GripperCommandGoal() self._goalL = GripperCommandGoal() - # Wait 10 Seconds for the gripper action server to start or exit + # Waits 10 seconds for the gripper action server to start or exit self._clientR.wait_for_server(rospy.Duration(5.0)) if not self._clientR.wait_for_server(rospy.Duration(5.0)): rospy.logerr("Exiting - Gripper R Action Server Not Found") sys.exit(1) self.clear() - # Wait 10 Seconds for the gripper action server to start or exit + # Waits 10 seconds for the gripper action server to start or exit self._clientL.wait_for_server(rospy.Duration(5.0)) if not self._clientL.wait_for_server(rospy.Duration(5.0)): rospy.logerr("Exiting - Gripper L Action Server Not Found") diff --git a/sciurus17_examples/scripts/head_camera_tracking.py b/sciurus17_examples/scripts/head_camera_tracking.py index 93f5a314..bc49f42f 100755 --- a/sciurus17_examples/scripts/head_camera_tracking.py +++ b/sciurus17_examples/scripts/head_camera_tracking.py @@ -47,7 +47,8 @@ def __init__(self): self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.') # カスケードファイルの読み込み - # 例 + # Reads the cascade file + # 例 | Examples # self._face_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_frontalface_alt2.xml") # self._eyes_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_eye.xml") self._face_cascade = "" @@ -61,10 +62,12 @@ def _image_callback(self, ros_image): rospy.logerr(e) # 画像のwidth, heightを取得 + # Obtains the width and height of the image self._image_shape.x = input_image.shape[1] self._image_shape.y = input_image.shape[0] # オブジェクト(特定色 or 顔) の検出 + # Detects the object (a specific color or a face) output_image = self._detect_orange_object(input_image) # output_image = self._detect_blue_object(input_image) # output_image = self._detect_face(input_image) @@ -75,6 +78,8 @@ def _image_callback(self, ros_image): def get_object_position(self): # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力 # オブジェクトの座標は-1.0 ~ 1.0に正規化される + # Returns the object coordinates where the image center is 0, 0 + # The coordinates are normalized to a range of -1.0 to 1.0 object_center = Point( self._object_rect[0] + self._object_rect[2] * 0.5, @@ -82,11 +87,13 @@ def get_object_position(self): 0) # 画像の中心を0, 0とした座標系に変換 + # Converts to a coordinate system where the image center is 0, 0 translated_point = Point() translated_point.x = object_center.x - self._image_shape.x * 0.5 translated_point.y = -(object_center.y - self._image_shape.y * 0.5) # 正規化 + # Normalize normalized_point = Point() if self._image_shape.x != 0 and self._image_shape.y != 0: normalized_point.x = translated_point.x / (self._image_shape.x * 0.5) @@ -101,22 +108,27 @@ def object_detected(self): def _detect_color_object(self, bgr_image, lower_color, upper_color): # 画像から指定された色の物体を検出する + # Detects an object with the specified color from the image MIN_OBJECT_SIZE = 1000 # px * px # BGR画像をHSV色空間に変換 + # Converts the BGR image to the HSV color space hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV) # 色を抽出するマスクを生成 + # Creates 
a mask to extract the color mask = cv2.inRange(hsv, lower_color, upper_color) # マスクから輪郭を抽出 + # Extracts the contours with the mask if self._CV_MAJOR_VERSION == '4': contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) else: _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # 輪郭を長方形に変換し、配列に格納 + # Converts the contours to rectangles and stores them in a list rects = [] for contour in contours: approx = cv2.convexHull(contour) @@ -126,11 +138,14 @@ def _detect_color_object(self, bgr_image, lower_color, upper_color): self._object_detected = False if len(rects) > 0: # 最も大きい長方形を抽出 + # Extracts the largest rectangle rect = max(rects, key=(lambda x: x[2] * x[3])) # 長方形が小さければ検出判定にしない + # Updates the detection result to false if the rectangle is small if rect[2] * rect[3] > MIN_OBJECT_SIZE: # 抽出した長方形を画像に描画する + # Draws the rectangle on the image cv2.rectangle(bgr_image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), @@ -164,38 +179,47 @@ def _detect_blue_object(self, bgr_image): def _detect_face(self, bgr_image): # 画像から顔(正面)を検出する + # Detects a face (front) from the image SCALE = 4 # カスケードファイルがセットされているかを確認 + # Checks if a cascade file is set if self._face_cascade == "" or self._eyes_cascade == "": rospy.logerr("cascade file does not set") return bgr_image # BGR画像をグレー画像に変換 + # Converts the BGR image to a grayscale image gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY) # 処理時間短縮のため画像を縮小 + # Shrinks the image to shorten the processing time height, width = gray.shape[:2] small_gray = cv2.resize(gray, (width/SCALE, height/SCALE)) # カスケードファイルを使って顔認識 + # Uses the cascade file for face detection small_faces = self._face_cascade.detectMultiScale(small_gray) self._object_detected = False for small_face in small_faces: # 顔の領域を元のサイズに戻す + # Resizes the face area to the original size face = small_face*SCALE # グレー画像から顔部分を抽出 + # Finds the face area from the grayscale image roi_gray = gray[ face[1]:face[1]+face[3], face[0]:face[0]+face[2]] # 顔の中から目を検知 + # Detects the eyes from the face eyes = self._eyes_cascade.detectMultiScale(roi_gray) # 目を検出したら、対象のrect(座標と大きさ)を記録する + # If the eyes are detected, records the size and coordinates if len(eyes) > 0: cv2.rectangle(bgr_image, (face[0],face[1]), @@ -229,6 +253,7 @@ def __init__(self): def _state_callback(self, state): # 首の現在角度を取得 + # Obtains the current angle of the neck self._state_received = True yaw_radian = state.actual.positions[0] @@ -252,6 +277,7 @@ def get_current_pitch(self): def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): # 首を指定角度に動かす + # Moves the neck to the specified angle goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"] @@ -268,6 +294,7 @@ def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9): def hook_shutdown(): # shutdown時に0度へ戻る + # Sets the neck angle to 0 deg when shutting down neck.set_angle(math.radians(0), math.radians(0), 3.0) @@ -278,14 +305,18 @@ def main(): # オブジェクト追跡のしきい値 # 正規化された座標系(px, px) + # The threshold of the object tracking + # Normalized coordinate system (px, px) THRESH_X = 0.05 THRESH_Y = 0.05 # 首の初期角度 Degree + # The initial angle of the neck in degrees INITIAL_YAW_ANGLE = 0 INITIAL_PITCH_ANGLE = 0 # 首の制御角度リミット値 Degree + # The neck angle limit (max/min) in degrees MAX_YAW_ANGLE = 120 MIN_YAW_ANGLE = -120 MAX_PITCH_ANGLE = 50 @@ -293,14 +324,19 @@ def main(): # 首の制御量 # 値が大きいほど首を大きく動かす + # The control amount of the neck + # The neck moves more if the gain is bigger OPERATION_GAIN_X = 5.0 
OPERATION_GAIN_Y = 5.0 # 初期角度に戻る時の制御角度 Degree + # The control angle in degrees when returning to the initial pose RESET_OPERATION_ANGLE = 3 # 現在の首角度を取得する # ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る + # Receives the current neck angle + # By receiving the neck angle here, it moves to the initial pose slowly while not neck.state_received(): pass yaw_angle = neck.get_current_yaw() @@ -311,6 +347,7 @@ def main(): while not rospy.is_shutdown(): # 正規化されたオブジェクトの座標を取得 + # Receives the normalized object coordinates object_position = object_tracker.get_object_position() if object_tracker.object_detected(): @@ -319,11 +356,14 @@ def main(): else: lost_time = rospy.Time.now() - detection_timestamp # 一定時間オブジェクトが見つからない場合は初期角度に戻る + # If it doesn't detect any object for a certain amount of time, + # it will return to the initial angle if lost_time.to_sec() > 1.0: look_object = False if look_object: # オブジェクトが画像中心にくるように首を動かす + # Moves the neck so that the object comes to the middle of the image if math.fabs(object_position.x) > THRESH_X: yaw_angle += -object_position.x * OPERATION_GAIN_X @@ -331,6 +371,7 @@ def main(): pitch_angle += object_position.y * OPERATION_GAIN_Y # 首の制御角度を制限する + # Limits the neck angles if yaw_angle > MAX_YAW_ANGLE: yaw_angle = MAX_YAW_ANGLE if yaw_angle < MIN_YAW_ANGLE: yaw_angle = MIN_YAW_ANGLE @@ -343,6 +384,7 @@ def main(): else: # ゆっくり初期角度へ戻る + # Returns to the initial angle slowly diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE: yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle) diff --git a/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py index 9a42bb5b..d992d4f9 100755 --- a/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_left_arm_demo.py @@ -43,16 +43,19 @@ def main(): print(robot.get_current_state()) # アーム初期ポーズを表示 + # Displays the arm's initial pose arm_initial_pose = arm.get_current_pose().pose print("Arm initial pose:") print(arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Opens the hand in case it's holding on to something gripper_goal.command.position = -0.9 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("l_arm_init_pose") arm.go() gripper_goal.command.position = 0.0 @@ -60,10 +63,12 @@ def main(): gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepares for grasping target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -73,15 +78,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand gripper_goal.command.position = -0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く + # Grasp! 
target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -91,15 +99,18 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Closes the hand gripper_goal.command.position = -0.4 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 持ち上げる + # Pick up target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -109,10 +120,12 @@ def main(): arm.go() # 実行 # 移動する + # Moves to a different position target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -122,10 +135,12 @@ def main(): arm.go() # 実行 # 下ろす + # Lowers the arm target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -135,15 +150,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand gripper_goal.command.position = -0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 少しだけハンドを持ち上げる + # Raises the hand a little target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.2 + # When grasping from above q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -153,6 +171,7 @@ def main(): arm.go() # 実行 # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("l_arm_init_pose") arm.go() diff --git a/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py index 6a0d4a05..30d29bac 100755 --- a/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_right_arm_demo.py @@ -43,16 +43,19 @@ def main(): print(robot.get_current_state()) # アーム初期ポーズを表示 + # Displays the arm's initial pose arm_initial_pose = arm.get_current_pose().pose print("Arm initial pose:") print(arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Opens the hand in case it's holding on to something gripper_goal.command.position = 0.9 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("r_arm_waist_init_pose") arm.go() gripper_goal.command.position = 0.0 @@ -60,10 +63,12 @@ def main(): gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepares for grasping target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -73,15 +78,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand 
gripper_goal.command.position = 0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く + # Grasp! target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -91,15 +99,18 @@ def main(): arm.go() # 実行 # ハンドを閉じる + # Closes the hand gripper_goal.command.position = 0.4 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 持ち上げる + # Pick up target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.25 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -109,10 +120,12 @@ def main(): arm.go() # 実行 # 移動する + # Moves to a different position target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.3 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -122,10 +135,12 @@ def main(): arm.go() # 実行 # 下ろす + # Lowers the arm target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.13 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -135,15 +150,18 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand gripper_goal.command.position = 0.7 gripper.send_goal(gripper_goal) gripper.wait_for_result(rospy.Duration(1.0)) # 少しだけハンドを持ち上げる + # Raises the hand a little target_pose = geometry_msgs.msg.Pose() target_pose.position.x = 0.4 target_pose.position.y = 0.0 target_pose.position.z = 0.2 + # When grasping from above q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 上方から掴みに行く場合 target_pose.orientation.x = q[0] target_pose.orientation.y = q[1] @@ -153,6 +171,7 @@ def main(): arm.go() # 実行 # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("r_arm_waist_init_pose") arm.go() diff --git a/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py b/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py index df3c131e..9bb4a6a6 100755 --- a/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py +++ b/sciurus17_examples/scripts/pick_and_place_two_arm_demo.py @@ -33,16 +33,20 @@ def main(): r_eef_oc = OrientationConstraint() # 両腕グループの制御 + # Sets up the MoveGroup that controls both arms arm = moveit_commander.MoveGroupCommander("two_arm_group") # 速度と加速度をゆっくりにする + # Lowers the velocity and acceleration arm.set_max_velocity_scaling_factor(0.2) arm.set_max_acceleration_scaling_factor(0.2) # 右ハンド初期化 + # Initializes the right hand r_gripper = actionlib.SimpleActionClient("/sciurus17/controller1/right_hand_controller/gripper_cmd", GripperCommandAction) r_gripper.wait_for_server() r_gripper_goal = GripperCommandGoal() r_gripper_goal.command.max_effort = 2.0 # 左ハンド初期化 + # Initializes the left hand l_gripper = actionlib.SimpleActionClient("/sciurus17/controller2/left_hand_controller/gripper_cmd", GripperCommandAction) l_gripper.wait_for_server() l_gripper_goal = GripperCommandGoal() @@ -51,60 +55,75 @@ def main(): rospy.sleep(1.0) # グループリスト表示 + # Displays the group names print("Group names:") 
print(robot.get_group_names()) # ステータス表示 + # Displays the current status print("Current state:") print(robot.get_current_state()) # 右アーム初期ポーズを表示 + # Displays the initial pose of the right arm r_arm_initial_pose = arm.get_current_pose("r_link7").pose print("Right arm initial pose:") print(r_arm_initial_pose) # 左アーム初期ポーズを表示 + # Displays the initial pose of the left arm l_arm_initial_pose = arm.get_current_pose("l_link7").pose print("Left arm initial pose:") print(l_arm_initial_pose) # 何かを掴んでいた時のためにハンドを開く + # Opens the hand in case it's holding on to something # 右手を開く + # Opens the right hand r_gripper_goal.command.position = 0.9 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Opens the left hand l_gripper_goal.command.position = -0.9 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("two_arm_init_pose") arm.go() # 右手を閉じる + # Closes the right hand r_gripper_goal.command.position = 0.0 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Closes the left hand l_gripper_goal.command.position = 0.0 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 掴む準備をする + # Prepares for grasping # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.25 r_target_pose.position.z = 0.2 + # Points the hand down q = quaternion_from_euler(3.14/2.0, 0.0, 0.0) # まずは手を下に向ける r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.25 l_target_pose.position.z = 0.2 + # Points the hand down q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # まずは手を下に向ける l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] @@ -116,31 +135,39 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand # 右手を開く + # Opens the right hand r_gripper_goal.command.position = 0.7 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Opens the left hand l_gripper_goal.command.position = -0.7 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 手を内向きにする + # Turns the hands inward # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.25 r_target_pose.position.z = 0.08 + # Turns the hand inward q = quaternion_from_euler(3.14, 0.0, 0.0) # 手を内向きにする r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.25 l_target_pose.position.z = 0.08 + # Turns the hand inward q = quaternion_from_euler(-3.14, 0.0, 0.0) # 手を内向きにする l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] @@ -152,17 +179,22 @@ def main(): arm.go() # 実行 # ハンドを開く + # Opens the hand # 右手を開く + # Opens the right hand r_gripper_goal.command.position = 1.0 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Opens the left hand l_gripper_goal.command.position = -1.0 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 掴みに行く準備 + # Prepares to 
grasp # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.10 @@ -173,6 +205,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.10 @@ -183,7 +216,9 @@ def main(): l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 軌道制約の追加 + # Adds a path constraint # 右手を平行に寄せる + # Moves the right hand towards the center r_eef_pose = arm.get_current_pose("r_link7").pose r_eef_oc.header.stamp = rospy.Time.now() r_eef_oc.header.frame_id = "/base_link" @@ -194,6 +229,7 @@ def main(): r_eef_oc.absolute_z_axis_tolerance = 0.7 r_eef_oc.weight = 1 # 左手を平行に寄せる + # Moves the left hand towards the center l_eef_pose = arm.get_current_pose("l_link7").pose l_eef_oc.header.stamp = rospy.Time.now() l_eef_oc.header.frame_id = "/base_link" @@ -207,12 +243,15 @@ def main(): eef_constraints.orientation_constraints.append(l_eef_oc) arm.set_path_constraints(eef_constraints) # 目標ポーズ設定 + # Sets the target pose arm.set_pose_target(r_target_pose,"r_link7") arm.set_pose_target(l_target_pose,"l_link7") - arm.go() # 実行 + arm.go() # 実行 | Execute # 掴みに行く + # Grasp! # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.18 r_target_pose.position.y = -0.08 @@ -223,6 +262,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.18 l_target_pose.position.y = 0.08 @@ -233,22 +273,28 @@ def main(): l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 目標ポーズ設定 + # Sets the target pose arm.set_pose_target(r_target_pose,"r_link7") arm.set_pose_target(l_target_pose,"l_link7") - arm.go() # 実行 + arm.go() # 実行 | Execute # ハンドを閉じる + # Closes the hand # 右手を閉じる + # Closes the right hand r_gripper_goal.command.position = 0.4 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Closes the left hand l_gripper_goal.command.position = -0.4 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 前へ押し出す + # Push forward # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.35 r_target_pose.position.y = -0.09 @@ -259,6 +305,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.35 l_target_pose.position.y = 0.09 @@ -269,64 +316,81 @@ def main(): l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 目標ポーズ設定 + # Sets the target pose arm.set_pose_target(r_target_pose,"r_link7") arm.set_pose_target(l_target_pose,"l_link7") - arm.go() # 実行 + arm.go() # 実行 | Execute # ハンドを開く + # Opens the hand # 右手を開く + # Opens the right hand r_gripper_goal.command.position = 1.5 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を開く + # Opens the left hand l_gripper_goal.command.position = -1.5 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 軌道制約の解除 + # Resets the path constraints arm.set_path_constraints(None) # 1秒待つ + # Waits one second rospy.sleep(1.0) # 離す + # Release # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.28 r_target_pose.position.y = -0.20 r_target_pose.position.z = 0.12 + # Points the hand down q = 
quaternion_from_euler(3.14/2.0, 0.0, 0.0) # 手を下向きにして逃がす r_target_pose.orientation.x = q[0] r_target_pose.orientation.y = q[1] r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.28 l_target_pose.position.y = 0.20 l_target_pose.position.z = 0.12 + # Points the hand down q = quaternion_from_euler(-3.14/2.0, 0.0, 0.0) # 手を下向きにして逃がす l_target_pose.orientation.x = q[0] l_target_pose.orientation.y = q[1] l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 目標ポーズ設定 + # Sets the target pose arm.set_pose_target(r_target_pose,"r_link7") arm.set_pose_target(l_target_pose,"l_link7") - arm.go() # 実行 + arm.go() # 実行 | Execute # ハンドを閉じる + # Closes the hand # 右手を閉じる + # Closes the right hand r_gripper_goal.command.position = 0.4 r_gripper.send_goal(r_gripper_goal) r_gripper.wait_for_result(rospy.Duration(1.0)) # 左手を閉じる + # Closes the left hand l_gripper_goal.command.position = -0.4 l_gripper.send_goal(l_gripper_goal) l_gripper.wait_for_result(rospy.Duration(1.0)) # 腕を持ち上げてターゲットから離れる + # Raises the arm to move away from the target # 右腕 + # Right arm r_target_pose = geometry_msgs.msg.Pose() r_target_pose.position.x = 0.32 r_target_pose.position.y = -0.25 @@ -337,6 +401,7 @@ def main(): r_target_pose.orientation.z = q[2] r_target_pose.orientation.w = q[3] # 左腕 + # Left arm l_target_pose = geometry_msgs.msg.Pose() l_target_pose.position.x = 0.32 l_target_pose.position.y = 0.25 @@ -347,11 +412,13 @@ def main(): l_target_pose.orientation.z = q[2] l_target_pose.orientation.w = q[3] # 目標ポーズ設定 + # Sets the target pose arm.set_pose_target(r_target_pose,"r_link7") arm.set_pose_target(l_target_pose,"l_link7") - arm.go() # 実行 + arm.go() # 実行 | Execute # SRDFに定義されている"home"の姿勢にする + # Moves to the pose declared as "home" in the SRDF arm.set_named_target("two_arm_init_pose") arm.go() diff --git a/sciurus17_examples/scripts/preset_pid_gain_example.py b/sciurus17_examples/scripts/preset_pid_gain_example.py index 27a5d67f..da3fbac4 100755 --- a/sciurus17_examples/scripts/preset_pid_gain_example.py +++ b/sciurus17_examples/scripts/preset_pid_gain_example.py @@ -23,10 +23,13 @@ def preset_pid_gain(pid_gain_no): # サーボモータのPIDゲインをプリセットする # プリセットの内容はsciurus7_control/scripts/preset_reconfigure.pyに書かれている + # Presets the PID gains of the servo motor + # The presets are written in sciurus17_control/scripts/preset_reconfigure.py print("PID Gain Preset. No." 
+ str(pid_gain_no)) preset_no = UInt8() preset_no.data = pid_gain_no pub_preset.publish(preset_no) + # Waits until the PID gain is set rospy.sleep(1) # PIDゲインがセットされるまで待つ @@ -36,27 +39,34 @@ def main(): arm.set_max_velocity_scaling_factor(0.1) # サーボモータのPIDゲインをプリセット + # Presets the PID gain of the servo motor preset_pid_gain(0) # SRDFに定義されている"r_arm_init_pose"の姿勢にする + # Moves the robot to the "r_arm_init_pose" pose defined in the SRDF print("set named target : r_arm_init_pose") arm.set_named_target("r_arm_init_pose") arm.go() # サーボモータのPIDゲインをプリセット # Pゲインが小さくなるので、Sciurus17の右手を人間の手で動かすことが可能 + # Presets the PID gain of the servo motor + # The P gain becomes smaller, so you can move Sciurus17's right arm by hand preset_pid_gain(4) # 動作確認のため数秒間待つ + # Waits a few seconds to check the motion sleep_seconds = 10 for i in range(sleep_seconds): print( str(sleep_seconds-i) + " counts left.") rospy.sleep(1) # 安全のため、現在の右手先姿勢を目標姿勢に変更する + # Updates the target pose to the current pose for safety reasons arm.set_pose_target(arm.get_current_pose()) arm.go() # サーボモータのPIDゲインをプリセット + # Presets the PID gain of the servo motor preset_pid_gain(0) print("set named target : r_arm_init_pose") @@ -68,6 +78,7 @@ def main(): if __name__ == '__main__': # PIDゲインプリセット用のPublisher + # The Publisher for the PID gain presets pub_preset = rospy.Publisher("preset_gain_no", UInt8, queue_size=1) try: diff --git a/sciurus17_examples/scripts/waist_joint_trajectory_example.py b/sciurus17_examples/scripts/waist_joint_trajectory_example.py index d3ecf9c3..60916431 100755 --- a/sciurus17_examples/scripts/waist_joint_trajectory_example.py +++ b/sciurus17_examples/scripts/waist_joint_trajectory_example.py @@ -27,6 +27,7 @@ class WaistYaw(object): # 初期化処理 + # Initialization def __init__(self): self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/waist_yaw_controller/follow_joint_trajectory", FollowJointTrajectoryAction) @@ -39,15 +40,18 @@ def __init__(self): self.present_angle = 0.0 # 現在角度を設定 + # Sets the current angle def set_present_angle(self, angle): self.present_angle = angle # 目標角度の設定と実行 + # Sets the target angle and executes def set_angle(self, yaw_angle, goal_sec): goal = FollowJointTrajectoryGoal() goal.trajectory.joint_names = ["waist_yaw_joint"] # 現在の角度から開始(遷移時間は現在時刻) + # Starts from the current angle (The transition time is the current time) yawpoint = JointTrajectoryPoint() yawpoint.positions.append(self.present_angle) yawpoint.time_from_start = rospy.Duration(nsecs=1) @@ -55,8 +59,11 @@ def set_angle(self, yaw_angle, goal_sec): # 途中に角度と時刻をappendすると細かい速度制御が可能 # 参考=> http://wiki.ros.org/joint_trajectory_controller/UnderstandingTrajectoryReplacement + # You can do fine speed control by appending angles and times in between + # Ref=> http://wiki.ros.org/joint_trajectory_controller/UnderstandingTrajectoryReplacement # ゴール角度を設定(指定されたゴール時間で到達) + # Sets the goal angle (Arrives at the specified goal time) yawpoint = JointTrajectoryPoint() yawpoint.positions.append(yaw_angle) yawpoint.time_from_start = rospy.Duration(goal_sec) @@ -64,6 +71,7 @@ def set_angle(self, yaw_angle, goal_sec): self.present_angle = yaw_angle # 軌道計画を実行 + # Executes the planned trajectory self.__client.send_goal(goal) self.__client.wait_for_result(rospy.Duration(goal_sec)) return self.__client.get_result() @@ -75,16 +83,21 @@ def set_angle(self, yaw_angle, goal_sec): wy.set_present_angle(math.radians(0.0)) # まず正面を向く + # Faces the front wy.set_angle(math.radians(0.0), 0.1) rospy.sleep(1.0) # 左45度,1秒 + # Left 45 degrees, 1 second 
wy.set_angle(math.radians(45.0), 1.0) rospy.sleep(1.0) # 正面:1秒 + # Front, 1 second wy.set_angle(math.radians(0.0), 1.0) rospy.sleep(1.0) # 右45度,1秒 + # Right 45 degrees, 1 second wy.set_angle(math.radians(-45.0), 1.0) rospy.sleep(1.0) # 正面,1秒 + # Front, 1 second wy.set_angle(math.radians(0.0), 1.0) diff --git a/sciurus17_examples/src/object_detection.cpp b/sciurus17_examples/src/object_detection.cpp index 3eb34f53..dd6998c4 100644 --- a/sciurus17_examples/src/object_detection.cpp +++ b/sciurus17_examples/src/object_detection.cpp @@ -39,6 +39,7 @@ void convert_to_marker(visualization_msgs::Marker *marker, const int marker_id, const pcl::PointXYZRGB &min_pt, const pcl::PointXYZRGB &max_pt) { // pcl::Pointの最大最小値をBox型のマーカに変換する + // Converts the max/min of the pcl::Point to a Box-type marker marker->header.frame_id = frame_id; marker->header.stamp = ros::Time(); @@ -88,6 +89,8 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) // point cloudの座標を変換 // base_linkとカメラ間のTFを取得する + // Converts the point cloud coordinates + // Receives the TF between base_link and camera while(true){ try{ listener.lookupTransform(FRAME_ID, cloud_msg->header.frame_id, ros::Time(0), transform); @@ -101,10 +104,12 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) pcl_ros::transformPointCloud(FRAME_ID, transform, *cloud_msg, cloud_transformed); // sensor_msgs/PointCloud2からpcl/PointCloudに変換 + // Converts sensor_msgs/PointCloud2 to pcl/PointCloud pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>()); pcl::fromROSMsg(cloud_transformed, *cloud); // Z方向の範囲でフィルタリング + // Filters along the Z-axis pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_passthrough (new pcl::PointCloud<pcl::PointXYZRGB>()); pcl::PassThrough<pcl::PointXYZRGB> pass; pass.setInputCloud (cloud); @@ -113,6 +118,7 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) pass.filter (*cloud_passthrough); // voxelgridでダウンサンプリング + // Downsamples with voxelgrid pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_voxelgrid (new pcl::PointCloud<pcl::PointXYZRGB>()); pcl::VoxelGrid<pcl::PointXYZRGB> voxelgrid; voxelgrid.setInputCloud (cloud_passthrough); @@ -120,11 +126,13 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) voxelgrid.filter (*cloud_voxelgrid); // pointがなければ処理を抜ける + // Exits the process if there are no points if(cloud_voxelgrid->size() <= 0){ return; } // クラスタ抽出のためKdTreeオブジェクトを作成 + // Creates a KdTree object for cluster extraction pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGB>()); tree->setInputCloud (cloud_voxelgrid); @@ -142,12 +150,15 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) // クラスタごとにPointのRGB値を変更する // クラスタをもとにMarkerを生成する + // Changes the RGB values of the points in each cluster + // Creates the Marker based on the cluster pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_output (new pcl::PointCloud<pcl::PointXYZRGB>()); for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it) { pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZRGB>()); // クラスタ内のPointRGB値を変更 + // Changes the point RGB values within the cluster for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit){ cloud_voxelgrid->points[*pit].r = CLUSTER_COLOR[cluster_i][RED]; @@ -156,13 +167,16 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) cloud_voxelgrid->points[*pit].b = CLUSTER_COLOR[cluster_i][BLUE]; cloud_cluster->points.push_back (cloud_voxelgrid->points[*pit]); //* } // Unorganized datasetsとしてwidth, heightを入力 + // Sets the width and height as an unorganized dataset cloud_cluster->width = cloud_cluster->points.size (); cloud_cluster->height = 1; // 無効なpointがないのでis_denseはtrue + // is_dense is true since there are no invalid points 
cloud_cluster->is_dense = true; *cloud_output += *cloud_cluster; // Markerの作成 + // Creates the marker visualization_msgs::Marker marker; pcl::PointXYZRGB min_pt, max_pt; pcl::getMinMax3D(*cloud_cluster, min_pt, max_pt); @@ -176,6 +190,7 @@ void cloud_cb (const sensor_msgs::PointCloud2ConstPtr& cloud_msg) } // pcl/PointCloudからsensor_msgs/PointCloud2に変換 + // Converts the pcl/PointCloud to sensor_msgs/PointCloud2 sensor_msgs::PointCloud2 output; // pcl::toROSMsg(*cloud, output); // pcl::toROSMsg(*cloud_passthrough, output);
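The three camera-tracking scripts in this diff (chest_camera_tracking.py, depth_camera_tracking.py, head_camera_tracking.py) all follow the control pattern that the translated comments describe: the detected object's pixel position is normalized to -1.0 ~ 1.0 around the image center, and each control cycle applies a thresholded proportional gain to the joint angle before clamping it to the joint limits. The following is a minimal standalone sketch of that pattern for reference; the function names and default values are illustrative and are not part of the repository.

def normalize(center_x, center_y, width, height):
    # Maps a pixel coordinate to -1.0 ~ 1.0 with the image center at (0, 0)
    x = (center_x - width * 0.5) / (width * 0.5)
    y = -(center_y - height * 0.5) / (height * 0.5)
    return x, y

def step_yaw(yaw_angle, object_x, thresh=0.05, gain=5.0,
             max_angle=120.0, min_angle=-120.0):
    # Moves the joint only when the normalized error exceeds the threshold
    if abs(object_x) > thresh:
        yaw_angle += -object_x * gain
    # Clamps the command to the joint limits
    return max(min_angle, min(max_angle, yaw_angle))

For example, an object centered at pixel (480, 240) in a 640x480 image normalizes to x = 0.5, and step_yaw(0, 0.5) returns -2.5 degrees, turning the joint toward the object on the next cycle.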