
Commit

Adds English comments (Still need to edit preset_pid_gain_example.py and waist_joint_trajectory_example.py)

Shuhei Kozasa committed Dec 26, 2023
1 parent 02182bb commit 3f8152c
Showing 7 changed files with 230 additions and 2 deletions.
34 changes: 33 additions & 1 deletion sciurus17_examples/scripts/chest_camera_tracking.py
@@ -52,12 +52,14 @@ def _image_callback(self, ros_image):
input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)

# 画像のwidth, heightを取得
# Obtain the width and height of the image
self._image_shape.x = input_image.shape[1]
self._image_shape.y = input_image.shape[0]

# 特定色のオブジェクトを検出
# Detect an object with a certain color
output_image = self._detect_orange_object(input_image)
# output_image = self._detect_blue_object(input_image)

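As context for the callback above, here is a minimal standalone sketch of the cv_bridge conversion it relies on; the topic name is taken from this commit, while the node name is illustrative.

import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

bridge = CvBridge()

def callback(ros_image):
    try:
        # "bgr8" requests an 8-bit, 3-channel BGR array
        cv_image = bridge.imgmsg_to_cv2(ros_image, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return
    # shape is (rows, cols, channels), so width is shape[1]
    height, width = cv_image.shape[:2]
    rospy.loginfo("received %dx%d image", width, height)

rospy.init_node("image_listener")  # illustrative node name
rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, callback, queue_size=1)
rospy.spin()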
@@ -67,18 +69,22 @@ def _image_callback(self, ros_image):
def get_object_position(self):
# 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力
# オブジェクトの座標は-1.0 ~ 1.0に正規化される
# Returns the object coordinates in a frame whose origin (0, 0) is the image center
# The coordinates are normalized to the range -1.0 to 1.0

object_center = Point(
self._object_rect[0] + self._object_rect[2] * 0.5,
self._object_rect[1] + self._object_rect[3] * 0.5,
0)

# 画像の中心を0, 0とした座標系に変換
# Convert to a coordinate frame whose origin (0, 0) is the image center
translated_point = Point()
translated_point.x = object_center.x - self._image_shape.x * 0.5
translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

# 正規化
# Normalize
normalized_point = Point()
if self._image_shape.x != 0 and self._image_shape.y != 0:
normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
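The translate-and-normalize math above, as a self-contained sketch with illustrative values:

# Sketch: map a pixel coordinate to the [-1.0, 1.0] range used above,
# with the image center as the origin and +y pointing up.
def normalize_point(px, py, width, height):
    tx = px - width * 0.5
    ty = -(py - height * 0.5)  # flip y so that up is positive
    return tx / (width * 0.5), ty / (height * 0.5)

# The center of a 640x480 image maps to (0.0, 0.0),
# the top-right corner to (1.0, 1.0).
print(normalize_point(320, 240, 640, 480))  # (0.0, 0.0)
print(normalize_point(640, 0, 640, 480))    # (1.0, 1.0)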
@@ -93,22 +99,27 @@ def object_detected(self):

def _detect_color_object(self, bgr_image, lower_color, upper_color):
# 画像から指定された色の物体を検出する
# Detect the specified colored object from the image

MIN_OBJECT_SIZE = 7000 # px * px

# BGR画像をHSV色空間に変換
# Convert the BGR image to the HSV color space
hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

# 色を抽出するマスクを生成
# Creates a mask to extract the color
mask = cv2.inRange(hsv, lower_color, upper_color)

# マスクから輪郭を抽出
# Extract the contours with the mask
if self._CV_MAJOR_VERSION == '4':
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# 輪郭を長方形に変換し、配列に格納
# Convert the contours to rectangles and store them in a list
rects = []
for contour in contours:
approx = cv2.convexHull(contour)
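The version branch above exists because cv2.findContours returns two values in OpenCV 4 but three in OpenCV 3. A standalone sketch of the same mask-and-contour pipeline; the HSV bounds are illustrative assumptions, not values from this commit.

import cv2
import numpy as np

bgr_image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder input
hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

# Illustrative HSV bounds for an orange-like color
mask = cv2.inRange(hsv, np.array([5, 100, 100]), np.array([20, 255, 255]))

# OpenCV 4 returns (contours, hierarchy); OpenCV 3 returns (image, contours, hierarchy)
if cv2.__version__.split('.')[0] == '4':
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Wrap each contour's convex hull in an axis-aligned rectangle (x, y, w, h)
rects = [cv2.boundingRect(cv2.convexHull(c)) for c in contours]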
@@ -118,11 +129,14 @@ def _detect_color_object(self, bgr_image, lower_color, upper_color):
self._object_detected = False
if len(rects) > 0:
# 最も大きい長方形を抽出
# Extract the largest rectangle
rect = max(rects, key=(lambda x: x[2] * x[3]))

# 長方形が小さければ検出判定にしない
# Updates the detection result to false if the rectangle is small
if rect[2] * rect[3] > MIN_OBJECT_SIZE:
# 抽出した長方形を画像に描画する
# Draw the rectangle on the image
cv2.rectangle(bgr_image,
(rect[0], rect[1]),
(rect[0] + rect[2], rect[1] + rect[3]),
@@ -172,6 +186,7 @@ def __init__(self):

def _state_callback(self, state):
# 腰の現在角度を取得
# Obtains the current angle of the waist

self._state_received = True
yaw_radian = state.actual.positions[0]
@@ -188,6 +203,7 @@ def get_current_yaw(self):

def set_angle(self, yaw_angle, goal_secs=1.0e-9):
# 腰を指定角度に動かす
# Move the waist to the specified angle
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["waist_yaw_joint"]

@@ -203,6 +219,7 @@ def set_angle(self, yaw_angle, goal_secs=1.0e-9):

def hook_shutdown():
# shutdown時に0度へ戻る
# Sets the waist angle to 0 deg when shutting down
waist_yaw.set_angle(math.radians(0), 3.0)


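For reference, a minimal sketch of how a goal like the one set_angle builds could be sent with actionlib; the action name below is an assumption for illustration, since this commit does not show it.

import math
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint

rospy.init_node("waist_yaw_example")
# The action name here is assumed, not taken from this commit
client = actionlib.SimpleActionClient(
    "waist_yaw_controller/follow_joint_trajectory", FollowJointTrajectoryAction)
client.wait_for_server()

goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["waist_yaw_joint"]
point = JointTrajectoryPoint()
point.positions = [math.radians(30)]         # target angle in radians
point.time_from_start = rospy.Duration(2.0)  # reach the target in 2 seconds
goal.trajectory.points.append(point)

client.send_goal(goal)
client.wait_for_result(rospy.Duration(5.0))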
@@ -213,24 +230,33 @@ def main():

# オブジェクト追跡のしきい値
# 正規化された座標系(px, px)
# Threshold for object tracking
# In the normalized coordinate system (px, px)
THRESH_X = 0.05

# 腰の初期角度 Degree
# The initial angle of the waist in degrees
INITIAL_YAW_ANGLE = 0

# 腰の制御角度リミット値 Degree
# The waist angle limit (max/min) in degrees
MAX_YAW_ANGLE = 120
MIN_YAW_ANGLE = -120

# 腰の制御量
# 値が大きいほど腰を大きく動かす
# The control gain of the waist
# The larger the value, the more the waist moves
OPERATION_GAIN_X = 5.0

# 初期角度に戻る時の制御角度 Degree
# The control angle in degrees used when returning to the initial pose
RESET_OPERATION_ANGLE = 1

# 現在の腰角度を取得する
# ここで現在の腰角度を取得することで、ゆっくり初期角度へ戻る
# Receives the current waist angle
# By receiving the current angle here, the robot returns to the initial pose slowly
while not waist_yaw.state_received():
pass
yaw_angle = waist_yaw.get_current_yaw()
@@ -240,6 +266,7 @@ def main():

while not rospy.is_shutdown():
# 正規化されたオブジェクトの座標を取得
# Receives the normalized object coordinates
object_position = object_tracker.get_object_position()

if object_tracker.object_detected():
Expand All @@ -248,22 +275,27 @@ def main():
else:
lost_time = rospy.Time.now() - detection_timestamp
# 一定時間オブジェクトが見つからない場合は初期角度に戻る
# If it doesn't detect any object for a certain amount of time,
# it will return to the initial angle
if lost_time.to_sec() > 1.0:
look_object = False

if look_object:
# オブジェクトが画像中心にくるように首を動かす
# Moves the neck so that the object comes to the middle of the image
if math.fabs(object_position.x) > THRESH_X:
yaw_angle += -object_position.x * OPERATION_GAIN_X

# 腰の角度を制限する
# Limits the waist angles
if yaw_angle > MAX_YAW_ANGLE:
yaw_angle = MAX_YAW_ANGLE
if yaw_angle < MIN_YAW_ANGLE:
yaw_angle = MIN_YAW_ANGLE

else:
# ゆっくり初期角度へ戻る
# Returns to the initial angle slowly
diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
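Condensed, the tracking branch above is a proportional controller on the normalized x error, followed by clamping. A dry-run sketch without ROS, using the constants defined in main() and an illustrative object position:

import math

THRESH_X = 0.05
MAX_YAW_ANGLE, MIN_YAW_ANGLE = 120, -120
OPERATION_GAIN_X = 5.0

yaw_angle = 0.0
object_x = 0.4  # illustrative normalized object position

# One control step: move against the error, then clamp to the limits
if math.fabs(object_x) > THRESH_X:
    yaw_angle += -object_x * OPERATION_GAIN_X
yaw_angle = max(MIN_YAW_ANGLE, min(MAX_YAW_ANGLE, yaw_angle))
print(yaw_angle)  # -2.0: the waist rotates to reduce the error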
39 changes: 39 additions & 0 deletions sciurus17_examples/scripts/depth_camera_tracking.py
@@ -41,9 +41,12 @@ def __init__(self):
self._bridge = CvBridge()

# 頭カメラのカラー画像
# The color image from the head camera
self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1)
# 頭カメラの深度画像
# カラー画像と視界が一致するように補正されている
# The depth image from the head camera
# It is aligned to the color image to match the view
self._depth_sub = rospy.Subscriber("/sciurus17/camera/aligned_depth_to_color/image_raw", Image, self._depth_callback, queue_size=1)

self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
@@ -77,6 +80,7 @@ def _depth_callback(self, ros_image):
return

# 画像のwidth, heightを取得
# Obtains the width and height of the image
self._image_shape.x = input_image.shape[1]
self._image_shape.y = input_image.shape[0]

@@ -90,18 +94,22 @@ def _depth_callback(self, ros_image):
def get_object_position(self):
# 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力
# オブジェクトの座標は-1.0 ~ 1.0に正規化される
# Returns the object coordinates in a frame whose origin (0, 0) is the image center
# The coordinates are normalized to the range -1.0 to 1.0

object_center = Point(
self._object_rect[0] + self._object_rect[2] * 0.5,
self._object_rect[1] + self._object_rect[3] * 0.5,
0)

# 画像の中心を0, 0とした座標系に変換
# Convert to a coordinate frame whose origin (0, 0) is the image center
translated_point = Point()
translated_point.x = object_center.x - self._image_shape.x * 0.5
translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

# 正規化
# Normalize
normalized_point = Point()
if self._image_shape.x != 0 and self._image_shape.y != 0:
normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
@@ -116,17 +124,21 @@ def object_detected(self):

def _detect_object(self, input_depth_image):
# 検出するオブジェクトの大きさを制限する
# Limits the size of the object to be detected
MIN_OBJECT_SIZE = 10000 # px * px
MAX_OBJECT_SIZE = 80000 # px * px

# 検出範囲を4段階設ける
# 単位はmm
# Sets four levels of detection range
# Units are in mm
DETECTION_DEPTH = [
(500, 700),
(600, 800),
(700, 900),
(800, 1000)]
# 検出したオブジェクトを囲う長方形の色
# The color of the rectangle around the detected object
RECT_COLOR = [
(0, 0, 255),
(0, 255, 0),
Expand All @@ -135,6 +147,7 @@ def _detect_object(self, input_depth_image):

# カメラのカラー画像を加工して出力する
# カラー画像を受け取ってない場合はFalseを返す
# Processes and outputs the camera's color image
# Returns False if no color image has been received
output_image = self._color_image
if output_image is None:
return False
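A standalone sketch of the layered depth masking this function performs: each (near, far) pair in millimeters becomes a binary mask via cv2.inRange on the 16-bit depth image. The placeholder array stands in for the aligned depth topic.

import cv2
import numpy as np

# Placeholder 16-bit depth image in millimeters; the real input comes from
# /sciurus17/camera/aligned_depth_to_color/image_raw
depth_image = np.full((480, 640), 750, dtype=np.uint16)

DETECTION_DEPTH = [(500, 700), (600, 800), (700, 900), (800, 1000)]

for near, far in DETECTION_DEPTH:
    # Pixels whose depth lies in [near, far] become 255, all others 0
    mask = cv2.inRange(depth_image, near, far)
    print(near, far, cv2.countNonZero(mask))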
@@ -143,9 +156,11 @@ def _detect_object(self, input_depth_image):
self._median_depth = 0
for i, depth in enumerate(DETECTION_DEPTH):
# depth[0] ~ depth[1]の範囲でオブジェクトを抽出する
# Extracts an object between depth[0] and depth[1]
mask = cv2.inRange(input_depth_image, depth[0], depth[1])

# マスクから輪郭を抽出
# Extract the contours with the mask
if self._CV_MAJOR_VERSION == '4':
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
@@ -160,13 +175,16 @@ def _detect_object(self, input_depth_image):

if len(rects) > 0:
# 最も大きい長方形を抽出
# Extract the largest rectangle
rect = max(rects, key=(lambda x: x[2] * x[3]))

rect_size = rect[2] * rect[3]

# 長方形の大きさが指定範囲内であるかチェック
# Check if the size of the rectangle is within the limit
if rect_size > MIN_OBJECT_SIZE and rect_size < MAX_OBJECT_SIZE:
# 抽出した長方形を画像に描画する
# Draw the rectangle on the image
cv2.rectangle(output_image,
(rect[0], rect[1]),
(rect[0] + rect[2], rect[1] + rect[3]),
Expand All @@ -176,13 +194,16 @@ def _detect_object(self, input_depth_image):
self._object_detected = True

# 深度画像から長方形の領域を切り取る
# Crops the rectangular region from the depth image
object_depth = input_depth_image[
rect[1]:rect[1]+rect[3],
rect[0]:rect[0]+rect[2]]
# 深度の中央値を求める
# Calculates the median of the depth
self._median_depth = int(np.median(object_depth))

# 中央値のテキストを出力画像に描画する
# Draws the median value on the output image
cv2.putText(output_image, str(self._median_depth),
(rect[0], rect[1]+30),
cv2.FONT_HERSHEY_SIMPLEX,
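The median computation above crops the rectangle from the depth array with NumPy slicing and takes np.median; a minimal sketch with illustrative values:

import numpy as np

depth_image = np.full((480, 640), 800, dtype=np.uint16)  # fake depth in mm
rect = (100, 120, 60, 40)  # (x, y, w, h) as returned by cv2.boundingRect

# Rows index the y range, columns index the x range
object_depth = depth_image[rect[1]:rect[1] + rect[3],
                           rect[0]:rect[0] + rect[2]]
median_depth = int(np.median(object_depth))
print(median_depth)  # 800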
@@ -212,6 +233,7 @@ def __init__(self):

def _state_callback(self, state):
# 首の現在角度を取得
# Obtains the current neck angle

self._state_received = True
yaw_radian = state.actual.positions[0]
@@ -235,6 +257,7 @@ def get_current_pitch(self):

def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):
# 首を指定角度に動かす
# Move the neck to the specified angle
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"]

@@ -251,6 +274,7 @@ def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):

def hook_shutdown():
# shutdown時に0度へ戻る
# Sets the neck angle to 0 deg when shutting down
neck.set_angle(math.radians(0), math.radians(0), 3.0)


@@ -261,29 +285,38 @@ def main():

# オブジェクト追跡のしきい値
# 正規化された座標系(px, px)
# Threshold for object tracking
# In the normalized coordinate system (px, px)
THRESH_X = 0.05
THRESH_Y = 0.05

# 首の初期角度 Degree
# The initial angle of the neck in degrees
INITIAL_YAW_ANGLE = 0
INITIAL_PITCH_ANGLE = 0

# 首の制御角度リミット値 Degree
# The neck angle limit (max/min) in degrees
MAX_YAW_ANGLE = 120
MIN_YAW_ANGLE = -120
MAX_PITCH_ANGLE = 50
MIN_PITCH_ANGLE = -70

# 首の制御量
# 値が大きいほど首を大きく動かす
# The control gain of the neck
# The larger the value, the more the neck moves
OPERATION_GAIN_X = 5.0
OPERATION_GAIN_Y = 5.0

# 初期角度に戻る時の制御角度 Degree
# The control angle in degrees used when returning to the initial pose
RESET_OPERATION_ANGLE = 3

# 現在の首角度を取得する
# ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る
# Receives the current neck angle
# By receiving the current angle here, the robot returns to the initial pose slowly
while not neck.state_received():
pass
yaw_angle = neck.get_current_yaw()
@@ -294,6 +327,7 @@ def main():

while not rospy.is_shutdown():
# 正規化されたオブジェクトの座標を取得
# Receives the normalized object coordinates
object_position = object_tracker.get_object_position()

if object_tracker.object_detected():
Expand All @@ -302,18 +336,22 @@ def main():
else:
lost_time = rospy.Time.now() - detection_timestamp
# 一定時間オブジェクトが見つからない場合は初期角度に戻る
# If it doesn't detect any object for a certain amount of time,
# it will return to the initial angle
if lost_time.to_sec() > 1.0:
look_object = False

if look_object:
# オブジェクトが画像中心にくるように首を動かす
# Moves the neck so that the object comes to the middle of the image
if math.fabs(object_position.x) > THRESH_X:
yaw_angle += -object_position.x * OPERATION_GAIN_X

if math.fabs(object_position.y) > THRESH_Y:
pitch_angle += object_position.y * OPERATION_GAIN_Y

# 首の制御角度を制限する
# Limits the neck angles
if yaw_angle > MAX_YAW_ANGLE:
yaw_angle = MAX_YAW_ANGLE
if yaw_angle < MIN_YAW_ANGLE:
Expand All @@ -326,6 +364,7 @@ def main():

else:
# ゆっくり初期角度へ戻る
# Returns to the initial angle slowly
diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
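The slow return above steps the angle by RESET_OPERATION_ANGLE per cycle, with math.copysign choosing the direction. A standalone sketch of that stepping; the final snap to the target is an illustrative termination, since the rest of the loop is collapsed in this view.

import math

INITIAL_YAW_ANGLE = 0
RESET_OPERATION_ANGLE = 3
yaw_angle = 10.0  # illustrative starting angle in degrees

# Step toward the initial angle, 3 degrees per iteration
while math.fabs(yaw_angle - INITIAL_YAW_ANGLE) > RESET_OPERATION_ANGLE:
    diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
    yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
yaw_angle = INITIAL_YAW_ANGLE  # within one step of the target: snap to it
print(yaw_angle)  # 10 -> 7 -> 4 -> 1 -> 0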
