jakhon37/autonomous_ROS

Implementing an autonomous driving system with ROS (Robot Operating System) on the NVIDIA Jetson Nano involves several steps, including setting up the hardware, installing the necessary software, and developing and integrating various components such as perception, planning, and control. Here’s a comprehensive guide to help you get started:

1. Setting Up the Jetson Nano

Hardware Requirements:

  • NVIDIA Jetson Nano Developer Kit
  • MicroSD card (32GB or larger)
  • Power supply (5V 4A recommended)
  • Camera (preferably a stereo camera or depth camera)
  • Sensors (e.g., LiDAR, ultrasonic sensors, IMU)
  • Motor controller and actuators (for steering and throttle)
  • Monitor, keyboard, and mouse

Software Requirements:

  • JetPack SDK
  • ROS Melodic (matches the Ubuntu 18.04 base of JetPack 4.x on the Nano) or ROS Noetic (requires Ubuntu 20.04)
  • Python libraries: OpenCV, NumPy, etc.
  • Deep learning frameworks: TensorFlow or PyTorch

2. Initial Setup

  1. Flash the MicroSD Card:

    • Download and flash the JetPack image onto the MicroSD card.
  2. Boot the Jetson Nano:

    • Insert the MicroSD card, connect peripherals, and power on the Jetson Nano.
  3. Install ROS:

    • Follow the ROS installation guide for Jetson Nano:
      # Set up your sources.list
      sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
      
      # Set up your keys
      sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
      
      # Update package index
      sudo apt-get update
      
      # Install ROS
      sudo apt-get install ros-melodic-desktop-full
      
      # Dependencies for building packages (python-rosdep must be installed before rosdep init)
      sudo apt install python-rosdep python-rosinstall python-rosinstall-generator python-wstool build-essential
      
      # Initialize rosdep
      sudo rosdep init
      rosdep update
      
      # Environment setup
      echo "source /opt/ros/melodic/setup.bash" >> ~/.bashrc
      source ~/.bashrc
  4. Install Required Libraries:

    • Update the system and install essential libraries. Note that the generic PyPI TensorFlow wheel is CPU-only on the Nano; NVIDIA publishes a Jetson-specific wheel if you need GPU acceleration:
      sudo apt-get update
      sudo apt-get upgrade
      sudo apt-get install python3-opencv python3-numpy python3-pip
      pip3 install tensorflow
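
As a quick sanity check after installation, the short script below prints the library versions and whether TensorFlow can see the GPU (a minimal sketch, assuming a TensorFlow 2.x install; on TensorFlow 1.x use tf.test.is_gpu_available() instead):

  import cv2
  import numpy as np
  import tensorflow as tf
  
  print("OpenCV:", cv2.__version__)
  print("NumPy:", np.__version__)
  print("TensorFlow:", tf.__version__)
  # An empty list here means TensorFlow is running CPU-only
  print("GPUs:", tf.config.list_physical_devices('GPU'))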

3. Developing the Autonomous Driving System

Step 1: Perception (Object Detection, Lane Detection)

  1. Object Detection:

    • Use a pretrained deep learning model for object detection (e.g., SSD, YOLO); a ROS camera-topic variant is sketched after this list:
      import tensorflow as tf
      import numpy as np
      import cv2
      
      model = tf.saved_model.load("ssd_mobilenet_v2_fpnlite_320x320/saved_model")
      detection_fn = model.signatures['serving_default']
      
      def detect_objects(image):
          # The model expects a batched uint8 RGB tensor
          input_tensor = tf.convert_to_tensor(image)
          input_tensor = input_tensor[tf.newaxis, ...]
          detections = detection_fn(input_tensor)
          return detections
      
      cap = cv2.VideoCapture(0)
      while True:
          ret, frame = cap.read()
          if not ret:
              break
          height, width = frame.shape[:2]
          # OpenCV delivers BGR; the detector was trained on RGB images
          image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
          detections = detect_objects(image_np)
          num_detections = int(detections['num_detections'][0])
          for i in range(num_detections):
              score = detections['detection_scores'][0, i].numpy()
              if score > 0.5:
                  # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels
                  ymin, xmin, ymax, xmax = detections['detection_boxes'][0, i].numpy()
                  (left, right, top, bottom) = (xmin * width, xmax * width, ymin * height, ymax * height)
                  cv2.rectangle(frame, (int(left), int(top)), (int(right), int(bottom)), (0, 255, 0), 2)
          cv2.imshow('Autonomous Driving - Object Detection', frame)
          if cv2.waitKey(1) & 0xFF == ord('q'):
              break
      cap.release()
      cv2.destroyAllWindows()
  2. Lane Detection:

    • Use Canny edge detection and a probabilistic Hough transform to find lane markings; a steering-offset sketch follows this list:
      import cv2
      import numpy as np
      
      def region_of_interest(img, vertices):
          # Keep only the region of the image in front of the vehicle
          mask = np.zeros_like(img)
          cv2.fillPoly(mask, vertices, 255)
          masked_image = cv2.bitwise_and(img, mask)
          return masked_image
      
      def draw_lines(img, lines):
          if lines is not None:
              for line in lines:
                  for x1, y1, x2, y2 in line:
                      cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)
      
      cap = cv2.VideoCapture(0)
      while True:
          ret, frame = cap.read()
          if not ret:
              break
          gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
          blur = cv2.GaussianBlur(gray, (5, 5), 0)
          edges = cv2.Canny(blur, 50, 150)
          height, width = edges.shape
          # Triangular region of interest covering the road ahead
          roi_vertices = [(0, height), (width // 2, height // 2), (width, height)]
          cropped_edges = region_of_interest(edges, np.array([roi_vertices], np.int32))
          lines = cv2.HoughLinesP(cropped_edges, 1, np.pi / 180, 50, maxLineGap=50)
          draw_lines(frame, lines)
          cv2.imshow('Autonomous Driving - Lane Detection', frame)
          if cv2.waitKey(1) & 0xFF == ord('q'):
              break
      cap.release()
      cv2.destroyAllWindows()
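
On a real robot the camera frames usually arrive on a ROS topic rather than from a local V4L device. The sketch below wraps the object detector above in a ROS subscriber using cv_bridge (a minimal sketch; the /camera/image_raw topic name is an assumption and depends on your camera driver):

  import rospy
  import cv2
  from sensor_msgs.msg import Image
  from cv_bridge import CvBridge
  
  bridge = CvBridge()
  
  def image_callback(msg):
      # Convert the ROS image message into an OpenCV BGR array
      frame = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
      detections = detect_objects(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
      # ... draw boxes or publish detections, as in the loop above ...
  
  rospy.init_node('object_detector')
  # Topic name is an assumption; check `rostopic list` for your driver
  rospy.Subscriber('/camera/image_raw', Image, image_callback, queue_size=1)
  rospy.spin()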
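
The Hough lines can also be turned into a steering signal. As referenced above, the helper below estimates the lane center from the detected line endpoints and reports the vehicle's offset from it, normalized to [-1, 1] (a crude, illustrative estimate; it assumes the lines array returned by cv2.HoughLinesP):

  def lane_center_offset(lines, width):
      # Average the x coordinates of all detected line endpoints as a
      # rough lane-center estimate, then normalize the offset to [-1, 1]
      if lines is None:
          return 0.0
      xs = [x for line in lines for x1, y1, x2, y2 in line for x in (x1, x2)]
      lane_center = sum(xs) / len(xs)
      return (lane_center - width / 2) / (width / 2)

Published on a std_msgs/Float32 topic (for example /lane_offset, an assumed name), this offset becomes the input to the PID controller in Step 3.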

Step 2: Planning (Path Planning)

  1. Implementing a Simple Path Planner:

    • Use A* or Dijkstra's algorithm for path planning (a toy-grid usage example follows this list):
      import heapq
      
      def a_star(start, goal, graph):
          # graph maps each node to a dict of {neighbor: edge_cost}
          open_list = []
          heapq.heappush(open_list, (0, start))
          came_from = {}
          g_score = {node: float('inf') for node in graph}
          g_score[start] = 0
          f_score = {node: float('inf') for node in graph}
          f_score[start] = heuristic(start, goal)
      
          while open_list:
              _, current = heapq.heappop(open_list)
              if current == goal:
                  return reconstruct_path(came_from, current)
              for neighbor in graph[current]:
                  tentative_g_score = g_score[current] + graph[current][neighbor]
                  if tentative_g_score < g_score[neighbor]:
                      came_from[neighbor] = current
                      g_score[neighbor] = tentative_g_score
                      f_score[neighbor] = g_score[neighbor] + heuristic(neighbor, goal)
                      if neighbor not in [i[1] for i in open_list]:
                          heapq.heappush(open_list, (f_score[neighbor], neighbor))
          return []  # no path found
      
      def heuristic(a, b):
          # Manhattan distance between (x, y) grid nodes
          return abs(a[0] - b[0]) + abs(a[1] - b[1])
      
      def reconstruct_path(came_from, current):
          total_path = [current]
          while current in came_from:
              current = came_from[current]
              total_path.append(current)
          return total_path[::-1]
  2. Integrate with ROS:

    • Create a ROS node to handle path planning and communication with other components:
      import rospy
      from nav_msgs.msg import Path
      from geometry_msgs.msg import PoseStamped
      
      def path_planner(planned_path):
          rospy.init_node('path_planner', anonymous=True)
          path_pub = rospy.Publisher('/planned_path', Path, queue_size=10)
          rate = rospy.Rate(10)  # publish at 10 Hz
      
          while not rospy.is_shutdown():
              path_msg = Path()
              path_msg.header.stamp = rospy.Time.now()
              path_msg.header.frame_id = "map"
              for pose in planned_path:
                  pose_msg = PoseStamped()
                  pose_msg.header.stamp = rospy.Time.now()
                  pose_msg.header.frame_id = "map"
                  pose_msg.pose.position.x = pose[0]
                  pose_msg.pose.position.y = pose[1]
                  path_msg.poses.append(pose_msg)
              path_pub.publish(path_msg)
              rate.sleep()
      
      if __name__ == '__main__':
          try:
              # start, goal, and graph must be defined for your map
              # (see the toy-grid example after this list)
              planned_path = a_star(start, goal, graph)
              path_planner(planned_path)
          except rospy.ROSInterruptException:
              pass
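
As referenced above, a quick way to check the planner is a toy 2x2 grid graph (purely illustrative; a real graph would be derived from an occupancy grid or a map server):

  # Four nodes of a 2x2 grid, 4-connected, with unit edge costs
  graph = {
      (0, 0): {(0, 1): 1, (1, 0): 1},
      (0, 1): {(0, 0): 1, (1, 1): 1},
      (1, 0): {(0, 0): 1, (1, 1): 1},
      (1, 1): {(0, 1): 1, (1, 0): 1},
  }
  print(a_star((0, 0), (1, 1), graph))  # -> [(0, 0), (0, 1), (1, 1)]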

Step 3: Control (Actuating the Vehicle)

  1. Implementing Control Algorithms:

    • Use a PID controller for steering and speed control:
      class PIDController:
          def __init__(self, kp, ki, kd):
              self.kp = kp
              self.ki = ki
              self.kd = kd
              self.previous_error = 0
              self.integral = 0
      
          def control(self, setpoint, measured_value):
              # Assumes a fixed control period; scale by dt for variable rates
              error = setpoint - measured_value
              self.integral += error
              derivative = error - self.previous_error
              output = self.kp * error + self.ki * self.integral + self.kd * derivative
              self.previous_error = error
              return output
      
      # Example gains; kp, ki, and kd must be tuned for your vehicle
      pid = PIDController(1.0, 0.1, 0.05)
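
To close the loop, the controller can subscribe to the lane offset from Step 1 and publish velocity commands. The sketch below is a minimal example; the /lane_offset topic and the gains are assumptions, while /cmd_vel with geometry_msgs/Twist is the common ROS convention for driving a motor base:

  import rospy
  from std_msgs.msg import Float32
  from geometry_msgs.msg import Twist
  
  pid = PIDController(1.0, 0.1, 0.05)  # example gains; tune on the vehicle
  
  def offset_callback(msg):
      # Drive the lane-center offset toward zero
      steering = pid.control(0.0, msg.data)
      cmd = Twist()
      cmd.linear.x = 0.5        # constant cruise speed in m/s, illustrative
      cmd.angular.z = steering  # steering command from the PID
      cmd_pub.publish(cmd)
  
  rospy.init_node('controller')
  cmd_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
  rospy.Subscriber('/lane_offset', Float32, offset_callback, queue_size=1)
  rospy.spin()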
