From 573724a0337c557d6b8530cbfe3f14da231644c4 Mon Sep 17 00:00:00 2001
From: sijmenhuizenga
Date: Fri, 24 Jul 2020 17:08:36 +0200
Subject: [PATCH] Depth cameras ros bridge, documentation

---
 docs/camera.md                                | 36 +++++++++++++
 docs/integration-handbook.md                  |  5 +-
 docs/lidar.md                                 |  2 +-
 mkdocs.yml                                    |  1 +
 .../fsds_ros_bridge/scripts/cameralauncher.py |  3 +-
 .../src/fsds_ros_bridge_camera.cpp            | 52 ++++++++++++++++---
 settings.json                                 | 10 ++--
 7 files changed, 92 insertions(+), 17 deletions(-)
 create mode 100644 docs/camera.md

diff --git a/docs/camera.md b/docs/camera.md
new file mode 100644
index 00000000..92797372
--- /dev/null
+++ b/docs/camera.md
@@ -0,0 +1,36 @@
+# Cameras
+
+To add a camera to your vehicle, add the following JSON to the "Cameras" map in your `settings.json`:
+
+```
+"Camera1": {
+    "CaptureSettings": [{
+        "ImageType": 0,
+        "Width": 785,
+        "Height": 785,
+        "FOV_Degrees": 90
+    }],
+    "X": 1.0,
+    "Y": 0.06,
+    "Z": -2.20,
+    "Pitch": 0.0,
+    "Roll": 0.0,
+    "Yaw": 180
+}
+```
+
+`Camera1` is the name of the camera. This name will be used in the ROS topic names and in the coordinate frame.
+
+`X`, `Y` and `Z` are the position of the camera relative to the [vehicle pawn center](vehicle_model.md) of the car in the NED frame.
+
+`Roll`, `Pitch` and `Yaw` are rotations in degrees.
+
+`ImageType` describes the type of camera.
+At this moment only RGB and depth cameras are supported.
+For an RGB camera set this value to 0; for a depth camera set it to 2.
+A depth camera (DepthPerspective) reports, for each pixel, the depth along the projection ray that hits that pixel.
+
+RGB images published in ROS are encoded as `bgr8`; depth images are encoded as `32FC1`.
+
+`FOV_Degrees` describes [how much the camera sees](https://en.wikipedia.org/wiki/Field_of_view).
+The vertical FoV is automatically calculated using the following formula: `vertical FoV = image height / image width * horizontal FoV`.
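+
+To consume these images, subscribe to the `/fsds/camera/CAMERA_NAME` topic published by the ros bridge. Below is a minimal subscriber sketch; it assumes the bridge is running, `rospy` and `cv_bridge` are installed, and `Camera1` is a depth camera:
+
+```
+import rospy
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+
+bridge = CvBridge()
+
+def on_image(msg):
+    # Depth images arrive as 32FC1; use desired_encoding="bgr8" for an RGB camera.
+    depth = bridge.imgmsg_to_cv2(msg, desired_encoding="32FC1")
+    rospy.loginfo("depth at image center: %.2f", depth[depth.shape[0] // 2, depth.shape[1] // 2])
+
+rospy.init_node("camera_listener")
+rospy.Subscriber("/fsds/camera/Camera1", Image, on_image)
+rospy.spin()
+```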
\ No newline at end of file
diff --git a/docs/integration-handbook.md b/docs/integration-handbook.md
index cfbca3ac..a40eb926 100644
--- a/docs/integration-handbook.md
+++ b/docs/integration-handbook.md
@@ -80,7 +80,7 @@ To ensure the simulation will perform as expected, the sensor suite has some res
 Here you can read the requirements and restrictions that apply to every sensor.
 
 ### Camera
-**A this moment camera's are a bit broken. You can use the camera's but the framerate and topic names might change. See #43 and #85.**
+**Be warned: camera framerate cannot be guaranteed. See #43.**
 
 Every vehicle can have a maximum of 2 camera sensors.
 These camera(s) can be placed anywhere on the vehicle that would be allowed by FSG 2020 rules.
@@ -91,8 +91,6 @@ You can choose the resolution of the camera(s).
 In total, the cameras can have 1232450 pixels.
 Every dimension (width or height) must be at least 240px and no greater than 1600px.
 The horizontal field of view (FoV) is configurable for each camera and must be at least 30 degrees and not be greater than 90 degrees.
-The vertical FoV will be automatically calculated using the following formula: `vertical FoV = image height / image width * horizontal FoV`.
-
 The cameras' auto exposure, motion blur and gamma settings will be equal for all teams.
 
@@ -112,6 +110,7 @@ For each lidar, during a single rotation, the number of collected points for 1 l
 The number of collected points per lidar per laser per second cannot exceed 20480.
 To ensure the simulation keeps running smoothly:
+
 * Every lidar is limited to collect 10000 points per scan.
 * The total number of points collected per second can be no larger than 100000
diff --git a/docs/lidar.md b/docs/lidar.md
index 42866e0f..be7eba4d 100644
--- a/docs/lidar.md
+++ b/docs/lidar.md
@@ -22,7 +22,7 @@ This is an example lidar:
 `Lidar1` is the name of the lidar. This value will be used in the ros topic name and coordinate frame.
 
-`X`, `Y` and `Z` are the position of the lidar relative the the center of the car in NED frame.
+`X`, `Y` and `Z` are the position of the lidar relative to the [vehicle pawn center](vehicle_model.md) of the car in the NED frame.
 
 `Roll`,`Pitch` and `Yaw` are rotations in degrees.
diff --git a/mkdocs.yml b/mkdocs.yml
index d2550c35..c8a9ba76 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -16,6 +16,7 @@ nav:
   - "Vehicle model": "vehicle_model.md"
   - "IMU": "imu.md"
   - "Lidar": "lidar.md"
+  - "Camera": "camera.md"
   - "GPS": "gps.md"
   - "GSS": "gss.md"
   - "#tech madness":
diff --git a/ros/src/fsds_ros_bridge/scripts/cameralauncher.py b/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
index 8dbf27df..7a4ac051 100755
--- a/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
+++ b/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
@@ -41,8 +41,9 @@ def signal_handler(sig, frame):
             required=True, output='screen',
             args=args({
                 'camera_name': cameraname,
+                'depthcamera': camsettings["CaptureSettings"][0]["ImageType"] == 2,
                 'framerate': CAMERA_FRAMERATE,
-                'host_ip': AIRSIM_HOSTIP
+                'host_ip': AIRSIM_HOSTIP,
             })))
 # launch.launch(
 #     roslaunch.core.Node(
diff --git a/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp b/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
index 994c89ed..f3f1036c 100644
--- a/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
+++ b/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
@@ -8,6 +8,7 @@
 #include "vehicles/car/api/CarRpcLibClient.hpp"
 #include "statistics.h"
 #include "rpc/rpc_error.h"
+#include <cv_bridge/cv_bridge.h>
 
@@ -27,6 +28,7 @@ ros_bridge::Statistics fps_statistic;
 std::string camera_name = "";
 double framerate = 0.0;
 std::string host_ip = "localhost";
+bool depthcamera = false;
 
 ros::Time make_ts(uint64_t unreal_ts)
 {
@@ -41,21 +43,26 @@ std::string logprefix() {
     return "[cambridge " + camera_name + "] ";
 }
 
-void doImageUpdate(const ros::TimerEvent&)
-{
-    // We are using simGetImages instead of simGetImage because the latter does not return image dimension information.
-    std::vector<ImageRequest> req;
-    req.push_back(ImageRequest(camera_name, ImageType::Scene, false, false));
+std::vector<ImageResponse> getImage(ImageRequest req) {
+    // We are using simGetImages instead of simGetImage because the latter does not return image dimension information.
+    std::vector<ImageRequest> reqs;
+    reqs.push_back(req);
 
     std::vector<ImageResponse> img_responses;
     try {
-        img_responses = airsim_api->simGetImages(req, "FSCar");
+        img_responses = airsim_api->simGetImages(reqs, "FSCar");
     } catch (rpc::rpc_error& e) {
         std::cout << "error" << std::endl;
         std::string msg = e.get_error().as<std::string>();
         std::cout << "Exception raised by the API while getting image:" << std::endl << msg << std::endl;
     }
+    return img_responses;
+}
+
+void doImageUpdate(const ros::TimerEvent&)
+{
+    auto img_responses = getImage(ImageRequest(camera_name, ImageType::Scene, false, false));
 
     // if a render request failed for whatever reason, this img will be empty.
     if (img_responses.size() == 0 || img_responses[0].time_stamp == 0)
@@ -78,6 +85,36 @@ void doImageUpdate(const ros::TimerEvent&)
     fps_statistic.addCount();
 }
 
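+// AirSim returns a depth image in image_data_float: a row-major array of
+// 32-bit floats, one depth value per pixel. Copy it into a CV_32FC1 cv::Mat
+// so cv_bridge can publish it with the 32FC1 encoding.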
+cv::Mat manual_decode_depth(const ImageResponse& img_response)
+{
+    cv::Mat mat(img_response.height, img_response.width, CV_32FC1, cv::Scalar(0));
+    int img_width = img_response.width;
+
+    for (int row = 0; row < img_response.height; row++)
+        for (int col = 0; col < img_width; col++)
+            mat.at<float>(row, col) = img_response.image_data_float[row * img_width + col];
+    return mat;
+}
+
+void doDepthImageUpdate(const ros::TimerEvent&) {
+    auto img_responses = getImage(ImageRequest(camera_name, ImageType::DepthPerspective, true, false));
+
+    // if a render request failed for whatever reason, this img will be empty.
+    if (img_responses.size() == 0 || img_responses[0].time_stamp == 0)
+        return;
+
+    ImageResponse img_response = img_responses[0];
+
+    cv::Mat depth_img = manual_decode_depth(img_response);
+    sensor_msgs::ImagePtr img_msg = cv_bridge::CvImage(std_msgs::Header(), "32FC1", depth_img).toImageMsg();
+    img_msg->header.stamp = make_ts(img_response.time_stamp);
+    img_msg->header.frame_id = "/fsds/camera/" + camera_name;
+
+    image_pub.publish(img_msg);
+    fps_statistic.addCount();
+}
+
 void printFps(const ros::TimerEvent&)
 {
     std::cout << logprefix() << "Average FPS: " << (fps_statistic.getCount()/FPS_WINDOW) << std::endl;
@@ -93,6 +130,7 @@ int main(int argc, char ** argv)
     nh.param<std::string>("camera_name", camera_name, "");
     nh.param<double>("framerate", framerate, 0.0);
     nh.param<std::string>("host_ip", host_ip, "localhost");
+    nh.param<bool>("depthcamera", depthcamera, false);
 
     if(camera_name == "") {
         std::cout << logprefix() << "camera_name unset." << std::endl;
@@ -123,7 +161,7 @@ int main(int argc, char ** argv)
     image_pub = nh.advertise<sensor_msgs::Image>("/fsds/camera/" + camera_name, 1);
 
     // start the loop
-    ros::Timer imageTimer = nh.createTimer(ros::Duration(1/framerate), doImageUpdate);
+    ros::Timer imageTimer = nh.createTimer(ros::Duration(1/framerate), depthcamera ? doDepthImageUpdate : doImageUpdate);
     ros::Timer fpsTimer = nh.createTimer(ros::Duration(FPS_WINDOW), printFps);
     ros::spin();
     return 0;
diff --git a/settings.json b/settings.json
index 480a6c25..3c76ffb3 100644
--- a/settings.json
+++ b/settings.json
@@ -69,23 +69,23 @@
       ],
       "X": 1.0,
       "Y": 0.06,
-      "Z": -2.20,
+      "Z": -1.20,
       "Pitch": 0.0,
       "Roll": 0.0,
-      "Yaw": 180
+      "Yaw": 0
     },
     "cam2": {
       "CaptureSettings": [
         {
-          "ImageType": 0,
+          "ImageType": 2,
           "Width": 785,
           "Height": 785,
           "FOV_Degrees": 90
         }
       ],
       "X": 1.0,
-      "Y": 0.06,
-      "Z": -2.20,
+      "Y": 0.56,
+      "Z": -1.20,
       "Pitch": 0.0,
       "Roll": 0.0,
       "Yaw": 0.0
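
As an aside: the two 785x785 cameras configured above together use 785 * 785 * 2 = 1232450 pixels, which exactly fills the total pixel budget from the integration handbook. A camera configuration can be checked against the handbook limits with a short script. The sketch below is illustrative only and assumes the "Cameras" map sits under "Vehicles" -> "FSCar" in `settings.json`:

```
import json

# Camera limits from docs/integration-handbook.md.
MAX_CAMERAS = 2
MAX_TOTAL_PIXELS = 1232450
MIN_DIM, MAX_DIM = 240, 1600
MIN_FOV, MAX_FOV = 30, 90

with open("settings.json") as f:
    settings = json.load(f)

# The path to the cameras map is an assumption; adjust it to your settings layout.
cameras = settings["Vehicles"]["FSCar"]["Cameras"]
assert len(cameras) <= MAX_CAMERAS

total_pixels = 0
for name, cam in cameras.items():
    capture = cam["CaptureSettings"][0]
    width, height, fov = capture["Width"], capture["Height"], capture["FOV_Degrees"]
    assert MIN_DIM <= width <= MAX_DIM and MIN_DIM <= height <= MAX_DIM, name
    assert MIN_FOV <= fov <= MAX_FOV, name
    total_pixels += width * height

assert total_pixels <= MAX_TOTAL_PIXELS
print("ok: %d of %d pixels used" % (total_pixels, MAX_TOTAL_PIXELS))
```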