diff --git a/docs/camera.md b/docs/camera.md
new file mode 100644
index 00000000..d14ca8ad
--- /dev/null
+++ b/docs/camera.md
@@ -0,0 +1,58 @@
+# Cameras
+
+To add a camera to your vehicle, add the following JSON to the "Cameras" map in your `settings.json`:
+
+```
+"Camera1": {
+    "CaptureSettings": [{
+        "ImageType": 0,
+        "Width": 785,
+        "Height": 785,
+        "FOV_Degrees": 90
+    }],
+    "X": 1.0,
+    "Y": 0.06,
+    "Z": -2.20,
+    "Pitch": 0.0,
+    "Roll": 0.0,
+    "Yaw": 180
+}
+```
+
+`Camera1` is the name of the camera. This name will be used in the ros topic name and coordinate frame.
+
+`X`, `Y` and `Z` are the position of the camera relative to the [vehicle pawn center](vehicle_model.md) of the car in NED frame.
+
+`Roll`, `Pitch` and `Yaw` are rotations in degrees.
+
+`ImageType` describes the type of camera.
+At this moment only RGB and depth cameras are supported.
+For an RGB camera set this value to 0, for a depth camera set it to 2.
+
+* Depth cameras (aka DepthPerspective) work as follows: every pixel is given a float value in meters corresponding to the smallest distance from the camera to that point. Images published in ros are encoded as `32FC1`.
+* RGB cameras behave like a normal video camera. Images published in ros are encoded as `bgr8`.
+
+`FOV_Degrees` describes [how much the camera sees](https://en.wikipedia.org/wiki/Field_of_view).
+The vertical FoV will be automatically calculated using the following formula: `vertical FoV = image height / image width * horizontal FoV`.
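+
+For example, with the square 785x785 camera above the vertical FoV is also 90 degrees, while a 1600x900 image with a 90 degree horizontal FoV would get `900 / 1600 * 90 = 50.625` degrees vertical.
+
+The ros bridge publishes the images on the `/fsds/camera/CAMERA_NAME` topic.
+Below is a minimal sketch of how the images could be consumed in python (the node and callback names here are just examples):
+
+```python
+import rospy
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+
+bridge = CvBridge()
+
+def on_image(msg):
+    # RGB cameras arrive encoded as bgr8, depth cameras as 32FC1 (see above).
+    img = bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
+    print(img.shape)
+
+rospy.init_node('camera_listener')
+rospy.Subscriber('/fsds/camera/Camera1', Image, on_image)
+rospy.spin()
+```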
diff --git a/docs/integration-handbook.md b/docs/integration-handbook.md
index cfbca3ac..c3591d99 100644
--- a/docs/integration-handbook.md
+++ b/docs/integration-handbook.md
@@ -80,22 +80,23 @@ To ensure the simulation will perform as expected, the sensor suite has some res
 Here you can read the requirements and restrictions that apply to every sensor.
 
 ### Camera
-**A this moment camera's are a bit broken. You can use the camera's but the framerate and topic names might change. See #43 and #85.**
+**Be warned: the camera framerate cannot be guaranteed, see #43.**
+
+Only RGB cameras are allowed during competition ([see the camera docs](camera.md)).
+Depth cameras are not allowed during FSOnline 2020.
 
 Every vehicle can have a maximum of 2 camera sensors.
 These camera(s) can be placed anywhere on the vehicle that would be allowed by FSG 2020 rules.
 The camera body dimensions are a 4x4x4 cm cube with mounting points at any side except the front-facing side.
 
-All camera sensors output uncompressed RGBA8 images at 30 FPS.
+All camera sensors output images at around 20 FPS.
 
 You can choose the resolution of the camera(s).
-In total, the camera’s can have 1232450 pixels.
+In total, the cameras can have at most 1232450 pixels. Every dimension (width or height) must be at least 240px and no greater than 1600px.
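+For example, two cameras configured like the 785x785 camera from the [camera docs](camera.md) together use exactly 2 * 785 * 785 = 1232450 pixels.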
 
 The horizontal field of view (FoV) is configurable for each camera and must be at least 30 degrees and not be greater than 90 degrees.
-The vertical FoV will be automatically calculated using the following formula: `vertical FoV = image height / image width * horizontal FoV`.
-
 The camera's auto exposure, motion blur and gamma settings will be equal for all teams.
-
 ### Lidar
 A vehicle can have between 0 and 5 lidars.
 The lidar(s) can be placed anywhere on the vehicle that would be allowed by FSG 2020 rules.
@@ -112,6 +112,7 @@ For each lidar, during a single rotation, the number of collected points for 1 l
 The number of collected points per lidar per laser per second cannot exceed 20480.
 
 To ensure the simulation keeps running smoothly:
+
 * Every lidar is limited to collect 10000 points per scan.
 * The total number of points collected per second can be no larger than 100000
 
diff --git a/docs/lidar.md b/docs/lidar.md
index 42866e0f..be7eba4d 100644
--- a/docs/lidar.md
+++ b/docs/lidar.md
@@ -22,7 +22,7 @@ This is an example lidar:
 
 `Lidar1` is the name of the lidar. This value will be used in the ros topic name and coordinate frame.
 
-`X`, `Y` and `Z` are the position of the lidar relative the the center of the car in NED frame.
+`X`, `Y` and `Z` are the position of the lidar relative to the [vehicle pawn center](vehicle_model.md) of the car in NED frame.
 
 `Roll`,`Pitch` and `Yaw` are rotations in degrees.
 
diff --git a/mkdocs.yml b/mkdocs.yml
index d2550c35..c8a9ba76 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -16,6 +16,7 @@ nav:
   - "Vehicle model": "vehicle_model.md"
   - "IMU": "imu.md"
   - "Lidar": "lidar.md"
+  - "Camera": "camera.md"
   - "GPS": "gps.md"
   - "GSS": "gss.md"
   - "#tech madness":
diff --git a/ros/src/fsds_ros_bridge/scripts/cameralauncher.py b/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
index 8dbf27df..7a4ac051 100755
--- a/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
+++ b/ros/src/fsds_ros_bridge/scripts/cameralauncher.py
@@ -41,8 +41,9 @@ def signal_handler(sig, frame):
             required=True, output='screen',
             args=args({
                 'camera_name': cameraname,
+                'depthcamera': camsettings["CaptureSettings"][0]["ImageType"] == 2,
                 'framerate': CAMERA_FRAMERATE,
-                'host_ip': AIRSIM_HOSTIP
+                'host_ip': AIRSIM_HOSTIP,
             })))
 # launch.launch(
 #     roslaunch.core.Node(
diff --git a/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp b/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
index 994c89ed..6d5a7529 100644
--- a/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
+++ b/ros/src/fsds_ros_bridge/src/fsds_ros_bridge_camera.cpp
@@ -8,7 +8,8 @@
 #include "vehicles/car/api/CarRpcLibClient.hpp"
 #include "statistics.h"
 #include "rpc/rpc_error.h"
-
+#include <opencv2/opencv.hpp>
+#include <cv_bridge/cv_bridge.h>
 
@@ -27,6 +28,7 @@ ros_bridge::Statistics fps_statistic;
 std::string camera_name = "";
 double framerate = 0.0;
 std::string host_ip = "localhost";
+bool depthcamera = false;
 
 ros::Time make_ts(uint64_t unreal_ts)
 {
@@ -41,21 +43,26 @@
 std::string logprefix() {
     return "[cambridge " + camera_name + "] ";
 }
 
-void doImageUpdate(const ros::TimerEvent&)
-{
-    // We are using simGetImages instead of simGetImage because the latter does not return image dimention information.
-    std::vector<ImageRequest> req;
-    req.push_back(ImageRequest(camera_name, ImageType::Scene, false, false));
+std::vector<ImageResponse> getImage(ImageRequest req) {
+    // We are using simGetImages instead of simGetImage because the latter does not return image dimension information.
+    std::vector<ImageRequest> reqs;
+    reqs.push_back(req);
 
     std::vector<ImageResponse> img_responses;
     try {
-        img_responses = airsim_api->simGetImages(req, "FSCar");
+        img_responses = airsim_api->simGetImages(reqs, "FSCar");
     } catch (rpc::rpc_error& e) {
         std::cout << "error" << std::endl;
         std::string msg = e.get_error().as<std::string>();
         std::cout << "Exception raised by the API while getting image:" << std::endl << msg << std::endl;
     }
+    return img_responses;
+}
+
+void doImageUpdate(const ros::TimerEvent&)
+{
+    auto img_responses = getImage(ImageRequest(camera_name, ImageType::Scene, false, false));
 
     // if a render request failed for whatever reason, this img will be empty.
     if (img_responses.size() == 0 || img_responses[0].time_stamp == 0)
@@ -78,6 +85,62 @@ void doImageUpdate(const ros::TimerEvent&)
     fps_statistic.addCount();
 }
 
+cv::Mat manual_decode_depth(const ImageResponse& img_response)
+{
+    // simGetImages returns the depth image as a flat float array; copy it into a single-channel float Mat.
+    cv::Mat mat(img_response.height, img_response.width, CV_32FC1, cv::Scalar(0));
+    int img_width = img_response.width;
+
+    for (int row = 0; row < img_response.height; row++)
+        for (int col = 0; col < img_width; col++)
+            mat.at<float>(row, col) = img_response.image_data_float[row * img_width + col];
+    return mat;
+}
+
+// Rounds numToRound up to the nearest multiple of 'multiple'.
+// Note: the integer idiom ((x + m - 1) / m) * m does not round for floats, hence the ceil.
+float roundUp(float numToRound, float multiple)
+{
+    assert(multiple);
+    return ceil(numToRound / multiple) * multiple;
+}
+
+cv::Mat noisify_depthimage(cv::Mat in)
+{
+    cv::Mat out = in.clone();
+
+    // Blur
+    cv::Mat kernel = cv::Mat::ones(7, 7, CV_32F) / (float)(7 * 7);
+    cv::filter2D(in, out, -1, kernel, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
+
+    // Decrease depth resolution: round each depth up to a step that grows with distance (1m steps nearby, up to 10m steps far away).
+    for (int row = 0; row < in.rows; row++) {
+        for (int col = 0; col < in.cols; col++) {
+            float roundtarget = ceil(std::min(std::max(out.at<float>(row, col), 1.0f), 10.0f));
+            out.at<float>(row, col) = roundUp(out.at<float>(row, col), roundtarget);
+        }
+    }
+
+    return out;
+}
+
+void doDepthImageUpdate(const ros::TimerEvent&) {
+    auto img_responses = getImage(ImageRequest(camera_name, ImageType::DepthPerspective, true, false));
+
+    // if a render request failed for whatever reason, this img will be empty.
+    if (img_responses.size() == 0 || img_responses[0].time_stamp == 0)
+        return;
+
+    ImageResponse img_response = img_responses[0];
+
+    cv::Mat depth_img = noisify_depthimage(manual_decode_depth(img_response));
+    sensor_msgs::ImagePtr img_msg = cv_bridge::CvImage(std_msgs::Header(), "32FC1", depth_img).toImageMsg();
+    img_msg->header.stamp = make_ts(img_response.time_stamp);
+    img_msg->header.frame_id = "/fsds/camera/" + camera_name;
+
+    image_pub.publish(img_msg);
+    fps_statistic.addCount();
+}
+
 void printFps(const ros::TimerEvent&)
 {
     std::cout << logprefix() << "Average FPS: " << (fps_statistic.getCount()/FPS_WINDOW) << std::endl;
@@ -93,6 +154,7 @@ int main(int argc, char ** argv)
     nh.param<std::string>("camera_name", camera_name, "");
     nh.param<double>("framerate", framerate, 0.0);
     nh.param<std::string>("host_ip", host_ip, "localhost");
+    nh.param<bool>("depthcamera", depthcamera, false);
 
     if(camera_name == "") {
         std::cout << logprefix() << "camera_name unset." << std::endl;
@@ -123,7 +185,7 @@
     image_pub = nh.advertise<sensor_msgs::Image>("/fsds/camera/" + camera_name, 1);
 
     // start the loop
-    ros::Timer imageTimer = nh.createTimer(ros::Duration(1/framerate), doImageUpdate);
+    ros::Timer imageTimer = nh.createTimer(ros::Duration(1/framerate), depthcamera ? doDepthImageUpdate : doImageUpdate);
     ros::Timer fpsTimer = nh.createTimer(ros::Duration(FPS_WINDOW), printFps);
     ros::spin();
     return 0;
diff --git a/settings.json b/settings.json
index 480a6c25..db40a580 100644
--- a/settings.json
+++ b/settings.json
@@ -69,23 +69,23 @@
       ],
       "X": 1.0,
       "Y": 0.06,
-      "Z": -2.20,
+      "Z": -1.20,
       "Pitch": 0.0,
       "Roll": 0.0,
-      "Yaw": 180
+      "Yaw": 0
     },
     "cam2": {
       "CaptureSettings": [
         {
          "ImageType": 0,
          "Width": 785,
          "Height": 785,
          "FOV_Degrees": 90
        }
      ],
       "X": 1.0,
-      "Y": 0.06,
-      "Z": -2.20,
+      "Y": 0.56,
+      "Z": -1.20,
       "Pitch": 0.0,
       "Roll": 0.0,
       "Yaw": 0.0