From f6d6b84e3a8ea9a45fb4ac5c5ab845088561ae6f Mon Sep 17 00:00:00 2001
From: sergiopaniego
Date: Tue, 28 Nov 2023 19:07:19 +0100
Subject: [PATCH 01/11] [WIP] Replicate TCP paper as brain

---
 behavior_metrics/brains/CARLA/TCP/config.py | 64 +
 .../brains/CARLA/TCP/leaderboard/CHANGELOG.md | 4 +
 .../brains/CARLA/TCP/leaderboard/LICENSE | 21 +
 .../brains/CARLA/TCP/leaderboard/README.md | 16 +
 .../TCP_training_routes/routes_town01.xml | 1803 +++++++++++++++++
 .../routes_town01_addition.xml | 603 ++++++
 .../TCP_training_routes/routes_town01_val.xml | 303 +++
 .../TCP_training_routes/routes_town02.xml | 1803 +++++++++++++++++
 .../TCP_training_routes/routes_town02_val.xml | 129 ++
 .../TCP_training_routes/routes_town03.xml | 1803 +++++++++++++++++
 .../routes_town03_addition.xml | 603 ++++++
 .../TCP_training_routes/routes_town03_val.xml | 303 +++
 .../TCP_training_routes/routes_town04.xml | 1803 +++++++++++++++++
 .../routes_town04_addition.xml | 723 +++++++
 .../TCP_training_routes/routes_town04_val.xml | 363 ++++
 .../TCP_training_routes/routes_town05.xml | 1803 +++++++++++++++++
 .../routes_town05_addition.xml | 483 +++++
 .../TCP_training_routes/routes_town05_val.xml | 303 +++
 .../TCP_training_routes/routes_town06.xml | 1803 +++++++++++++++++
 .../TCP_training_routes/routes_town06_val.xml | 303 +++
 .../TCP_training_routes/routes_town07.xml | 1803 +++++++++++++++++
 .../TCP_training_routes/routes_town07_val.xml | 303 +++
 .../TCP_training_routes/routes_town10.xml | 1803 +++++++++++++++++
 .../routes_town10_addition.xml | 453 +++++
 .../TCP_training_routes/routes_town10_val.xml | 303 +++
 .../additional_routes/routes_town01_long.xml | 175 ++
 .../additional_routes/routes_town02_long.xml | 202 ++
 .../additional_routes/routes_town03_long.xml | 734 +++++++
 .../additional_routes/routes_town04_long.xml | 1016 ++++++++++
 .../additional_routes/routes_town06_long.xml | 371 ++++
 .../evaluation_routes/routes_lav_valid.xml | 67 +
 .../evaluation_routes/routes_town05_long.xml | 353 ++++
 .../training_routes/routes_town01_short.xml | 95 +
 .../training_routes/routes_town01_tiny.xml | 1171 +++++++++++
 .../training_routes/routes_town02_short.xml | 63 +
 .../training_routes/routes_town02_tiny.xml | 519 +++++
 .../training_routes/routes_town03_short.xml | 91 +
 .../training_routes/routes_town03_tiny.xml | 1771 ++++++++++++++++
 .../routes_town03_tiny_180.xml | 719 +++++++
 .../training_routes/routes_town04_short.xml | 99 +
 .../training_routes/routes_town04_tiny.xml | 1435 +++++++++++++
 .../routes_town04_tiny_260.xml | 395 ++++
 .../training_routes/routes_town06_short.xml | 75 +
 .../training_routes/routes_town06_tiny.xml | 1179 +++++++++++
 .../training_routes/routes_town07_short.xml | 59 +
 .../training_routes/routes_town07_tiny.xml | 571 ++++++
 .../training_routes/routes_town10_short.xml | 39 +
 .../training_routes/routes_town10_tiny.xml | 471 +++++
 .../validation_routes/routes_town05_short.xml | 131 ++
 .../validation_routes/routes_town05_tiny.xml | 1355 +++++++++++++
 .../TCP/leaderboard/leaderboard/__init__.py | 0
 .../leaderboard/autoagents/__init__.py | 0
 .../leaderboard/autoagents/agent_wrapper.py | 280 +++
 .../autoagents/autonomous_agent.py | 126 ++
 .../leaderboard/autoagents/dummy_agent.py | 90 +
 .../leaderboard/autoagents/human_agent.py | 266 +++
 .../autoagents/human_agent_config.txt | 9 +
 .../leaderboard/autoagents/npc_agent.py | 106 +
 .../leaderboard/autoagents/ros_agent.py | 451 +++++
 .../leaderboard/leaderboard/envs/__init__.py | 0
 .../leaderboard/envs/sensor_interface.py | 250 +++
 .../leaderboard/leaderboard_evaluator.py | 529 +++++
 .../leaderboard/scenarios/__init__.py | 0
 .../scenarios/background_activity.py | 100 +
 .../leaderboard/scenarios/master_scenario.py | 120 ++
 .../leaderboard/scenarios/route_scenario.py | 599 ++++++
 .../leaderboard/scenarios/scenario_manager.py | 226 +++
 .../scenarios/scenarioatomics/__init__.py | 0
 .../scenarioatomics/atomic_criteria.py | 95 +
 .../leaderboard/leaderboard/utils/__init__.py | 0
 .../leaderboard/utils/checkpoint_tools.py | 77 +
 .../leaderboard/utils/result_writer.py | 116 ++
 .../leaderboard/utils/route_indexer.py | 72 +
 .../leaderboard/utils/route_manipulation.py | 161 ++
 .../leaderboard/utils/route_parser.py | 357 ++++
 .../leaderboard/utils/statistics_manager.py | 343 ++++
 .../CARLA/TCP/leaderboard/requirements.txt | 5 +
 .../CARLA/TCP/leaderboard/scenario_runner | 1 +
 .../TCP/leaderboard/scripts/Dockerfile.master | 96 +
 .../scripts/code_check_and_formatting.sh | 11 +
 .../leaderboard/scripts/data_collection.sh | 45 +
 .../TCP/leaderboard/scripts/make_docker.sh | 35 +
 .../leaderboard/scripts/pretty_print_json.py | 111 +
 .../TCP/leaderboard/scripts/run_evaluation.sh | 43 +
 .../leaderboard/scripts/set_new_scenarios.py | 218 ++
 .../TCP/leaderboard/team_code/auto_pilot.py | 379 ++++
 .../TCP/leaderboard/team_code/base_agent.py | 937 +++++++++
 .../TCP/leaderboard/team_code/map_agent.py | 179 ++
 .../leaderboard/team_code/pid_controller.py | 28 +
 .../TCP/leaderboard/team_code/planner.py | 113 ++
 .../leaderboard/team_code/roach_ap_agent.py | 636 ++++++
 .../TCP/leaderboard/team_code/tcp_agent.py | 272 +++
 behavior_metrics/brains/CARLA/TCP/model.py | 344 ++++
 behavior_metrics/brains/CARLA/TCP/resnet.py | 389 ++++
 .../brains/CARLA/brain_carla_TCP.py | 293 +++
 ...02_anticlockwise_imitation_learning.launch | 4 +-
 .../configs/CARLA/default_carla_torch.yml | 9 +-
 behavior_metrics/robot/interfaces/imu.py | 87 +
 behavior_metrics/robot/sensors.py | 22 +
 99 files changed, 41486 insertions(+), 4 deletions(-)
 create mode 100644 behavior_metrics/brains/CARLA/TCP/config.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/CHANGELOG.md
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/LICENSE
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/README.md
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_addition.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_addition.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_addition.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_addition.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_addition.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_val.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town01_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town02_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town03_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town04_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town06_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_lav_valid.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_town05_long.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny_180.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny_260.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_short.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_tiny.xml
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/agent_wrapper.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/autonomous_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/dummy_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent_config.txt
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/npc_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/ros_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/sensor_interface.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/background_activity.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/master_scenario.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/atomic_criteria.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/__init__.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/checkpoint_tools.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/result_writer.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_indexer.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_manipulation.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_parser.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/requirements.txt
 create mode 160000 behavior_metrics/brains/CARLA/TCP/leaderboard/scenario_runner
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/Dockerfile.master
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/code_check_and_formatting.sh
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/data_collection.sh
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/make_docker.sh
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/pretty_print_json.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/set_new_scenarios.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/auto_pilot.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/base_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/map_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/pid_controller.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/roach_ap_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/model.py
 create mode 100644 behavior_metrics/brains/CARLA/TCP/resnet.py
 create mode 100644 behavior_metrics/brains/CARLA/brain_carla_TCP.py
 create mode 100644 behavior_metrics/robot/interfaces/imu.py

diff --git a/behavior_metrics/brains/CARLA/TCP/config.py b/behavior_metrics/brains/CARLA/TCP/config.py
new file mode 100644
index 00000000..6c2d6b43
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/config.py
@@ -0,0 +1,64 @@
+import os
+
+class GlobalConfig:
+    """ base architecture configurations """
+    # Data
+    seq_len = 1  # input timesteps
+    pred_len = 4  # future waypoints predicted
+
+    # data root
+    root_dir_all = "tcp_carla_data"
+
+    train_towns = ['town01', 'town03', 'town04', 'town06', ]
+    val_towns = ['town02', 'town05', 'town07', 'town10']
+    train_data, val_data = [], []
+    for town in train_towns:
+        train_data.append(os.path.join(root_dir_all, town))
+        train_data.append(os.path.join(root_dir_all, town + '_addition'))
+    for town in val_towns:
+        val_data.append(os.path.join(root_dir_all, town + '_val'))
+
+    ignore_sides = True  # don't consider side cameras
+    ignore_rear = True  # don't consider rear cameras
+
+    input_resolution = 256
+
+    scale = 1  # image pre-processing
+    crop = 256  # image pre-processing
+
+    lr = 1e-4  # learning rate
+
+    # Controller
+    turn_KP = 0.75
+    turn_KI = 0.75
+    turn_KD = 0.3
+    turn_n = 40  # buffer size
+
+    speed_KP = 5.0
+    speed_KI = 0.5
+    speed_KD = 1.0
+    speed_n = 40  # buffer size
+
+    max_throttle = 0.75  # upper limit on throttle signal value in dataset
+    brake_speed = 0.4  # desired speed below which brake is triggered
+    brake_ratio = 1.1  # ratio of speed to desired speed at which brake is triggered
+    clip_delta = 0.25  # maximum change in speed input to longitudinal controller
+
+
+    aim_dist = 4.0  # distance to search around for aim point
+    angle_thresh = 0.3  # outlier control detection angle
+    dist_thresh = 10  # target point y-distance for outlier filtering
+
+
+    speed_weight = 0.05
+    value_weight = 0.001
+    features_weight = 0.05
+
+    rl_ckpt = "roach/log/ckpt_11833344.pth"
+
+    img_aug = True
+
+
+    def __init__(self, **kwargs):
+        for k, v in kwargs.items():
+            setattr(self, k, v)
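Every hyperparameter above is a class attribute, and the `**kwargs` constructor lets a caller override any of them per run. Below is a minimal usage sketch; the `PIDController` class is not code from this patch but an assumption about the kind of windowed controller the `turn_*`/`speed_*` gains and `*_n` buffer sizes feed (`team_code/pid_controller.py` appears only in the diffstat here), and the import path simply mirrors the file location in this patch.

```python
from collections import deque

import numpy as np

# Import path mirrors this patch's layout; adjust to your checkout.
from brains.CARLA.TCP.config import GlobalConfig

# Any class attribute can be overridden per run through **kwargs.
config = GlobalConfig(lr=5e-5, img_aug=False)
print(config.lr, config.pred_len)  # -> 5e-05 4


class PIDController:
    """Sketch of a windowed PID sized by the *_n buffer parameters above."""

    def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
        self.K_P, self.K_I, self.K_D = K_P, K_I, K_D
        self.window = deque(maxlen=n)  # rolling error history

    def step(self, error):
        self.window.append(error)
        if len(self.window) >= 2:
            integral = float(np.mean(self.window))   # mean error over the window
            derivative = self.window[-1] - self.window[-2]
        else:
            integral = derivative = 0.0
        return self.K_P * error + self.K_I * integral + self.K_D * derivative


# The turn_*/speed_* gains above parameterize two such controllers.
turn_controller = PIDController(config.turn_KP, config.turn_KI, config.turn_KD, config.turn_n)
speed_controller = PIDController(config.speed_KP, config.speed_KI, config.speed_KD, config.speed_n)
steer = turn_controller.step(0.1)      # heading error (rad) -> steering command
throttle = speed_controller.step(2.0)  # speed error (m/s) -> throttle, capped by max_throttle downstream
```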
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/CHANGELOG.md b/behavior_metrics/brains/CARLA/TCP/leaderboard/CHANGELOG.md
new file mode 100644
index 00000000..5e31ad62
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/CHANGELOG.md
@@ -0,0 +1,4 @@
+## Latest changes
+
+* Creating stable version for the CARLA online leaderboard
+* Initial creation of the repository
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/LICENSE b/behavior_metrics/brains/CARLA/TCP/leaderboard/LICENSE
new file mode 100644
index 00000000..a583c079
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 CARLA
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/README.md b/behavior_metrics/brains/CARLA/TCP/leaderboard/README.md
new file mode 100644
index 00000000..03b6b1eb
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/README.md
@@ -0,0 +1,16 @@
+The main goal of the CARLA Autonomous Driving Leaderboard is to evaluate the driving proficiency of autonomous agents in realistic traffic situations. The leaderboard serves as an open platform for the community to perform fair and reproducible evaluations, simplifying the comparison between different approaches.
+
+Autonomous agents have to drive through a set of predefined routes. For each route, agents are initialized at a starting point and have to drive to a destination point. Agents are provided with a description of the route. Routes take place in a variety of areas, including freeways, urban scenes, and residential districts.
+
+Agents face multiple traffic situations based on the NHTSA typology, such as:
+
+* Lane merging
+* Lane changing
+* Negotiations at traffic intersections
+* Negotiations at roundabouts
+* Handling traffic lights and traffic signs
+* Coping with pedestrians, cyclists and other elements
+
+The user can change the weather of the simulation, allowing agents to be evaluated in a variety of weather conditions, including daylight scenes, sunset, rain, fog, and night, among others.
+
+More information can be found [here](https://leaderboard.carla.org/)
\ No newline at end of file
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml
new file mode 100644
index 00000000..e43c3716
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml
@@ -0,0 +1,1803 @@
[1803 lines of route waypoint XML stripped from this excerpt]
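The bodies of this and the following route files were stripped during extraction, so only the diff headers remain. By the public CARLA leaderboard convention, each file holds a `<routes>` element containing `<route>` entries with a town name and a sparse list of waypoints; the exact attribute set used here is an assumption, and the coordinates below are hypothetical. A minimal sketch of writing and reading such a file with the standard library:

```python
import xml.etree.ElementTree as ET

# Build a minimal route file in the assumed leaderboard format:
# <routes><route id town><waypoint x y z pitch roll yaw/></route></routes>
routes = ET.Element('routes')
route = ET.SubElement(routes, 'route', id='0', town='Town01')
for x, y in [(338.7, 226.9), (338.7, 200.0)]:  # hypothetical coordinates
    ET.SubElement(route, 'waypoint', x=str(x), y=str(y), z='0.0',
                  pitch='0.0', roll='0.0', yaw='269.2')
ET.ElementTree(routes).write('routes_example.xml')

# Read it back: one start-to-destination waypoint list per route.
for r in ET.parse('routes_example.xml').getroot().iter('route'):
    wps = [(float(w.get('x')), float(w.get('y'))) for w in r.iter('waypoint')]
    print(r.get('town'), wps)
```

Relatedly, the weather variation mentioned in the README can be driven through the CARLA Python API; a short sketch, assuming a CARLA server is already running on the default port:

```python
import carla

client = carla.Client('localhost', 2000)  # assumed server address
world = client.get_world()

# Either a built-in preset...
world.set_weather(carla.WeatherParameters.WetCloudyNoon)
# ...or explicit parameters (a rainy sunset, for example).
world.set_weather(carla.WeatherParameters(
    cloudiness=80.0, precipitation=60.0, sun_altitude_angle=15.0))
```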
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_addition.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_addition.xml new file mode 100644 index 00000000..ef0e5bdf --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_addition.xml @@ -0,0 +1,603 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_val.xml new file mode 100644 index 00000000..294c6e18 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01_val.xml @@ -0,0 +1,303 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml new file mode 100644 index 00000000..9ca6bd0d --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02_val.xml new file mode 100644 index 00000000..ffb32d7c --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02_val.xml @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03.xml new file mode 100644 index 00000000..27517af9 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_addition.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_addition.xml new file mode 100644 index 00000000..9c8c70b8 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_addition.xml @@ -0,0 +1,603 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_val.xml new file mode 100644 index 00000000..bfa366ba --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town03_val.xml @@ -0,0 +1,303 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04.xml new file mode 100644 index 00000000..5fc1bf68 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_addition.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_addition.xml new file mode 100644 index 00000000..01a5b114 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_addition.xml @@ -0,0 +1,723 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_val.xml new file mode 100644 index 00000000..b36eaae5 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town04_val.xml @@ -0,0 +1,363 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05.xml 
b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05.xml new file mode 100644 index 00000000..a48fb264 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_addition.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_addition.xml new file mode 100644 index 00000000..2e0a41e4 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_addition.xml @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_val.xml new file mode 100644 index 00000000..4f43bc90 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town05_val.xml @@ -0,0 +1,303 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06.xml new file mode 100644 index 00000000..1e2ce220 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06_val.xml new file mode 100644 index 00000000..f6579a06 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town06_val.xml @@ -0,0 +1,303 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07.xml new file mode 100644 index 00000000..bd7fbcbe --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07.xml @@ -0,0 +1,1803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07_val.xml
new file mode 100644
index 00000000..559ea17c
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town07_val.xml
@@ -0,0 +1,303 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10.xml
new file mode 100644
index 00000000..29cc6ef0
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10.xml
@@ -0,0 +1,1803 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_addition.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_addition.xml
new file mode 100644
index 00000000..695ae2d9
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_addition.xml
@@ -0,0 +1,453 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_val.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_val.xml
new file mode 100644
index 00000000..41ad993a
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town10_val.xml
@@ -0,0 +1,303 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town01_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town01_long.xml
new file mode 100644
index 00000000..dfcdaf2c
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town01_long.xml
@@ -0,0 +1,175 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town02_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town02_long.xml
new file mode 100644
index 00000000..b0def188
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town02_long.xml
@@ -0,0 +1,202 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town03_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town03_long.xml
new file mode 100644
index 00000000..750f79f1
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town03_long.xml
@@ -0,0 +1,734 @@
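Judging by the file names and hunk sizes, the long/short/tiny variants of these route sets differ mainly in driven route length. Under the schema sketched above, a route's approximate length falls out of its consecutive waypoints; a small sketch, with the function name and threshold purely illustrative:

import math

def route_length(waypoints):
    """Sum of Euclidean distances between consecutive (x, y, z) waypoints, in meters."""
    return sum(math.dist(a, b) for a, b in zip(waypoints, waypoints[1:]))

# Building on the load_routes sketch above, e.g. routes longer than ~1 km
# would plausibly land in a *_long.xml set:
# long_ids = [rid for rid, (_, wps) in load_routes(path).items() if route_length(wps) > 1000]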
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town04_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town04_long.xml
new file mode 100644
index 00000000..4225ce5e
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town04_long.xml
@@ -0,0 +1,1016 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town06_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town06_long.xml
new file mode 100644
index 00000000..934989a7
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/additional_routes/routes_town06_long.xml
@@ -0,0 +1,371 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_lav_valid.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_lav_valid.xml
new file mode 100644
index 00000000..85f83d61
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_lav_valid.xml
@@ -0,0 +1,67 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_town05_long.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_town05_long.xml
new file mode 100644
index 00000000..50ba8479
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/evaluation_routes/routes_town05_long.xml
@@ -0,0 +1,353 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_short.xml
new file mode 100644
index 00000000..b7719787
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_short.xml
@@ -0,0 +1,95 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_tiny.xml
new file mode 100644
index 00000000..04db1968
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town01_tiny.xml
@@ -0,0 +1,1171 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_short.xml
new file mode 100644
index 00000000..3ee0a84e
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_short.xml
@@ -0,0 +1,63 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_tiny.xml
new file mode 100644
index 00000000..a2d735e1
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town02_tiny.xml
@@ -0,0 +1,519 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_short.xml
new file mode 100644
index 00000000..b125e4ce
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_short.xml
@@ -0,0 +1,91 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny.xml
new file mode 100644
index 00000000..84735c37
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny.xml
@@ -0,0 +1,1771 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny_180.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny_180.xml
new file mode 100644
index 00000000..14f37971
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town03_tiny_180.xml
@@ -0,0 +1,719 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_short.xml
new file mode 100644
index 00000000..cbbb5153
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_short.xml
@@ -0,0 +1,99 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny.xml
new file mode 100644
index 00000000..cad6a317
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny.xml
@@ -0,0 +1,1435 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny_260.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny_260.xml
new file mode 100644
index 00000000..43d1dee0
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town04_tiny_260.xml
@@ -0,0 +1,395 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_short.xml
new file mode 100644
index 00000000..f6e460cf
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_short.xml
@@ -0,0 +1,75 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_tiny.xml
new file mode 100644
index 00000000..7dd232d8
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town06_tiny.xml
@@ -0,0 +1,1179 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_short.xml
new file mode 100644
index 00000000..38212f91
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_short.xml
@@ -0,0 +1,59 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_tiny.xml
new file mode 100644
index 00000000..41f77436
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town07_tiny.xml
@@ -0,0 +1,571 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_short.xml
new file mode 100644
index 00000000..162337c0
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_short.xml
@@ -0,0 +1,39 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_tiny.xml
new file mode 100644
index 00000000..40158296
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/training_routes/routes_town10_tiny.xml
@@ -0,0 +1,471 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_short.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_short.xml
new file mode 100644
index 00000000..e51d0166
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_short.xml
@@ -0,0 +1,131 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_tiny.xml b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_tiny.xml
new file mode 100644
index 00000000..bc2e2334
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/data/validation_routes/routes_town05_tiny.xml
@@ -0,0 +1,1355 @@
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/agent_wrapper.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/agent_wrapper.py
new file mode 100644
index 00000000..6247852a
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/agent_wrapper.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2019 Intel Corporation
+#
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+Wrapper for autonomous agents required for tracking and checking of used sensors
+"""
+
+from __future__ import print_function
+import math
+import os
+import time
+
+import carla
+from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
+
+from leaderboard.envs.sensor_interface import CallBack, OpenDriveMapReader, SpeedometerReader, SensorConfigurationInvalid
+from leaderboard.autoagents.autonomous_agent import Track
+
+MAX_ALLOWED_RADIUS_SENSOR = 3.0
+MAX_ALLOWED_RADIUS_SENSOR = 1000.0  # increased for topdown map generation
+
+SENSORS_LIMITS = {
+    'sensor.camera.rgb': 10,
+    'sensor.camera.semantic_segmentation': 10,
+    'sensor.camera.depth': 10,
+    'sensor.lidar.ray_cast': 1,
+    'sensor.lidar.ray_cast_semantic': 1,
+    'sensor.other.radar': 2,
+    'sensor.other.gnss': 1,
+    'sensor.other.imu': 1,
+    'sensor.opendrive_map': 1,
+    'sensor.speedometer': 1
+}
+
+
+class AgentError(Exception):
+    """
+    Exceptions thrown when the agent returns an error during the simulation
+    """
+
+    def __init__(self, message):
+        super(AgentError, self).__init__(message)
+
+
+class AgentWrapper(object):
+
+    """
+    Wrapper for autonomous agents required for tracking and checking of used sensors
+    """
+
+    allowed_sensors = [
+        'sensor.opendrive_map',
+        'sensor.speedometer',
+        'sensor.camera.rgb',
+        'sensor.camera.semantic_segmentation',
+        'sensor.camera.depth',
+        'sensor.camera',
+        'sensor.lidar.ray_cast',
+        'sensor.lidar.ray_cast_semantic',
+        'sensor.other.radar',
+        'sensor.other.gnss',
+        'sensor.other.imu'
+    ]
+
+    _agent = None
+    _sensors_list = []
+
+    def __init__(self, agent):
+        """
+        Set the autonomous agent
+        """
+        self._agent = agent
+
+    def __call__(self):
+        """
+        Pass the call directly to the agent
+        """
+        return self._agent()
+
+    def setup_sensors(self, vehicle, debug_mode=False):
+        """
+        Create the sensors defined by the user and attach them to the ego-vehicle
+        :param vehicle: ego vehicle
+        :return:
+        """
+        bp_library = CarlaDataProvider.get_world().get_blueprint_library()
+        for sensor_spec in self._agent.sensors():
+            # These are the pseudosensors (not spawned)
+            if sensor_spec['type'].startswith('sensor.opendrive_map'):
+                # The HDMap pseudo sensor is created directly here
+                sensor = OpenDriveMapReader(vehicle, sensor_spec['reading_frequency'])
+            elif sensor_spec['type'].startswith('sensor.speedometer'):
+                delta_time = CarlaDataProvider.get_world().get_settings().fixed_delta_seconds
+                frame_rate = 1 / delta_time
+                sensor = SpeedometerReader(vehicle, frame_rate)
+            # These are the sensors spawned on the carla world
+            else:
+                bp = bp_library.find(str(sensor_spec['type']))
+                if sensor_spec['type'].startswith('sensor.camera.semantic_segmentation'):
+                    bp.set_attribute('image_size_x', str(sensor_spec['width']))
+                    bp.set_attribute('image_size_y', str(sensor_spec['height']))
+                    bp.set_attribute('fov', str(sensor_spec['fov']))
+                    bp.set_attribute('lens_circle_multiplier', str(3.0))
+                    bp.set_attribute('lens_circle_falloff', str(3.0))
+
+                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                elif sensor_spec['type'].startswith('sensor.camera.depth'):
+                    bp.set_attribute('image_size_x', str(sensor_spec['width']))
+                    bp.set_attribute('image_size_y', str(sensor_spec['height']))
+                    bp.set_attribute('fov', str(sensor_spec['fov']))
+                    bp.set_attribute('lens_circle_multiplier', str(3.0))
+                    bp.set_attribute('lens_circle_falloff', str(3.0))
+
+                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                elif sensor_spec['type'].startswith('sensor.camera'):
+                    bp.set_attribute('image_size_x', str(sensor_spec['width']))
+                    bp.set_attribute('image_size_y', str(sensor_spec['height']))
+                    bp.set_attribute('fov', str(sensor_spec['fov']))
+                    bp.set_attribute('lens_circle_multiplier', str(3.0))
+                    bp.set_attribute('lens_circle_falloff', str(3.0))
+                    bp.set_attribute('chromatic_aberration_intensity', str(0.5))
+                    bp.set_attribute('chromatic_aberration_offset', str(0))
+
+                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                elif sensor_spec['type'].startswith('sensor.lidar.ray_cast_semantic'):
+                    bp.set_attribute('range', str(85))
+                    bp.set_attribute('rotation_frequency', str(10))  # default: 10, change to 20 for old lidar models
+                    bp.set_attribute('channels', str(64))
+                    bp.set_attribute('upper_fov', str(10))
+                    bp.set_attribute('lower_fov', str(-30))
+                    bp.set_attribute('points_per_second', str(600000))
+                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                elif sensor_spec['type'].startswith('sensor.lidar'):
+                    bp.set_attribute('range', str(85))
+                    bp.set_attribute('rotation_frequency', str(10))  # default: 10, change to 20 to generate 360 degree LiDAR point cloud
+                    bp.set_attribute('channels', str(64))
+                    bp.set_attribute('upper_fov', str(10))
+                    bp.set_attribute('lower_fov', str(-30))
+                    bp.set_attribute('points_per_second', str(600000))
+                    bp.set_attribute('atmosphere_attenuation_rate', str(0.004))
+                    bp.set_attribute('dropoff_general_rate', str(0.45))
+                    bp.set_attribute('dropoff_intensity_limit', str(0.8))
+                    bp.set_attribute('dropoff_zero_intensity', str(0.4))
+                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                elif sensor_spec['type'].startswith('sensor.other.radar'):
+                    bp.set_attribute('horizontal_fov', str(sensor_spec['fov']))  # degrees
+                    bp.set_attribute('vertical_fov', str(sensor_spec['fov']))  # degrees
+                    bp.set_attribute('points_per_second', '1500')
+                    bp.set_attribute('range', '100')  # meters
+
+                    sensor_location = carla.Location(x=sensor_spec['x'],
+                                                     y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+
+                elif sensor_spec['type'].startswith('sensor.other.gnss'):
+                    # bp.set_attribute('noise_alt_stddev', str(0.000005))
+                    # bp.set_attribute('noise_lat_stddev', str(0.000005))
+                    # bp.set_attribute('noise_lon_stddev', str(0.000005))
+                    bp.set_attribute('noise_alt_bias', str(0.0))
+                    bp.set_attribute('noise_lat_bias', str(0.0))
+                    bp.set_attribute('noise_lon_bias', str(0.0))
+
+                    sensor_location = carla.Location(x=sensor_spec['x'],
+                                                     y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation()
+
+                elif sensor_spec['type'].startswith('sensor.other.imu'):
+                    bp.set_attribute('noise_accel_stddev_x', str(0.001))
+                    bp.set_attribute('noise_accel_stddev_y', str(0.001))
+                    bp.set_attribute('noise_accel_stddev_z', str(0.015))
+                    bp.set_attribute('noise_gyro_stddev_x', str(0.001))
+                    bp.set_attribute('noise_gyro_stddev_y', str(0.001))
+                    bp.set_attribute('noise_gyro_stddev_z', str(0.001))
+
+                    sensor_location = carla.Location(x=sensor_spec['x'],
+                                                     y=sensor_spec['y'],
+                                                     z=sensor_spec['z'])
+                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                     roll=sensor_spec['roll'],
+                                                     yaw=sensor_spec['yaw'])
+                # create sensor
+                sensor_transform = carla.Transform(sensor_location, sensor_rotation)
+                sensor = CarlaDataProvider.get_world().spawn_actor(bp, sensor_transform, vehicle)
+            # setup callback
+            sensor.listen(CallBack(sensor_spec['id'], sensor_spec['type'], sensor, self._agent.sensor_interface))
+            self._sensors_list.append(sensor)
+
+        # Tick once to spawn the sensors
+        CarlaDataProvider.get_world().tick()
+
+    @staticmethod
+    def validate_sensor_configuration(sensors, agent_track, selected_track):
+        """
+        Ensure that the sensor configuration is valid, in case the challenge mode is used
+        Returns true on valid configuration, false otherwise
+        """
+        if Track(selected_track) != agent_track:
+            raise SensorConfigurationInvalid("You are submitting to the wrong track [{}]!".format(Track(selected_track)))
+
+        sensor_count = {}
+        sensor_ids = []
+
+        for sensor in sensors:
+
+            # Check if the id has already been used
+            sensor_id = sensor['id']
+            if sensor_id in sensor_ids:
+                raise SensorConfigurationInvalid("Duplicated sensor tag [{}]".format(sensor_id))
+            else:
+                sensor_ids.append(sensor_id)
+
+            # Check if the sensor is valid for the agent's track
+            if agent_track == Track.SENSORS:
+                if sensor['type'].startswith('sensor.opendrive_map'):
+                    raise SensorConfigurationInvalid("Illegal sensor used for Track [{}]!".format(agent_track))
+
+            # Check the sensor's validity
+            if sensor['type'] not in AgentWrapper.allowed_sensors:
+                raise SensorConfigurationInvalid("Illegal sensor used. {} are not allowed!".format(sensor['type']))
+
+            # Check the extrinsics of the sensor
+            if 'x' in sensor and 'y' in sensor and 'z' in sensor:
+                if math.sqrt(sensor['x']**2 + sensor['y']**2 + sensor['z']**2) > MAX_ALLOWED_RADIUS_SENSOR:
+                    raise SensorConfigurationInvalid(
+                        "Illegal sensor extrinsics used for Track [{}]!".format(agent_track))
+
+            # Check the amount of sensors
+            if sensor['type'] in sensor_count:
+                sensor_count[sensor['type']] += 1
+            else:
+                sensor_count[sensor['type']] = 1
+
+        for sensor_type, max_instances_allowed in SENSORS_LIMITS.items():
+            if sensor_type in sensor_count and sensor_count[sensor_type] > max_instances_allowed:
+                raise SensorConfigurationInvalid(
+                    "Too many {} used! "
+                    "Maximum number allowed is {}, but {} were requested.".format(sensor_type,
+                                                                                  max_instances_allowed,
+                                                                                  sensor_count[sensor_type]))
+
+    def cleanup(self):
+        """
+        Remove and destroy all sensors
+        """
+        for i, _ in enumerate(self._sensors_list):
+            if self._sensors_list[i] is not None:
+                self._sensors_list[i].stop()
+                self._sensors_list[i].destroy()
+                self._sensors_list[i] = None
+        self._sensors_list = []
agent() + Returns the next vehicle controls + """ + input_data = self.sensor_interface.get_data() + + timestamp = GameTime.get_time() + + if not self.wallclock_t0: + self.wallclock_t0 = GameTime.get_wallclocktime() + wallclock = GameTime.get_wallclocktime() + wallclock_diff = (wallclock - self.wallclock_t0).total_seconds() + + # print('======[Agent] Wallclock_time = {} / {} / Sim_time = {} / {}x'.format(wallclock, wallclock_diff, timestamp, timestamp/(wallclock_diff+0.001))) + + control = self.run_step(input_data, timestamp) + control.manual_gear_shift = False + + return control + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): + """ + Set the plan (route) for the agent + """ + ds_ids = downsample_route(global_plan_world_coord, 50) + self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] + self._global_plan = [global_plan_gps[x] for x in ds_ids] diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/dummy_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/dummy_agent.py new file mode 100644 index 00000000..46d39eb6 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/dummy_agent.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python + +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides a dummy agent to control the ego vehicle +""" + +from __future__ import print_function + +import carla + +from leaderboard.autoagents.autonomous_agent import AutonomousAgent, Track + +def get_entry_point(): + return 'DummyAgent' + +class DummyAgent(AutonomousAgent): + + """ + Dummy autonomous agent to control the ego vehicle + """ + + def setup(self, path_to_conf_file): + """ + Setup the agent parameters + """ + self.track = Track.MAP + + def sensors(self): + """ + Define the sensor suite required by the agent + + :return: a list containing the required sensors in the following format: + + [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, + + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, + + {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, + 'id': 'LIDAR'} + ] + """ + + sensors = [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'}, + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': -45.0, + 'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'}, + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0, + 'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'}, + {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, + 'yaw': -45.0, 'id': 'LIDAR'}, + {'type': 'sensor.other.radar', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, + 'yaw': -45.0, 'fov': 30, 'id': 'RADAR'}, + {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}, + {'type': 'sensor.other.imu', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, + 'yaw': -45.0, 'id': 'IMU'}, + {'type': 'sensor.opendrive_map', 'reading_frequency': 1, 'id': 'OpenDRIVE'}, + ] + + return 
sensors + + def run_step(self, input_data, timestamp): + """ + Execute one step of navigation. + """ + print("=====================>") + for key, val in input_data.items(): + if hasattr(val[1], 'shape'): + shape = val[1].shape + print("[{} -- {:06d}] with shape {}".format(key, val[0], shape)) + else: + print("[{} -- {:06d}] ".format(key, val[0])) + print("<=====================") + + # DO SOMETHING SMART + + # RETURN CONTROL + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + control.hand_brake = False + + return control diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent.py new file mode 100644 index 00000000..742dff21 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python + +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides a human agent to control the ego vehicle via keyboard +""" + +import time +import json +from threading import Thread +import cv2 +import numpy as np + +try: + import pygame + from pygame.locals import K_DOWN + from pygame.locals import K_LEFT + from pygame.locals import K_RIGHT + from pygame.locals import K_SPACE + from pygame.locals import K_UP + from pygame.locals import K_a + from pygame.locals import K_d + from pygame.locals import K_s + from pygame.locals import K_w + from pygame.locals import K_q +except ImportError: + raise RuntimeError('cannot import pygame, make sure pygame package is installed') + +import carla + +from leaderboard.autoagents.autonomous_agent import AutonomousAgent, Track + + +def get_entry_point(): + return 'HumanAgent' + +class HumanInterface(object): + + """ + Class to control a vehicle manually for debugging purposes + """ + + def __init__(self): + self._width = 800 + self._height = 600 + self._surface = None + + pygame.init() + pygame.font.init() + self._clock = pygame.time.Clock() + self._display = pygame.display.set_mode((self._width, self._height), pygame.HWSURFACE | pygame.DOUBLEBUF) + pygame.display.set_caption("Human Agent") + + def run_interface(self, input_data): + """ + Run the GUI + """ + + # process sensor data + image_center = input_data['Center'][1][:, :, -2::-1] + + # display image + self._surface = pygame.surfarray.make_surface(image_center.swapaxes(0, 1)) + if self._surface is not None: + self._display.blit(self._surface, (0, 0)) + pygame.display.flip() + + def _quit(self): + pygame.quit() + + +class HumanAgent(AutonomousAgent): + + """ + Human agent to control the ego vehicle via keyboard + """ + + current_control = None + agent_engaged = False + + def setup(self, path_to_conf_file): + """ + Setup the agent parameters + """ + self.track = Track.SENSORS + + self.agent_engaged = False + self._hic = HumanInterface() + self._controller = KeyboardControl(path_to_conf_file) + self._prev_timestamp = 0 + + def sensors(self): + """ + Define the sensor suite required by the agent + + :return: a list containing the required sensors in the following format: + + [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, + + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, + + {'type': 
'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, + 'id': 'LIDAR'} + ] + """ + + sensors = [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'}, + {'type': 'sensor.speedometer', 'reading_frequency': 20, 'id': 'speed'}, + ] + + return sensors + + def run_step(self, input_data, timestamp): + """ + Execute one step of navigation. + """ + self.agent_engaged = True + self._hic.run_interface(input_data) + + control = self._controller.parse_events(timestamp - self._prev_timestamp) + self._prev_timestamp = timestamp + + return control + + def destroy(self): + """ + Cleanup + """ + self._hic._quit() # shut down the pygame window + + +class KeyboardControl(object): + + """ + Keyboard control for the human agent + """ + + def __init__(self, path_to_conf_file): + """ + Init + """ + self._control = carla.VehicleControl() + self._steer_cache = 0.0 + self._clock = pygame.time.Clock() + + # Get the mode + if path_to_conf_file: + + with open(path_to_conf_file, "r") as f: + lines = f.read().split("\n") + self._mode = lines[0].split(" ")[1] + self._endpoint = lines[1].split(" ")[1] + + # Get the needed vars + if self._mode == "log": + self._log_data = {'records': []} + + elif self._mode == "playback": + self._index = 0 + self._control_list = [] + + with open(self._endpoint) as fd: + try: + self._records = json.load(fd) + self._json_to_control() + except json.JSONDecodeError: + pass + else: + self._mode = "normal" + self._endpoint = None + + def _json_to_control(self): + + # transform strs into VehicleControl commands + for entry in self._records['records']: + control = carla.VehicleControl(throttle=entry['control']['throttle'], + steer=entry['control']['steer'], + brake=entry['control']['brake'], + hand_brake=entry['control']['hand_brake'], + reverse=entry['control']['reverse'], + manual_gear_shift=entry['control']['manual_gear_shift'], + gear=entry['control']['gear']) + self._control_list.append(control) + + def parse_events(self, timestamp): + """ + Parse the keyboard events and set the vehicle controls accordingly + """ + # Move the vehicle + if self._mode == "playback": + self._parse_json_control() + else: + self._parse_vehicle_keys(pygame.key.get_pressed(), timestamp*1000) + + # Record the control + if self._mode == "log": + self._record_control() + + return self._control + + def _parse_vehicle_keys(self, keys, milliseconds): + """ + Calculate new vehicle controls based on input keys + """ + + for event in pygame.event.get(): + if event.type == pygame.QUIT: + return + elif event.type == pygame.KEYUP: + if event.key == K_q: + self._control.gear = 1 if self._control.reverse else -1 + self._control.reverse = self._control.gear < 0 + + if keys[K_UP] or keys[K_w]: + self._control.throttle = 0.6 + else: + self._control.throttle = 0.0 + + steer_increment = 3e-4 * milliseconds + if keys[K_LEFT] or keys[K_a]: + self._steer_cache -= steer_increment + elif keys[K_RIGHT] or keys[K_d]: + self._steer_cache += steer_increment + else: + self._steer_cache = 0.0 + + # clamp the cached value before deriving the steering command + self._steer_cache = min(0.95, max(-0.95, self._steer_cache)) + self._control.steer = round(self._steer_cache, 1) + self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0 + self._control.hand_brake = keys[K_SPACE] + + def _parse_json_control(self): + + if self._index < len(self._control_list): + self._control = self._control_list[self._index] + self._index += 1 + else: + print("JSON file has no more entries") + + def 
_record_control(self): + new_record = { + 'control': { + 'throttle': self._control.throttle, + 'steer': self._control.steer, + 'brake': self._control.brake, + 'hand_brake': self._control.hand_brake, + 'reverse': self._control.reverse, + 'manual_gear_shift': self._control.manual_gear_shift, + 'gear': self._control.gear + } + } + + self._log_data['records'].append(new_record) + + def __del__(self): + # Get ready to log user commands + if self._mode == "log" and self._log_data: + with open(self._endpoint, 'w') as fd: + json.dump(self._log_data, fd, indent=4, sort_keys=True) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent_config.txt b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent_config.txt new file mode 100644 index 00000000..bd1e6d37 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/human_agent_config.txt @@ -0,0 +1,9 @@ +mode: log # Either 'log' or 'playback' +file: test.json # path to the file with the controls + + +This is the configuration file of the human agent. This agent incorporates two modes. +The log mode parses the controls given to the vehicle into a dictionary and records them into a .json file. +This file can be read by the playback mode to control the vehicle in the same way, resulting in a deterministic agent. +The file name can be chosen, and is the second argument of this config file. + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/npc_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/npc_agent.py new file mode 100644 index 00000000..01060cd3 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/npc_agent.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python + +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides an NPC agent to control the ego vehicle +""" + +from __future__ import print_function + +import carla +from agents.navigation.basic_agent import BasicAgent +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider + +from leaderboard.autoagents.autonomous_agent import AutonomousAgent, Track + +def get_entry_point(): + return 'NpcAgent' + +class NpcAgent(AutonomousAgent): + + """ + NPC autonomous agent to control the ego vehicle + """ + + _agent = None + _route_assigned = False + + def setup(self, path_to_conf_file): + """ + Setup the agent parameters + """ + self.track = Track.SENSORS + + self._route_assigned = False + self._agent = None + + def sensors(self): + """ + Define the sensor suite required by the agent + + :return: a list containing the required sensors in the following format: + + [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, + + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, + + {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, + 'id': 'LIDAR'} + ] + """ + + sensors = [ + {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, + ] + + return sensors + + def run_step(self, input_data, timestamp): + """ + Execute one step of navigation. 
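+ On the first calls this locates the hero actor and hands the global plan to a BasicAgent; afterwards the BasicAgent produces the control.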
+ """ + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + control.hand_brake = False + + if not self._agent: + hero_actor = None + for actor in CarlaDataProvider.get_world().get_actors(): + if 'role_name' in actor.attributes and actor.attributes['role_name'] == 'hero': + hero_actor = actor + break + if hero_actor: + self._agent = BasicAgent(hero_actor) + + return control + + if not self._route_assigned: + if self._global_plan: + plan = [] + + prev = None + for transform, _ in self._global_plan_world_coord: + wp = CarlaDataProvider.get_map().get_waypoint(transform.location) + if prev: + route_segment = self._agent._trace_route(prev, wp) + plan.extend(route_segment) + + prev = wp + + #loc = plan[-1][0].transform.location + #self._agent.set_destination([loc.x, loc.y, loc.z]) + self._agent._local_planner.set_global_plan(plan) # pylint: disable=protected-access + self._route_assigned = True + + else: + control = self._agent.run_step() + + return control diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/ros_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/ros_agent.py new file mode 100644 index 00000000..75ceb906 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/autoagents/ros_agent.py @@ -0,0 +1,451 @@ +#!/usr/bin/env python +# +# Copyright (c) 2019 Intel Corporation +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . +# + +""" +This module provides a ROS autonomous agent interface to control the ego vehicle via a ROS stack +""" + +import math +import os +import subprocess +import signal +import threading +import time + +import numpy + +import carla + +import rospy +from cv_bridge import CvBridge +from geometry_msgs.msg import PoseStamped +from nav_msgs.msg import Odometry, Path +from rosgraph_msgs.msg import Clock +from sensor_msgs.msg import Image, PointCloud2, NavSatFix, NavSatStatus, CameraInfo +from sensor_msgs.point_cloud2 import create_cloud_xyz32 +from std_msgs.msg import Header, String +import tf +# pylint: disable=line-too-long +from carla_msgs.msg import CarlaEgoVehicleStatus, CarlaEgoVehicleInfo, CarlaEgoVehicleInfoWheel, CarlaEgoVehicleControl, CarlaWorldInfo +# pylint: enable=line-too-long + +from leaderboard.autoagents.autonomous_agent import AutonomousAgent, Track + + +class RosAgent(AutonomousAgent): + + """ + Base class for ROS-based stacks. + + Derive from it and implement the sensors() method. + + Please define TEAM_CODE_ROOT in your environment. + The stack is started by executing $TEAM_CODE_ROOT/start.sh + + The sensor data is published on similar topics as with the carla-ros-bridge. You can find details about + the utilized datatypes there. + + This agent expects a roscore to be running. 
+ """ + + speed = None + current_control = None + stack_process = None + timestamp = None + current_map_name = None + step_mode_possible = None + vehicle_info_publisher = None + global_plan_published = None + + def setup(self, path_to_conf_file): + """ + setup agent + """ + self.track = Track.MAP + self.stack_thread = None + + # get start_script from environment + team_code_path = os.environ['TEAM_CODE_ROOT'] + if not team_code_path or not os.path.exists(team_code_path): + raise IOError("Path '{}' defined by TEAM_CODE_ROOT invalid".format(team_code_path)) + start_script = "{}/start.sh".format(team_code_path) + if not os.path.exists(start_script): + raise IOError("File '{}' defined by TEAM_CODE_ROOT invalid".format(start_script)) + + # set use_sim_time via commandline before init-node + process = subprocess.Popen( + "rosparam set use_sim_time true", shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) + process.wait() + if process.returncode: + raise RuntimeError("Could not set use_sim_time") + + # initialize ros node + rospy.init_node('ros_agent', anonymous=True) + + # publish first clock value '0' + self.clock_publisher = rospy.Publisher('clock', Clock, queue_size=10, latch=True) + self.clock_publisher.publish(Clock(rospy.Time.from_sec(0))) + + # execute script that starts the ad stack (remains running) + rospy.loginfo("Executing stack...") + self.stack_process = subprocess.Popen(start_script, shell=True, preexec_fn=os.setpgrp) + + self.vehicle_control_event = threading.Event() + self.timestamp = None + self.speed = 0 + self.global_plan_published = False + + self.vehicle_info_publisher = None + self.vehicle_status_publisher = None + self.odometry_publisher = None + self.world_info_publisher = None + self.map_file_publisher = None + self.current_map_name = None + self.tf_broadcaster = None + self.step_mode_possible = False + + self.vehicle_control_subscriber = rospy.Subscriber( + '/carla/ego_vehicle/vehicle_control_cmd', CarlaEgoVehicleControl, self.on_vehicle_control) + + self.current_control = carla.VehicleControl() + + self.waypoint_publisher = rospy.Publisher( + '/carla/ego_vehicle/waypoints', Path, queue_size=1, latch=True) + + self.publisher_map = {} + self.id_to_sensor_type_map = {} + self.id_to_camera_info_map = {} + self.cv_bridge = CvBridge() + + # setup ros publishers for sensors + # pylint: disable=line-too-long + for sensor in self.sensors(): + self.id_to_sensor_type_map[sensor['id']] = sensor['type'] + if sensor['type'] == 'sensor.camera.rgb': + self.publisher_map[sensor['id']] = rospy.Publisher( + '/carla/ego_vehicle/camera/rgb/' + sensor['id'] + "/image_color", Image, queue_size=1, latch=True) + self.id_to_camera_info_map[sensor['id']] = self.build_camera_info(sensor) + self.publisher_map[sensor['id'] + '_info'] = rospy.Publisher( + '/carla/ego_vehicle/camera/rgb/' + sensor['id'] + "/camera_info", CameraInfo, queue_size=1, latch=True) + elif sensor['type'] == 'sensor.lidar.ray_cast': + self.publisher_map[sensor['id']] = rospy.Publisher( + '/carla/ego_vehicle/lidar/' + sensor['id'] + "/point_cloud", PointCloud2, queue_size=1, latch=True) + elif sensor['type'] == 'sensor.other.gnss': + self.publisher_map[sensor['id']] = rospy.Publisher( + '/carla/ego_vehicle/gnss/' + sensor['id'] + "/fix", NavSatFix, queue_size=1, latch=True) + elif sensor['type'] == 'sensor.can_bus': + if not self.vehicle_info_publisher: + self.vehicle_info_publisher = rospy.Publisher( + '/carla/ego_vehicle/vehicle_info', CarlaEgoVehicleInfo, queue_size=1, latch=True) + if not 
self.vehicle_status_publisher: + self.vehicle_status_publisher = rospy.Publisher( + '/carla/ego_vehicle/vehicle_status', CarlaEgoVehicleStatus, queue_size=1, latch=True) + elif sensor['type'] == 'sensor.hd_map': + if not self.odometry_publisher: + self.odometry_publisher = rospy.Publisher( + '/carla/ego_vehicle/odometry', Odometry, queue_size=1, latch=True) + if not self.world_info_publisher: + self.world_info_publisher = rospy.Publisher( + '/carla/world_info', CarlaWorldInfo, queue_size=1, latch=True) + if not self.map_file_publisher: + self.map_file_publisher = rospy.Publisher('/carla/map_file', String, queue_size=1, latch=True) + if not self.tf_broadcaster: + self.tf_broadcaster = tf.TransformBroadcaster() + else: + raise TypeError("Invalid sensor type: {}".format(sensor['type'])) + # pylint: enable=line-too-long + + def destroy(self): + """ + Cleanup of all ROS publishers + """ + if self.stack_process and self.stack_process.poll() is None: + rospy.loginfo("Sending SIGTERM to stack...") + os.killpg(os.getpgid(self.stack_process.pid), signal.SIGTERM) + rospy.loginfo("Waiting for termination of stack...") + self.stack_process.wait() + time.sleep(5) + rospy.loginfo("Terminated stack.") + + rospy.loginfo("Stack is no longer running") + self.world_info_publisher.unregister() + self.map_file_publisher.unregister() + self.vehicle_status_publisher.unregister() + self.vehicle_info_publisher.unregister() + self.waypoint_publisher.unregister() + self.stack_process = None + rospy.loginfo("Cleanup finished") + + def on_vehicle_control(self, data): + """ + callback if a new vehicle control command is received + """ + cmd = carla.VehicleControl() + cmd.throttle = data.throttle + cmd.steer = data.steer + cmd.brake = data.brake + cmd.hand_brake = data.hand_brake + cmd.reverse = data.reverse + cmd.gear = data.gear + cmd.manual_gear_shift = data.manual_gear_shift + self.current_control = cmd + if not self.vehicle_control_event.is_set(): + self.vehicle_control_event.set() + # After the first vehicle control is sent out, it is possible to use the stepping mode + self.step_mode_possible = True + + def build_camera_info(self, attributes): # pylint: disable=no-self-use + """ + Private function to compute camera info + + camera info doesn't change over time + """ + camera_info = CameraInfo() + # store info without header + camera_info.header = None + camera_info.width = int(attributes['width']) + camera_info.height = int(attributes['height']) + camera_info.distortion_model = 'plumb_bob' + cx = camera_info.width / 2.0 + cy = camera_info.height / 2.0 + fx = camera_info.width / ( + 2.0 * math.tan(float(attributes['fov']) * math.pi / 360.0)) + fy = fx + camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1] + camera_info.D = [0, 0, 0, 0, 0] + camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0] + camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0] + return camera_info + + def publish_plan(self): + """ + publish the global plan + """ + msg = Path() + msg.header.frame_id = "/map" + msg.header.stamp = rospy.Time.now() + for wp in self._global_plan_world_coord: + pose = PoseStamped() + pose.pose.position.x = wp[0].location.x + pose.pose.position.y = -wp[0].location.y + pose.pose.position.z = wp[0].location.z + quaternion = tf.transformations.quaternion_from_euler( + 0, 0, -math.radians(wp[0].rotation.yaw)) + pose.pose.orientation.x = quaternion[0] + pose.pose.orientation.y = quaternion[1] + pose.pose.orientation.z = quaternion[2] + pose.pose.orientation.w = quaternion[3] + msg.poses.append(pose) + + 
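# note: y and yaw above are negated to convert CARLA's left-handed frame into the right-handed ROS frame + 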
rospy.loginfo("Publishing Plan...") + self.waypoint_publisher.publish(msg) + + def sensors(self): + """ + Define the sensor suite required by the agent + + :return: a list containing the required sensors + """ + raise NotImplementedError( + "This function has to be implemented by the derived classes") + + def get_header(self): + """ + Returns ROS message header + """ + header = Header() + header.stamp = rospy.Time.from_sec(self.timestamp) + return header + + def publish_lidar(self, sensor_id, data): + """ + Function to publish lidar data + """ + header = self.get_header() + header.frame_id = 'ego_vehicle/lidar/{}'.format(sensor_id) + + lidar_data = numpy.frombuffer( + data, dtype=numpy.float32) + lidar_data = numpy.reshape( + lidar_data, (int(lidar_data.shape[0] / 3), 3)) + # we take the oposite of y axis + # (as lidar point are express in left handed coordinate system, and ros need right handed) + # we need a copy here, because the data are read only in carla numpy + # array + lidar_data = -1.0 * lidar_data + # we also need to permute x and y + lidar_data = lidar_data[..., [1, 0, 2]] + msg = create_cloud_xyz32(header, lidar_data) + self.publisher_map[sensor_id].publish(msg) + + def publish_gnss(self, sensor_id, data): + """ + Function to publish gnss data + """ + msg = NavSatFix() + msg.header = self.get_header() + msg.header.frame_id = 'gps' + msg.latitude = data[0] + msg.longitude = data[1] + msg.altitude = data[2] + msg.status.status = NavSatStatus.STATUS_SBAS_FIX + # pylint: disable=line-too-long + msg.status.service = NavSatStatus.SERVICE_GPS | NavSatStatus.SERVICE_GLONASS | NavSatStatus.SERVICE_COMPASS | NavSatStatus.SERVICE_GALILEO + # pylint: enable=line-too-long + self.publisher_map[sensor_id].publish(msg) + + def publish_camera(self, sensor_id, data): + """ + Function to publish camera data + """ + msg = self.cv_bridge.cv2_to_imgmsg(data, encoding='bgra8') + # the camera data is in respect to the camera's own frame + msg.header = self.get_header() + msg.header.frame_id = 'ego_vehicle/camera/rgb/{}'.format(sensor_id) + + cam_info = self.id_to_camera_info_map[sensor_id] + cam_info.header = msg.header + self.publisher_map[sensor_id + '_info'].publish(cam_info) + self.publisher_map[sensor_id].publish(msg) + + def publish_can(self, sensor_id, data): + """ + publish can data + """ + if not self.vehicle_info_publisher: + self.vehicle_info_publisher = rospy.Publisher( + '/carla/ego_vehicle/vehicle_info', CarlaEgoVehicleInfo, queue_size=1, latch=True) + info_msg = CarlaEgoVehicleInfo() + for wheel in data['wheels']: + wheel_info = CarlaEgoVehicleInfoWheel() + wheel_info.tire_friction = wheel['tire_friction'] + wheel_info.damping_rate = wheel['damping_rate'] + wheel_info.steer_angle = wheel['steer_angle'] + wheel_info.disable_steering = wheel['disable_steering'] + info_msg.wheels.append(wheel_info) + info_msg.max_rpm = data['max_rpm'] + info_msg.moi = data['moi'] + info_msg.damping_rate_full_throttle = data['damping_rate_full_throttle'] + info_msg.damping_rate_zero_throttle_clutch_disengaged = data['damping_rate_zero_throttle_clutch_disengaged'] + info_msg.use_gear_autobox = data['use_gear_autobox'] + info_msg.clutch_strength = data['clutch_strength'] + info_msg.mass = data['mass'] + info_msg.drag_coefficient = data['drag_coefficient'] + info_msg.center_of_mass.x = data['center_of_mass']['x'] + info_msg.center_of_mass.y = data['center_of_mass']['y'] + info_msg.center_of_mass.z = data['center_of_mass']['z'] + self.vehicle_info_publisher.publish(info_msg) + msg = CarlaEgoVehicleStatus() + 
msg.header = self.get_header() + msg.velocity = data['speed'] + self.speed = data['speed'] + # todo msg.acceleration + msg.control.throttle = self.current_control.throttle + msg.control.steer = self.current_control.steer + msg.control.brake = self.current_control.brake + msg.control.hand_brake = self.current_control.hand_brake + msg.control.reverse = self.current_control.reverse + msg.control.gear = self.current_control.gear + msg.control.manual_gear_shift = self.current_control.manual_gear_shift + + self.vehicle_status_publisher.publish(msg) + + def publish_hd_map(self, sensor_id, data): + """ + publish hd map data + """ + roll = -math.radians(data['transform']['roll']) + pitch = -math.radians(data['transform']['pitch']) + yaw = -math.radians(data['transform']['yaw']) + quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw) + x = data['transform']['x'] + y = -data['transform']['y'] + z = data['transform']['z'] + + if self.odometry_publisher: + + odometry = Odometry() + odometry.header.frame_id = 'map' + odometry.header.stamp = rospy.Time.from_sec(self.timestamp) + odometry.child_frame_id = 'base_link' + odometry.pose.pose.position.x = x + odometry.pose.pose.position.y = y + odometry.pose.pose.position.z = z + + odometry.pose.pose.orientation.x = quat[0] + odometry.pose.pose.orientation.y = quat[1] + odometry.pose.pose.orientation.z = quat[2] + odometry.pose.pose.orientation.w = quat[3] + + odometry.twist.twist.linear.x = self.speed + odometry.twist.twist.linear.y = 0 + odometry.twist.twist.linear.z = 0 + + self.odometry_publisher.publish(odometry) + + if self.world_info_publisher: + # extract map name + map_name = os.path.basename(data['map_file'])[:-4] + if self.current_map_name != map_name: + self.current_map_name = map_name + world_info = CarlaWorldInfo() + world_info.map_name = self.current_map_name + world_info.opendrive = data['opendrive'] + self.world_info_publisher.publish(world_info) + if self.map_file_publisher: + self.map_file_publisher.publish(data['map_file']) + + def use_stepping_mode(self): # pylint: disable=no-self-use + """ + Overload this function to use stepping mode! + """ + return False + + def run_step(self, input_data, timestamp): + """ + Execute one step of navigation. 
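+ Publishes the current simulation time and all received sensor data to ROS, and returns the most recent control command sent by the stack.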
+ """ + self.vehicle_control_event.clear() + self.timestamp = timestamp + self.clock_publisher.publish(Clock(rospy.Time.from_sec(timestamp))) + + # check if stack is still running + if self.stack_process and self.stack_process.poll() is not None: + raise RuntimeError("Stack exited with: {} {}".format( + self.stack_process.returncode, self.stack_process.communicate()[0])) + + # publish global plan to ROS once + if self._global_plan_world_coord and not self.global_plan_published: + self.global_plan_published = True + self.publish_plan() + + new_data_available = False + + # publish data of all sensors + for key, val in input_data.items(): + new_data_available = True + sensor_type = self.id_to_sensor_type_map[key] + if sensor_type == 'sensor.camera.rgb': + self.publish_camera(key, val[1]) + elif sensor_type == 'sensor.lidar.ray_cast': + self.publish_lidar(key, val[1]) + elif sensor_type == 'sensor.other.gnss': + self.publish_gnss(key, val[1]) + elif sensor_type == 'sensor.can_bus': + self.publish_can(key, val[1]) + elif sensor_type == 'sensor.hd_map': + self.publish_hd_map(key, val[1]) + else: + raise TypeError("Invalid sensor type: {}".format(sensor_type)) + + if self.use_stepping_mode(): + if self.step_mode_possible and new_data_available: + self.vehicle_control_event.wait() + # if the stepping mode is not used or active, there is no need to wait here + + return self.current_control diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/sensor_interface.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/sensor_interface.py new file mode 100644 index 00000000..571994d0 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/envs/sensor_interface.py @@ -0,0 +1,250 @@ +import copy +import logging +import numpy as np +import os +import time +from threading import Thread + +from queue import Queue +from queue import Empty + +import carla +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from srunner.scenariomanager.timer import GameTime + + +def threaded(fn): + def wrapper(*args, **kwargs): + thread = Thread(target=fn, args=args, kwargs=kwargs) + thread.setDaemon(True) + thread.start() + + return thread + return wrapper + + +class SensorConfigurationInvalid(Exception): + """ + Exceptions thrown when the sensors used by the agent are not allowed for that specific submissions + """ + + def __init__(self, message): + super(SensorConfigurationInvalid, self).__init__(message) + + +class SensorReceivedNoData(Exception): + """ + Exceptions thrown when the sensors used by the agent take too long to receive data + """ + + def __init__(self, message): + super(SensorReceivedNoData, self).__init__(message) + + +class GenericMeasurement(object): + def __init__(self, data, frame): + self.data = data + self.frame = frame + + +class BaseReader(object): + def __init__(self, vehicle, reading_frequency=1.0): + self._vehicle = vehicle + self._reading_frequency = reading_frequency + self._callback = None + self._run_ps = True + self.run() + + def __call__(self): + pass + + @threaded + def run(self): + first_time = True + latest_time = GameTime.get_time() + while self._run_ps: + if self._callback is not None: + current_time = GameTime.get_time() + + # Second part forces the sensors to send data at the first tick, regardless of 
frequency + if current_time - latest_time > (1 / self._reading_frequency) \ + or (first_time and GameTime.get_frame() != 0): + self._callback(GenericMeasurement(self.__call__(), GameTime.get_frame())) + latest_time = GameTime.get_time() + first_time = False + + else: + time.sleep(0.001) + + def listen(self, callback): + # Register the callback that will receive the data produced by this reader. + self._callback = callback + + def stop(self): + self._run_ps = False + + def destroy(self): + self._run_ps = False + + +class SpeedometerReader(BaseReader): + """ + Sensor to measure the speed of the vehicle. + """ + MAX_CONNECTION_ATTEMPTS = 10 + + def _get_forward_speed(self, transform=None, velocity=None): + """ Convert the vehicle transform directly to forward speed """ + if not velocity: + velocity = self._vehicle.get_velocity() + if not transform: + transform = self._vehicle.get_transform() + + vel_np = np.array([velocity.x, velocity.y, velocity.z]) + pitch = np.deg2rad(transform.rotation.pitch) + yaw = np.deg2rad(transform.rotation.yaw) + orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)]) + speed = np.dot(vel_np, orientation) + return speed + + def __call__(self): + """ We convert the vehicle physics information into a convenient dictionary """ + + # protect this access against timeout + attempts = 0 + velocity, transform = None, None # keep the names defined even if every attempt fails + while attempts < self.MAX_CONNECTION_ATTEMPTS: + try: + velocity = self._vehicle.get_velocity() + transform = self._vehicle.get_transform() + break + except Exception: + attempts += 1 + time.sleep(0.2) + continue + + return {'speed': self._get_forward_speed(transform=transform, velocity=velocity)} + + +class OpenDriveMapReader(BaseReader): + def __call__(self): + return {'opendrive': CarlaDataProvider.get_map().to_opendrive()} + + +class CallBack(object): + def __init__(self, tag, sensor_type, sensor, data_provider): + self._tag = tag + self._data_provider = data_provider + + self._data_provider.register_sensor(tag, sensor_type, sensor) + + def __call__(self, data): + if isinstance(data, carla.libcarla.Image): + self._parse_image_cb(data, self._tag) + elif isinstance(data, carla.libcarla.LidarMeasurement): + self._parse_lidar_cb(data, self._tag) + elif isinstance(data, carla.libcarla.SemanticLidarMeasurement): + self._parse_semantic_lidar_cb(data, self._tag) + elif isinstance(data, carla.libcarla.RadarMeasurement): + self._parse_radar_cb(data, self._tag) + elif isinstance(data, carla.libcarla.GnssMeasurement): + self._parse_gnss_cb(data, self._tag) + elif isinstance(data, carla.libcarla.IMUMeasurement): + self._parse_imu_cb(data, self._tag) + elif isinstance(data, GenericMeasurement): + self._parse_pseudosensor(data, self._tag) + else: + logging.error('No callback method for this sensor.') + + # Parsing CARLA physical Sensors + def _parse_image_cb(self, image, tag): + array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) + array = copy.deepcopy(array) + array = np.reshape(array, (image.height, image.width, 4)) + self._data_provider.update_sensor(tag, array, image.frame) + + def _parse_lidar_cb(self, lidar_data, tag): + points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4')) + points = copy.deepcopy(points) + points = np.reshape(points, (int(points.shape[0] / 4), 4)) + self._data_provider.update_sensor(tag, points, lidar_data.frame) + + def _parse_semantic_lidar_cb(self, semantic_lidar_data, tag): + points = np.frombuffer(semantic_lidar_data.raw_data, dtype=np.dtype('f4')) + points = copy.deepcopy(points) + points = np.reshape(points, 
(int(points.shape[0] / 6), 6)) + self._data_provider.update_sensor(tag, points, semantic_lidar_data.frame) + + def _parse_radar_cb(self, radar_data, tag): + # [depth, azimuth, altitude, velocity] + points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4')) + points = copy.deepcopy(points) + points = np.reshape(points, (int(points.shape[0] / 4), 4)) + points = np.flip(points, 1) + self._data_provider.update_sensor(tag, points, radar_data.frame) + + def _parse_gnss_cb(self, gnss_data, tag): + array = np.array([gnss_data.latitude, + gnss_data.longitude, + gnss_data.altitude], dtype=np.float64) + self._data_provider.update_sensor(tag, array, gnss_data.frame) + + def _parse_imu_cb(self, imu_data, tag): + array = np.array([imu_data.accelerometer.x, + imu_data.accelerometer.y, + imu_data.accelerometer.z, + imu_data.gyroscope.x, + imu_data.gyroscope.y, + imu_data.gyroscope.z, + imu_data.compass, + ], dtype=np.float64) + self._data_provider.update_sensor(tag, array, imu_data.frame) + + def _parse_pseudosensor(self, package, tag): + self._data_provider.update_sensor(tag, package.data, package.frame) + + +class SensorInterface(object): + def __init__(self): + self._sensors_objects = {} + self._data_buffers = {} + self._new_data_buffers = Queue() + self._queue_timeout = 10 # default: 10 + + # The only sensor that doesn't get its data on tick, so it needs special treatment + self._opendrive_tag = None + + + def register_sensor(self, tag, sensor_type, sensor): + if tag in self._sensors_objects: + raise SensorConfigurationInvalid("Duplicated sensor tag [{}]".format(tag)) + + self._sensors_objects[tag] = sensor + + if sensor_type == 'sensor.opendrive_map': + self._opendrive_tag = tag + + def update_sensor(self, tag, data, timestamp): + # print("Updating {} - {}".format(tag, timestamp)) + if tag not in self._sensors_objects: + raise SensorConfigurationInvalid("The sensor with tag [{}] has not been created!".format(tag)) + + self._new_data_buffers.put((tag, timestamp, data)) + + def get_data(self): + try: + data_dict = {} + while len(data_dict.keys()) < len(self._sensors_objects.keys()): + + # Don't wait for the opendrive sensor + if self._opendrive_tag and self._opendrive_tag not in data_dict.keys() \ + and len(self._sensors_objects.keys()) == len(data_dict.keys()) + 1: + # print("Ignoring opendrive sensor") + break + + sensor_data = self._new_data_buffers.get(True, self._queue_timeout) + data_dict[sensor_data[0]] = (sensor_data[1], sensor_data[2]) + + except Empty: + raise SensorReceivedNoData("A sensor took too long to send its data") + + return data_dict diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py new file mode 100644 index 00000000..280c8caf --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py @@ -0,0 +1,529 @@ +#!/usr/bin/env python +# Copyright (c) 2018-2019 Intel Corporation. +# authors: German Ros (german.ros@intel.com), Felipe Codevilla (felipe.alcm@gmail.com) +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+ +""" +CARLA Challenge Evaluator Routes + +Provisional code to evaluate Autonomous Agents for the CARLA Autonomous Driving challenge +""" +from __future__ import print_function + +import traceback +import argparse +from argparse import RawTextHelpFormatter +from datetime import datetime +from distutils.version import LooseVersion +import importlib +import os +import sys +import gc +import pkg_resources +import sys +import carla +import copy +import signal + +from srunner.scenariomanager.carla_data_provider import * +from srunner.scenariomanager.timer import GameTime +from srunner.scenariomanager.watchdog import Watchdog + +from leaderboard.scenarios.scenario_manager import ScenarioManager +from leaderboard.scenarios.route_scenario import RouteScenario +from leaderboard.envs.sensor_interface import SensorInterface, SensorConfigurationInvalid +from leaderboard.autoagents.agent_wrapper import AgentWrapper, AgentError +from leaderboard.utils.statistics_manager import StatisticsManager +from leaderboard.utils.route_indexer import RouteIndexer + + +sensors_to_icons = { + 'sensor.camera.rgb': 'carla_camera', + 'sensor.camera.semantic_segmentation': 'carla_camera', + 'sensor.camera.depth': 'carla_camera', + 'sensor.lidar.ray_cast': 'carla_lidar', + 'sensor.lidar.ray_cast_semantic': 'carla_lidar', + 'sensor.other.radar': 'carla_radar', + 'sensor.other.gnss': 'carla_gnss', + 'sensor.other.imu': 'carla_imu', + 'sensor.opendrive_map': 'carla_opendrive_map', + 'sensor.speedometer': 'carla_speedometer' +} + + +WEATHERS = { + 'ClearNoon': carla.WeatherParameters.ClearNoon, + 'ClearSunset': carla.WeatherParameters.ClearSunset, + + 'CloudyNoon': carla.WeatherParameters.CloudyNoon, + 'CloudySunset': carla.WeatherParameters.CloudySunset, + + 'WetNoon': carla.WeatherParameters.WetNoon, + 'WetSunset': carla.WeatherParameters.WetSunset, + + 'MidRainyNoon': carla.WeatherParameters.MidRainyNoon, + 'MidRainSunset': carla.WeatherParameters.MidRainSunset, + + 'WetCloudyNoon': carla.WeatherParameters.WetCloudyNoon, + 'WetCloudySunset': carla.WeatherParameters.WetCloudySunset, + + 'HardRainNoon': carla.WeatherParameters.HardRainNoon, + 'HardRainSunset': carla.WeatherParameters.HardRainSunset, + + 'SoftRainNoon': carla.WeatherParameters.SoftRainNoon, + 'SoftRainSunset': carla.WeatherParameters.SoftRainSunset, +} + +class LeaderboardEvaluator(object): + + """ + TODO: document me! + """ + + ego_vehicles = [] + + # Tunable parameters + client_timeout = 10.0 # in seconds + wait_for_world = 20.0 # in seconds + frame_rate = 20.0 # in Hz + + def __init__(self, args, statistics_manager): + """ + Setup CARLA client and world + Setup ScenarioManager + """ + self.statistics_manager = statistics_manager + self.sensors = None + self.sensor_icons = [] + self._vehicle_lights = carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam + + # First of all, we need to create the client that will send the requests + # to the simulator. Here we'll assume the simulator is accepting + # requests in the localhost at port 2000. 
+ self.client = carla.Client(args.host, int(args.port)) + if args.timeout: + self.client_timeout = float(args.timeout) + self.client.set_timeout(self.client_timeout) + + try: + self.traffic_manager = self.client.get_trafficmanager(int(args.trafficManagerPort)) + # self.traffic_manager = self.client.get_trafficmanager(8000) + except Exception as e: + print(e) + dist = pkg_resources.get_distribution("carla") + # if dist.version != 'leaderboard': + # if LooseVersion(dist.version) < LooseVersion('0.9.10'): + # raise ImportError("CARLA version 0.9.10.1 or newer required. CARLA version found: {}".format(dist)) + + # Load agent + module_name = os.path.basename(args.agent).split('.')[0] + sys.path.insert(0, os.path.dirname(args.agent)) + self.module_agent = importlib.import_module(module_name) + + # Create the ScenarioManager + self.manager = ScenarioManager(args.timeout, args.debug > 1) + + # Time control for summary purposes + self._start_time = GameTime.get_time() + self._end_time = None + + # Create the agent timer + self._agent_watchdog = Watchdog(int(float(args.timeout))) + signal.signal(signal.SIGINT, self._signal_handler) + + def _signal_handler(self, signum, frame): + """ + Terminate scenario ticking when receiving a signal interrupt + """ + if self._agent_watchdog and not self._agent_watchdog.get_status(): + raise RuntimeError("Timeout: Agent took too long to setup") + elif self.manager: + self.manager.signal_handler(signum, frame) + + def __del__(self): + """ + Cleanup and delete actors, ScenarioManager and CARLA world + """ + + self._cleanup() + if hasattr(self, 'manager') and self.manager: + del self.manager + if hasattr(self, 'world') and self.world: + del self.world + + def _cleanup(self): + """ + Remove and destroy all actors + """ + + # Simulation still running and in synchronous mode? 
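+ # If so, switch the world back to asynchronous mode before destroying the actors.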
+ if self.manager and self.manager.get_running_status() \ + and hasattr(self, 'world') and self.world: + # Reset to asynchronous mode + settings = self.world.get_settings() + settings.synchronous_mode = False + settings.fixed_delta_seconds = None + self.world.apply_settings(settings) + self.traffic_manager.set_synchronous_mode(False) + + if self.manager: + self.manager.cleanup() + + CarlaDataProvider.cleanup() + + for i, _ in enumerate(self.ego_vehicles): + if self.ego_vehicles[i]: + self.ego_vehicles[i].destroy() + self.ego_vehicles[i] = None + self.ego_vehicles = [] + + if self._agent_watchdog: + self._agent_watchdog.stop() + + if hasattr(self, 'agent_instance') and self.agent_instance: + self.agent_instance.destroy() + self.agent_instance = None + + if hasattr(self, 'statistics_manager') and self.statistics_manager: + self.statistics_manager.scenario = None + + def _prepare_ego_vehicles(self, ego_vehicles, wait_for_ego_vehicles=False): + """ + Spawn or update the ego vehicles + """ + + if not wait_for_ego_vehicles: + for vehicle in ego_vehicles: + self.ego_vehicles.append(CarlaDataProvider.request_new_actor(vehicle.model, + vehicle.transform, + vehicle.rolename, + color=vehicle.color, + vehicle_category=vehicle.category)) + + else: + ego_vehicle_missing = True + while ego_vehicle_missing: + self.ego_vehicles = [] + ego_vehicle_missing = False + for ego_vehicle in ego_vehicles: + ego_vehicle_found = False + carla_vehicles = CarlaDataProvider.get_world().get_actors().filter('vehicle.*') + for carla_vehicle in carla_vehicles: + if carla_vehicle.attributes['role_name'] == ego_vehicle.rolename: + ego_vehicle_found = True + self.ego_vehicles.append(carla_vehicle) + break + if not ego_vehicle_found: + ego_vehicle_missing = True + break + + for i, _ in enumerate(self.ego_vehicles): + self.ego_vehicles[i].set_transform(ego_vehicles[i].transform) + + # sync state + CarlaDataProvider.get_world().tick() + + def _load_and_wait_for_world(self, args, town, ego_vehicles=None): + """ + Load a new CARLA world and provide data to CarlaDataProvider + """ + self.traffic_manager.set_synchronous_mode(False) + # if hasattr(self, 'world'): + # settings = self.world.get_settings() + # settings.synchronous_mode = False + # self.world.apply_settings(settings) + print(town) + try: + self.world = self.client.load_world(town) + except Exception as e: + print(e) + settings = self.world.get_settings() + settings.fixed_delta_seconds = 1.0 / self.frame_rate + settings.synchronous_mode = True + self.world.apply_settings(settings) + + self.world.reset_all_traffic_lights() + + CarlaDataProvider.set_client(self.client) + CarlaDataProvider.set_world(self.world) + CarlaDataProvider.set_traffic_manager_port(int(args.trafficManagerPort)) + + if args.weather != "none": + assert args.weather in WEATHERS + CarlaDataProvider.set_weather(WEATHERS[args.weather]) + + self.traffic_manager.set_synchronous_mode(True) + self.traffic_manager.set_random_device_seed(int(args.trafficManagerSeed)) + + # Wait for the world to be ready + if CarlaDataProvider.is_sync_mode(): + self.world.tick() + else: + self.world.wait_for_tick() + + if CarlaDataProvider.get_map().name != town: + raise Exception("The CARLA server uses the wrong map!" 
+ "This scenario requires to use map {}".format(town)) + + def _register_statistics(self, config, checkpoint, entry_status, crash_message=""): + """ + Computes and saved the simulation statistics + """ + # register statistics + current_stats_record = self.statistics_manager.compute_route_statistics( + config, + self.manager.scenario_duration_system, + self.manager.scenario_duration_game, + crash_message + ) + + print("\033[1m> Registering the route statistics\033[0m") + self.statistics_manager.save_record(current_stats_record, config.index, checkpoint) + self.statistics_manager.save_entry_status(entry_status, False, checkpoint) + + def _load_and_run_scenario(self, args, config): + """ + Load and run the scenario given by config. + + Depending on what code fails, the simulation will either stop the route and + continue from the next one, or report a crash and stop. + """ + crash_message = "" + entry_status = "Started" + + print("\n\033[1m========= Preparing {} (repetition {}) =========".format(config.name, config.repetition_index)) + print("> Setting up the agent\033[0m") + + # Prepare the statistics of the route + self.statistics_manager.set_route(config.name, config.index) + + # Set up the user's agent, and the timer to avoid freezing the simulation + try: + self._agent_watchdog.start() + agent_class_name = getattr(self.module_agent, 'get_entry_point')() + self.agent_instance = getattr(self.module_agent, agent_class_name)(args.agent_config) + config.agent = self.agent_instance + + # Check and store the sensors + if not self.sensors: + self.sensors = self.agent_instance.sensors() + track = self.agent_instance.track + + AgentWrapper.validate_sensor_configuration(self.sensors, track, args.track) + + self.sensor_icons = [sensors_to_icons[sensor['type']] for sensor in self.sensors] + self.statistics_manager.save_sensors(self.sensor_icons, args.checkpoint) + + self._agent_watchdog.stop() + + except SensorConfigurationInvalid as e: + # The sensors are invalid -> set the ejecution to rejected and stop + print("\n\033[91mThe sensor's configuration used is invalid:") + print("> {}\033[0m\n".format(e)) + traceback.print_exc() + + crash_message = "Agent's sensors were invalid" + entry_status = "Rejected" + + self._register_statistics(config, args.checkpoint, entry_status, crash_message) + self._cleanup() + sys.exit(-1) + + except Exception as e: + # The agent setup has failed -> start the next route + print("\n\033[91mCould not set up the required agent:") + print("> {}\033[0m\n".format(e)) + traceback.print_exc() + + crash_message = "Agent couldn't be set up" + + self._register_statistics(config, args.checkpoint, entry_status, crash_message) + self._cleanup() + return + + print("\033[1m> Loading the world\033[0m") + + # Load the world and the scenario + try: + self._load_and_wait_for_world(args, config.town, config.ego_vehicles) + self._prepare_ego_vehicles(config.ego_vehicles, False) + scenario = RouteScenario(world=self.world, config=config, debug_mode=args.debug) + self.statistics_manager.set_scenario(scenario.scenario) + + # self.agent_instance._init() + # self.agent_instance.sensor_interface = SensorInterface() + + # Night mode + if config.weather.sun_altitude_angle < 0.0: + for vehicle in scenario.ego_vehicles: + vehicle.set_light_state(carla.VehicleLightState(self._vehicle_lights)) + + # Load scenario and run it + if args.record: + self.client.start_recorder("{}/{}_rep{}.log".format(args.record, config.name, config.repetition_index)) + self.manager.load_scenario(scenario, self.agent_instance, 
config.repetition_index) + + except Exception as e: + # The scenario is wrong -> set the execution to crashed and stop + print("\n\033[91mThe scenario could not be loaded:") + print("> {}\033[0m\n".format(e)) + traceback.print_exc() + + crash_message = "Simulation crashed" + entry_status = "Crashed" + + self._register_statistics(config, args.checkpoint, entry_status, crash_message) + + if args.record: + self.client.stop_recorder() + + self._cleanup() + sys.exit(-1) + + print("\033[1m> Running the route\033[0m") + + # Run the scenario + # try: + self.manager.run_scenario() + + # except AgentError as e: + # # The agent has failed -> stop the route + # print("\n\033[91mStopping the route, the agent has crashed:") + # print("> {}\033[0m\n".format(e)) + # traceback.print_exc() + + # crash_message = "Agent crashed" + + # except Exception as e: + # print("\n\033[91mError during the simulation:") + # print("> {}\033[0m\n".format(e)) + # traceback.print_exc() + + # crash_message = "Simulation crashed" + # entry_status = "Crashed" + + # Stop the scenario + try: + print("\033[1m> Stopping the route\033[0m") + self.manager.stop_scenario() + self._register_statistics(config, args.checkpoint, entry_status, crash_message) + + if args.record: + self.client.stop_recorder() + + # Remove all actors + scenario.remove_all_actors() + + self._cleanup() + + except Exception as e: + print("\n\033[91mFailed to stop the scenario, the statistics might be empty:") + print("> {}\033[0m\n".format(e)) + traceback.print_exc() + + crash_message = "Simulation crashed" + + if crash_message == "Simulation crashed": + sys.exit(-1) + + def run(self, args): + """ + Run the challenge mode + """ + # agent_class_name = getattr(self.module_agent, 'get_entry_point')() + # self.agent_instance = getattr(self.module_agent, agent_class_name)(args.agent_config) + + route_indexer = RouteIndexer(args.routes, args.scenarios, args.repetitions) + + if args.resume: + route_indexer.resume(args.checkpoint) + self.statistics_manager.resume(args.checkpoint) + else: + self.statistics_manager.clear_record(args.checkpoint) + route_indexer.save_state(args.checkpoint) + + while route_indexer.peek(): + # setup + config = route_indexer.next() + + # run + self._load_and_run_scenario(args, config) + + # debug aid: list any live torch tensors; torch is not imported in this module, so the bare except below silently skips this block + for obj in gc.get_objects(): + try: + if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)): + print(type(obj), obj.size()) + except: + pass + + route_indexer.save_state(args.checkpoint) + + # save global statistics + print("\033[1m> Registering the global statistics\033[0m") + global_stats_record = self.statistics_manager.compute_global_statistics(route_indexer.total) + StatisticsManager.save_global_record(global_stats_record, self.sensor_icons, route_indexer.total, args.checkpoint) + + +def main(): + description = "CARLA AD Leaderboard Evaluation: evaluate your Agent in CARLA scenarios\n" + + # general parameters + parser = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter) + parser.add_argument('--host', default='localhost', + help='IP of the host server (default: localhost)') + parser.add_argument('--port', default='62000', help='TCP port to listen to (default: 62000)') + parser.add_argument('--trafficManagerPort', default='8000', + help='Port to use for the TrafficManager (default: 8000)') + parser.add_argument('--trafficManagerSeed', default='0', + help='Seed used by the TrafficManager (default: 0)') + parser.add_argument('--debug', type=int, help='Run with debug output', default=0) + 
parser.add_argument('--record', type=str, default='', + help='Use CARLA recording feature to create a recording of the scenario') + parser.add_argument('--timeout', default="200.0", + help='Set the CARLA client timeout value in seconds') + + # simulation setup + parser.add_argument('--routes', + help='Name of the route to be executed. Points to the route_xml_file to be executed.', + required=True) + parser.add_argument('--weather', + type=str, default='none', + help='Name of the weather to be executed', + ) + parser.add_argument('--scenarios', + help='Name of the scenario annotation file to be mixed with the route.', + required=True) + parser.add_argument('--repetitions', + type=int, + default=1, + help='Number of repetitions per route.') + + # agent-related options + parser.add_argument("-a", "--agent", type=str, help="Path to Agent's py file to evaluate", required=True) + parser.add_argument("--agent-config", type=str, help="Path to Agent's configuration file", default="") + + parser.add_argument("--track", type=str, default='SENSORS', help="Participation track: SENSORS, MAP") + parser.add_argument('--resume', type=bool, default=False, help='Resume execution from last checkpoint?') + parser.add_argument("--checkpoint", type=str, + default='./simulation_results.json', + help="Path to checkpoint used for saving statistics and resuming") + + arguments = parser.parse_args() + print("init statistics_manager") + statistics_manager = StatisticsManager() + + # keep the name defined for the finally clause even if the constructor raises + leaderboard_evaluator = None + try: + print("begin") + leaderboard_evaluator = LeaderboardEvaluator(arguments, statistics_manager) + leaderboard_evaluator.run(arguments) + + except Exception as e: + traceback.print_exc() + finally: + del leaderboard_evaluator + + +if __name__ == '__main__': + main() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/background_activity.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/background_activity.py new file mode 100644 index 00000000..6ca0167c --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/background_activity.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+ +""" +Scenario spawning elements to make the town dynamic and interesting +""" + +import carla + +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from srunner.scenarios.basic_scenario import BasicScenario + + +BACKGROUND_ACTIVITY_SCENARIOS = ["BackgroundActivity"] + + +class BackgroundActivity(BasicScenario): + + """ + Implementation of a scenario to spawn a set of background actors, + and to remove traffic jams in background traffic + + This is a single ego vehicle scenario + """ + + category = "BackgroundActivity" + + town_amount = { + # 'Town01': 120, + 'Town01': 120, + 'Town02': 100, + 'Town03': 120, + 'Town04': 200, + 'Town05': 120, + 'Town06': 150, + 'Town07': 110, + 'Town08': 180, + 'Town09': 300, + 'Town10': 120, + } + + def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, timeout=35 * 60): + """ + Setup all relevant parameters and create scenario + """ + self.config = config + self.debug = debug_mode + + self.timeout = timeout # Timeout of scenario in seconds + + super(BackgroundActivity, self).__init__("BackgroundActivity", + ego_vehicles, + config, + world, + debug_mode, + terminate_on_failure=True, + criteria_enable=True) + + def _initialize_actors(self, config): + + town_name = config.town + if town_name in self.town_amount: + amount = self.town_amount[town_name] + else: + amount = 0 + + new_actors = CarlaDataProvider.request_new_batch_actors('vehicle.*', + amount, + carla.Transform(), + autopilot=True, + random_location=True, + rolename='background') + + if new_actors is None: + raise Exception("Error: Unable to add the background activity, all spawn points were occupied") + + for _actor in new_actors: + self.other_actors.append(_actor) + + def _create_behavior(self): + """ + Basic behavior do nothing, i.e. Idle + """ + pass + + def _create_test_criteria(self): + """ + A list of all test criteria will be created that is later used + in parallel behavior tree. + """ + pass + + def __del__(self): + """ + Remove all actors upon deletion + """ + self.remove_all_actors() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/master_scenario.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/master_scenario.py new file mode 100644 index 00000000..801a2395 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/master_scenario.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python + +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +Basic CARLA Autonomous Driving training scenario +""" + +import py_trees +from srunner.scenarioconfigs.route_scenario_configuration import RouteConfiguration +from srunner.scenariomanager.scenarioatomics.atomic_behaviors import Idle +from srunner.scenariomanager.scenarioatomics.atomic_criteria import (CollisionTest, + InRouteTest, + RouteCompletionTest, + RunningRedLightTest, + RunningStopTest, + OutsideRouteLanesTest) +from srunner.scenarios.basic_scenario import BasicScenario + +from leaderboard.scenarios.scenarioatomics.atomic_criteria import ActorSpeedAboveThresholdTest + + +MASTER_SCENARIO = ["MasterScenario"] + + +class MasterScenario(BasicScenario): + + """ + Implementation of a Master scenario that controls the route. 
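# Aside: BackgroundActivity._initialize_actors above resolves the vehicle
# count with an explicit membership test on town_amount. A dict lookup with a
# default expresses the same logic in one line; a small sketch (names mirror
# the patch, values are a subset of the table above):

town_amount = {'Town01': 120, 'Town02': 100, 'Town04': 200}

def background_vehicle_count(town_name):
    # Towns without an entry get no background traffic, exactly as the
    # if/else in _initialize_actors behaves.
    return town_amount.get(town_name, 0)

assert background_vehicle_count('Town02') == 100
assert background_vehicle_count('Town12') == 0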
+ + This is a single ego vehicle scenario + """ + + category = "Master" + radius = 10.0 # meters + + + def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True, + timeout=300): + """ + Setup all relevant parameters and create scenario + """ + self.config = config + self.route = None + # Timeout of scenario in seconds + self.timeout = timeout + + if hasattr(self.config, 'route'): + self.route = self.config.route + else: + raise ValueError("Master scenario must have a route") + + super(MasterScenario, self).__init__("MasterScenario", ego_vehicles=ego_vehicles, config=config, + world=world, debug_mode=debug_mode, + terminate_on_failure=True, criteria_enable=criteria_enable) + + def _create_behavior(self): + """ + Basic behavior do nothing, i.e. Idle + """ + + # Build behavior tree + sequence = py_trees.composites.Sequence("MasterScenario") + idle_behavior = Idle() + sequence.add_child(idle_behavior) + + return sequence + + def _create_test_criteria(self): + """ + A list of all test criteria will be created that is later used + in parallel behavior tree. + """ + + if isinstance(self.route, RouteConfiguration): + route = self.route.data + else: + route = self.route + + collision_criterion = CollisionTest(self.ego_vehicles[0], terminate_on_failure=False) + + route_criterion = InRouteTest(self.ego_vehicles[0], + route=route, + offroad_max=30, + terminate_on_failure=True) + + completion_criterion = RouteCompletionTest(self.ego_vehicles[0], route=route) + + outsidelane_criterion = OutsideRouteLanesTest(self.ego_vehicles[0],route=route) + + red_light_criterion = RunningRedLightTest(self.ego_vehicles[0]) + + stop_criterion = RunningStopTest(self.ego_vehicles[0]) + + blocked_criterion = ActorSpeedAboveThresholdTest(self.ego_vehicles[0], + speed_threshold=0.1, + below_threshold_max_time=90.0, + terminate_on_failure=True) + + parallel_criteria = py_trees.composites.Parallel("group_criteria", + policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE) + + parallel_criteria.add_child(completion_criterion) + parallel_criteria.add_child(collision_criterion) + parallel_criteria.add_child(route_criterion) + parallel_criteria.add_child(outsidelane_criterion) + parallel_criteria.add_child(red_light_criterion) + parallel_criteria.add_child(stop_criterion) + parallel_criteria.add_child(blocked_criterion) + + + return parallel_criteria + + def __del__(self): + """ + Remove all actors upon deletion + """ + self.remove_all_actors() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py new file mode 100644 index 00000000..827727cd --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py @@ -0,0 +1,599 @@ +#!/usr/bin/env python + +# Copyright (c) 2019 Intel Corporation +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
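# Aside: MasterScenario's criteria above are grouped under a Parallel with
# policy SUCCESS_ON_ONE, so the whole group finishes as soon as any child
# returns SUCCESS, typically RouteCompletionTest. A toy sketch of that
# policy's semantics with py_trees stock behaviours (this assumes the
# py_trees 0.8.x API that scenario_runner uses):

import py_trees

group = py_trees.composites.Parallel(
    "group_criteria", policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
group.add_children([py_trees.behaviours.Running(name="still_driving"),
                    py_trees.behaviours.Success(name="route_done")])

group.tick_once()
print(group.status)  # Status.SUCCESS: one successful child ends the parallel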
+ +""" +This module provides Challenge routes as standalone scenarios +""" + +from __future__ import print_function + +import math +import xml.etree.ElementTree as ET +import numpy.random as random +import os +import py_trees + +import carla + +from agents.navigation.local_planner import RoadOption + +# pylint: disable=line-too-long +from srunner.scenarioconfigs.scenario_configuration import ScenarioConfiguration, ActorConfigurationData +# pylint: enable=line-too-long +from srunner.scenariomanager.scenarioatomics.atomic_behaviors import Idle, ScenarioTriggerer +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from srunner.scenarios.basic_scenario import BasicScenario +from srunner.scenarios.control_loss import ControlLoss +from srunner.scenarios.follow_leading_vehicle import FollowLeadingVehicle +from srunner.scenarios.object_crash_vehicle import DynamicObjectCrossing +from srunner.scenarios.object_crash_intersection import VehicleTurningRoute +from srunner.scenarios.other_leading_vehicle import OtherLeadingVehicle +from srunner.scenarios.maneuver_opposite_direction import ManeuverOppositeDirection +from srunner.scenarios.junction_crossing_route import SignalJunctionCrossingRoute, NoSignalJunctionCrossingRoute + +from srunner.scenariomanager.scenarioatomics.atomic_criteria import (CollisionTest, + InRouteTest, + RouteCompletionTest, + OutsideRouteLanesTest, + RunningRedLightTest, + RunningStopTest, + ActorSpeedAboveThresholdTest) + +from leaderboard.utils.route_parser import RouteParser, TRIGGER_THRESHOLD, TRIGGER_ANGLE_THRESHOLD +from leaderboard.utils.route_manipulation import interpolate_trajectory + +DATA_COLLECTION = os.environ.get('DATA_COLLECTION', None) + +ROUTESCENARIO = ["RouteScenario"] + +SECONDS_GIVEN_PER_METERS = 0.8 # for timeout +# SECONDS_GIVEN_PER_METERS = 2 # for timeout +INITIAL_SECONDS_DELAY = 5.0 + +NUMBER_CLASS_TRANSLATION = { + "Scenario1": ControlLoss, + "Scenario2": FollowLeadingVehicle, + "Scenario3": DynamicObjectCrossing, + "Scenario4": VehicleTurningRoute, + "Scenario5": OtherLeadingVehicle, + "Scenario6": ManeuverOppositeDirection, + "Scenario7": SignalJunctionCrossingRoute, + "Scenario8": SignalJunctionCrossingRoute, + "Scenario9": SignalJunctionCrossingRoute, + "Scenario10": NoSignalJunctionCrossingRoute +} + + +def oneshot_behavior(name, variable_name, behaviour): + """ + This is taken from py_trees.idiom.oneshot. 
+    """
+    # Initialize the variables
+    blackboard = py_trees.blackboard.Blackboard()
+    _ = blackboard.set(variable_name, False)
+
+    # Wait until the scenario has ended
+    subtree_root = py_trees.composites.Selector(name=name)
+    check_flag = py_trees.blackboard.CheckBlackboardVariable(
+        name=variable_name + " Done?",
+        variable_name=variable_name,
+        expected_value=True,
+        clearing_policy=py_trees.common.ClearingPolicy.ON_INITIALISE
+    )
+    set_flag = py_trees.blackboard.SetBlackboardVariable(
+        name="Mark Done",
+        variable_name=variable_name,
+        variable_value=True
+    )
+    # If it's a sequence, don't double-nest it in a redundant manner
+    if isinstance(behaviour, py_trees.composites.Sequence):
+        behaviour.add_child(set_flag)
+        sequence = behaviour
+    else:
+        sequence = py_trees.composites.Sequence(name="OneShot")
+        sequence.add_children([behaviour, set_flag])
+
+    subtree_root.add_children([check_flag, sequence])
+    return subtree_root
+
+
+def convert_json_to_transform(actor_dict):
+    """
+    Convert a JSON actor description (dict) to a CARLA transform
+    """
+    return carla.Transform(location=carla.Location(x=float(actor_dict['x']), y=float(actor_dict['y']),
+                                                   z=float(actor_dict['z'])),
+                           rotation=carla.Rotation(roll=0.0, pitch=0.0, yaw=float(actor_dict['yaw'])))
+
+
+def convert_json_to_actor(actor_dict):
+    """
+    Convert a JSON actor description (dict) to an ActorConfigurationData object
+    """
+    node = ET.Element('waypoint')
+    node.set('x', actor_dict['x'])
+    node.set('y', actor_dict['y'])
+    node.set('z', actor_dict['z'])
+    node.set('yaw', actor_dict['yaw'])
+
+    return ActorConfigurationData.parse_from_node(node, 'simulation')
+
+
+def convert_transform_to_location(transform_vec):
+    """
+    Convert a vector of transforms to a vector of locations
+    """
+    location_vec = []
+    for transform_tuple in transform_vec:
+        location_vec.append((transform_tuple[0].location, transform_tuple[1]))
+
+    return location_vec
+
+
+def compare_scenarios(scenario_choice, existent_scenario):
+    """
+    Compare function for scenarios based on distance of the scenario start position
+    """
+    def transform_to_pos_vec(scenario):
+        """
+        Convert left/right/front to a meaningful CARLA position
+        """
+        position_vec = [scenario['trigger_position']]
+        if scenario['other_actors'] is not None:
+            if 'left' in scenario['other_actors']:
+                position_vec += scenario['other_actors']['left']
+            if 'front' in scenario['other_actors']:
+                position_vec += scenario['other_actors']['front']
+            if 'right' in scenario['other_actors']:
+                position_vec += scenario['other_actors']['right']
+
+        return position_vec
+
+    # put the positions of the scenario choice into a vec of positions to be able to compare
+
+    choice_vec = transform_to_pos_vec(scenario_choice)
+    existent_vec = transform_to_pos_vec(existent_scenario)
+    for pos_choice in choice_vec:
+        for pos_existent in existent_vec:
+
+            dx = float(pos_choice['x']) - float(pos_existent['x'])
+            dy = float(pos_choice['y']) - float(pos_existent['y'])
+            dz = float(pos_choice['z']) - float(pos_existent['z'])
+            dist_position = math.sqrt(dx * dx + dy * dy + dz * dz)
+            dyaw = float(pos_choice['yaw']) - float(pos_existent['yaw'])
+            dist_angle = math.sqrt(dyaw * dyaw)
+            if dist_position < TRIGGER_THRESHOLD and dist_angle < TRIGGER_ANGLE_THRESHOLD:
+                return True
+
+    return False
+
+
+class RouteScenario(BasicScenario):
+
+    """
+    Implementation of a RouteScenario, i.e.
a scenario that consists of driving along a pre-defined route, + along which several smaller scenarios are triggered + """ + + category = "RouteScenario" + + def __init__(self, world, config, debug_mode=0, criteria_enable=True): + """ + Setup all relevant parameters and create scenarios along route + """ + self.config = config + self.route = None + self.sampled_scenarios_definitions = None + + self._update_route(world, config, debug_mode>0) + + ego_vehicle = self._update_ego_vehicle() + + self.list_scenarios = self._build_scenario_instances(world, + ego_vehicle, + self.sampled_scenarios_definitions, + scenarios_per_tick=10, + timeout=self.timeout, + debug_mode=debug_mode>1) + + + super(RouteScenario, self).__init__(name=config.name, + ego_vehicles=[ego_vehicle], + config=config, + world=world, + debug_mode=debug_mode>1, + terminate_on_failure=False, + criteria_enable=criteria_enable) + + def _update_route(self, world, config, debug_mode): + """ + Update the input route, i.e. refine waypoint list, and extract possible scenario locations + + Parameters: + - world: CARLA world + - config: Scenario configuration (RouteConfiguration) + """ + + # Transform the scenario file into a dictionary + world_annotations = RouteParser.parse_annotations_file(config.scenario_file) + + # prepare route's trajectory (interpolate and add the GPS route) + # gps_route, route = interpolate_trajectory(world, config.trajectory) + gps_route, route, wp_route = interpolate_trajectory(world, config.trajectory) + + potential_scenarios_definitions, _ = RouteParser.scan_route_for_scenarios( + config.town, route, world_annotations) + + self.route = route + CarlaDataProvider.set_ego_vehicle_route(convert_transform_to_location(self.route)) + + config.agent.set_global_plan(gps_route, self.route, wp_route) + + # Sample the scenarios to be used for this route instance. + self.sampled_scenarios_definitions = self._scenario_sampling(potential_scenarios_definitions) + + # Timeout of scenario in seconds + self.timeout = self._estimate_route_timeout() + + # Print route in debug mode + if debug_mode: + self._draw_waypoints(world, self.route, vertical_shift=1.0, persistency=50000.0) + + def _update_ego_vehicle(self): + """ + Set/Update the start position of the ego_vehicle + """ + # move ego to correct position + elevate_transform = self.route[0][0] + elevate_transform.location.z += 0.5 + + ego_vehicle = CarlaDataProvider.request_new_actor('vehicle.lincoln.mkz2017', + elevate_transform, + rolename='hero') + + spectator = CarlaDataProvider.get_world().get_spectator() + ego_trans = ego_vehicle.get_transform() + spectator.set_transform(carla.Transform(ego_trans.location + carla.Location(z=50), + carla.Rotation(pitch=-90))) + + return ego_vehicle + + def _estimate_route_timeout(self): + """ + Estimate the duration of the route + """ + route_length = 0.0 # in meters + + prev_point = self.route[0][0] + for current_point, _ in self.route[1:]: + dist = current_point.location.distance(prev_point.location) + route_length += dist + prev_point = current_point + + return int(SECONDS_GIVEN_PER_METERS * route_length + INITIAL_SECONDS_DELAY) + + # pylint: disable=no-self-use + def _draw_waypoints(self, world, waypoints, vertical_shift, persistency=-1): + """ + Draw a list of waypoints at a certain height given in vertical_shift. 
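# Aside: a quick sanity check of _estimate_route_timeout above. With
# SECONDS_GIVEN_PER_METERS = 0.8 and INITIAL_SECONDS_DELAY = 5.0, a route
# whose waypoints span 1.5 km yields int(0.8 * 1500.0 + 5.0) = 1205 seconds.
# A self-contained sketch over plain (x, y) points instead of CARLA transforms:

import math

SECONDS_GIVEN_PER_METERS = 0.8
INITIAL_SECONDS_DELAY = 5.0

def estimate_timeout(points):
    # Sum consecutive point-to-point distances, as the method does with
    # carla.Location.distance(), then apply the same linear budget.
    length = sum(math.hypot(a[0] - b[0], a[1] - b[1]) for a, b in zip(points, points[1:]))
    return int(SECONDS_GIVEN_PER_METERS * length + INITIAL_SECONDS_DELAY)

print(estimate_timeout([(0.0, 0.0), (1500.0, 0.0)]))  # 1205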
+ """ + for w in waypoints: + wp = w[0].location + carla.Location(z=vertical_shift) + + size = 0.2 + if w[1] == RoadOption.LEFT: # Yellow + color = carla.Color(255, 255, 0) + elif w[1] == RoadOption.RIGHT: # Cyan + color = carla.Color(0, 255, 255) + elif w[1] == RoadOption.CHANGELANELEFT: # Orange + color = carla.Color(255, 64, 0) + elif w[1] == RoadOption.CHANGELANERIGHT: # Dark Cyan + color = carla.Color(0, 64, 255) + elif w[1] == RoadOption.STRAIGHT: # Gray + color = carla.Color(128, 128, 128) + else: # LANEFOLLOW + color = carla.Color(0, 255, 0) # Green + size = 0.1 + + world.debug.draw_point(wp, size=size, color=color, life_time=persistency) + + world.debug.draw_point(waypoints[0][0].location + carla.Location(z=vertical_shift), size=0.2, + color=carla.Color(0, 0, 255), life_time=persistency) + world.debug.draw_point(waypoints[-1][0].location + carla.Location(z=vertical_shift), size=0.2, + color=carla.Color(255, 0, 0), life_time=persistency) + + def _scenario_sampling(self, potential_scenarios_definitions, random_seed=0): + """ + The function used to sample the scenarios that are going to happen for this route. + """ + + # fix the random seed for reproducibility + rgn = random.RandomState(random_seed) + + def position_sampled(scenario_choice, sampled_scenarios): + """ + Check if a position was already sampled, i.e. used for another scenario + """ + for existent_scenario in sampled_scenarios: + # If the scenarios have equal positions then it is true. + if compare_scenarios(scenario_choice, existent_scenario): + return True + + return False + + def select_scenario(list_scenarios): + # priority to the scenarios with higher number: 10 has priority over 9, etc. + higher_id = -1 + selected_scenario = None + for scenario in list_scenarios: + try: + scenario_number = int(scenario['name'].split('Scenario')[1]) + except: + scenario_number = -1 + + if scenario_number >= higher_id: + higher_id = scenario_number + selected_scenario = scenario + + return selected_scenario + + + # def select_scenario(list_scenarios): + # # priority to the scenarios with higher number: 10 has priority over 9, etc. + # selected_scenario = None + # for scenario in list_scenarios: + # try: + # scenario_number = int(scenario['name'].split('Scenario')[1]) + # except: + # scenario_number = -1 + + # if scenario_number == 3: + # selected_scenario = scenario + # break + # if selected_scenario is None: + # return rgn.choice(list_scenarios) + + # return selected_scenario + + def select_scenario_randomly(list_scenarios): + # randomly select a scenario + return rgn.choice(list_scenarios) + + # The idea is to randomly sample a scenario per trigger position. + sampled_scenarios = [] + for trigger in potential_scenarios_definitions.keys(): + possible_scenarios = potential_scenarios_definitions[trigger] + if DATA_COLLECTION: + scenario_choice = select_scenario_randomly(possible_scenarios) # random sampling + else: + scenario_choice = select_scenario(possible_scenarios) # original prioritized sampling + del possible_scenarios[possible_scenarios.index(scenario_choice)] + # We keep sampling and testing if this position is present on any of the scenarios. 
+ while position_sampled(scenario_choice, sampled_scenarios): + if possible_scenarios is None or not possible_scenarios: + scenario_choice = None + break + scenario_choice = rgn.choice(possible_scenarios) + del possible_scenarios[possible_scenarios.index(scenario_choice)] + + if scenario_choice is not None: + sampled_scenarios.append(scenario_choice) + + return sampled_scenarios + + def _build_scenario_instances(self, world, ego_vehicle, scenario_definitions, + scenarios_per_tick=5, timeout=300, debug_mode=False): + """ + Based on the parsed route and possible scenarios, build all the scenario classes. + """ + scenario_instance_vec = [] + + if debug_mode: + for scenario in scenario_definitions: + loc = carla.Location(scenario['trigger_position']['x'], + scenario['trigger_position']['y'], + scenario['trigger_position']['z']) + carla.Location(z=2.0) + world.debug.draw_point(loc, size=0.3, color=carla.Color(255, 0, 0), life_time=100000) + world.debug.draw_string(loc, str(scenario['name']), draw_shadow=False, + color=carla.Color(0, 0, 255), life_time=100000, persistent_lines=True) + + for scenario_number, definition in enumerate(scenario_definitions): + # Get the class possibilities for this scenario number + scenario_class = NUMBER_CLASS_TRANSLATION[definition['name']] + + # Create the other actors that are going to appear + if definition['other_actors'] is not None: + list_of_actor_conf_instances = self._get_actors_instances(definition['other_actors']) + else: + list_of_actor_conf_instances = [] + # Create an actor configuration for the ego-vehicle trigger position + + egoactor_trigger_position = convert_json_to_transform(definition['trigger_position']) + scenario_configuration = ScenarioConfiguration() + scenario_configuration.other_actors = list_of_actor_conf_instances + scenario_configuration.trigger_points = [egoactor_trigger_position] + scenario_configuration.subtype = definition['scenario_type'] + scenario_configuration.ego_vehicles = [ActorConfigurationData('vehicle.lincoln.mkz2017', + ego_vehicle.get_transform(), + 'hero')] + route_var_name = "ScenarioRouteNumber{}".format(scenario_number) + scenario_configuration.route_var_name = route_var_name + try: + scenario_instance = scenario_class(world, [ego_vehicle], scenario_configuration, + criteria_enable=False, timeout=timeout) + # Do a tick every once in a while to avoid spawning everything at the same time + if scenario_number % scenarios_per_tick == 0: + if CarlaDataProvider.is_sync_mode(): + world.tick() + else: + world.wait_for_tick() + + except Exception as e: + print("Skipping scenario '{}' due to setup error: {}".format(definition['name'], e)) + continue + + scenario_instance_vec.append(scenario_instance) + + return scenario_instance_vec + + def _get_actors_instances(self, list_of_antagonist_actors): + """ + Get the full list of actor instances. 
+ """ + + def get_actors_from_list(list_of_actor_def): + """ + Receives a list of actor definitions and creates an actual list of ActorConfigurationObjects + """ + sublist_of_actors = [] + for actor_def in list_of_actor_def: + sublist_of_actors.append(convert_json_to_actor(actor_def)) + + return sublist_of_actors + + list_of_actors = [] + # Parse vehicles to the left + if 'front' in list_of_antagonist_actors: + list_of_actors += get_actors_from_list(list_of_antagonist_actors['front']) + + if 'left' in list_of_antagonist_actors: + list_of_actors += get_actors_from_list(list_of_antagonist_actors['left']) + + if 'right' in list_of_antagonist_actors: + list_of_actors += get_actors_from_list(list_of_antagonist_actors['right']) + + return list_of_actors + + # pylint: enable=no-self-use + + def _initialize_actors(self, config): + """ + Set other_actors to the superset of all scenario actors + """ + # Create the background activity of the route + town_amount = { + 'Town01': 120, + 'Town02': 100, + 'Town03': 120, + 'Town04': 200, + 'Town05': 120, + 'Town06': 150, + 'Town07': 110, + 'Town08': 180, + 'Town09': 300, + 'Town10HD': 120, # town10 doesn't load properly for some reason + } + + amount = town_amount[config.town] if config.town in town_amount else 0 + + new_actors = CarlaDataProvider.request_new_batch_actors('vehicle.*', + amount, + carla.Transform(), + autopilot=True, + random_location=True, + rolename='background') + if new_actors is None: + raise Exception("Error: Unable to add the background activity, all spawn points were occupied") + + for _actor in new_actors: + self.other_actors.append(_actor) + + # Add all the actors of the specific scenarios to self.other_actors + for scenario in self.list_scenarios: + self.other_actors.extend(scenario.other_actors) + + def _create_behavior(self): + """ + Basic behavior do nothing, i.e. 
Idle + """ + scenario_trigger_distance = 1.5 # Max trigger distance between route and scenario + + behavior = py_trees.composites.Parallel(policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE) + + subbehavior = py_trees.composites.Parallel(name="Behavior", + policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL) + + scenario_behaviors = [] + blackboard_list = [] + + for i, scenario in enumerate(self.list_scenarios): + if scenario.scenario.behavior is not None: + route_var_name = scenario.config.route_var_name + + if route_var_name is not None: + scenario_behaviors.append(scenario.scenario.behavior) + blackboard_list.append([scenario.config.route_var_name, + scenario.config.trigger_points[0].location]) + else: + name = "{} - {}".format(i, scenario.scenario.behavior.name) + oneshot_idiom = oneshot_behavior( + name=name, + variable_name=name, + behaviour=scenario.scenario.behavior) + scenario_behaviors.append(oneshot_idiom) + + # Add behavior that manages the scenarios trigger conditions + scenario_triggerer = ScenarioTriggerer( + self.ego_vehicles[0], + self.route, + blackboard_list, + scenario_trigger_distance, + repeat_scenarios=False + ) + + subbehavior.add_child(scenario_triggerer) # make ScenarioTriggerer the first thing to be checked + subbehavior.add_children(scenario_behaviors) + subbehavior.add_child(Idle()) # The behaviours cannot make the route scenario stop + behavior.add_child(subbehavior) + return behavior + + def _create_test_criteria(self): + """ + """ + criteria = [] + route = convert_transform_to_location(self.route) + + # we terminate the route if collision or red light infraction happens during data collection + + if DATA_COLLECTION: + collision_criterion = CollisionTest(self.ego_vehicles[0], terminate_on_failure=True) + red_light_criterion = RunningRedLightTest(self.ego_vehicles[0], terminate_on_failure=True) + else: + collision_criterion = CollisionTest(self.ego_vehicles[0], terminate_on_failure=False) + red_light_criterion = RunningRedLightTest(self.ego_vehicles[0], terminate_on_failure=False) + route_criterion = InRouteTest(self.ego_vehicles[0], + route=route, + offroad_max=30, + terminate_on_failure=True) + + completion_criterion = RouteCompletionTest(self.ego_vehicles[0], route=route) + + outsidelane_criterion = OutsideRouteLanesTest(self.ego_vehicles[0], route=route) + + stop_criterion = RunningStopTest(self.ego_vehicles[0]) + + blocked_criterion = ActorSpeedAboveThresholdTest(self.ego_vehicles[0], + speed_threshold=0.1, + below_threshold_max_time=180.0, + terminate_on_failure=True, + name="AgentBlockedTest") + + criteria.append(completion_criterion) + criteria.append(outsidelane_criterion) + criteria.append(collision_criterion) + criteria.append(red_light_criterion) + criteria.append(stop_criterion) + criteria.append(route_criterion) + criteria.append(blocked_criterion) + + return criteria + + def __del__(self): + """ + Remove all actors upon deletion + """ + self.remove_all_actors() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py new file mode 100644 index 00000000..9993fa44 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python + +# Copyright (c) 2018-2020 Intel Corporation +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides the ScenarioManager implementations. 
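# Aside: _create_behavior above appends Idle() to the SUCCESS_ON_ALL parallel
# so the sub-scenarios alone can never terminate the route: Idle always
# returns RUNNING, and SUCCESS_ON_ALL needs every child to succeed, so the
# route only ends via the criteria or the timeout. A toy sketch of that
# blocking property (py_trees 0.8.x API, as in scenario_runner):

import py_trees

subbehavior = py_trees.composites.Parallel(
    "Behavior", policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
subbehavior.add_children([py_trees.behaviours.Success(name="scenario_done"),
                          py_trees.behaviours.Running(name="idle_stand_in")])

subbehavior.tick_once()
print(subbehavior.status)  # Status.RUNNING: the Running child keeps it alive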
+It must not be modified and is for reference only! +""" + +from __future__ import print_function +import signal +import sys +import time + +import py_trees +import carla + +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from srunner.scenariomanager.timer import GameTime +from srunner.scenariomanager.watchdog import Watchdog + +from leaderboard.autoagents.agent_wrapper import AgentWrapper, AgentError +from leaderboard.envs.sensor_interface import SensorReceivedNoData +from leaderboard.utils.result_writer import ResultOutputProvider + + +class ScenarioManager(object): + + """ + Basic scenario manager class. This class holds all functionality + required to start, run and stop a scenario. + + The user must not modify this class. + + To use the ScenarioManager: + 1. Create an object via manager = ScenarioManager() + 2. Load a scenario via manager.load_scenario() + 3. Trigger the execution of the scenario manager.run_scenario() + This function is designed to explicitly control start and end of + the scenario execution + 4. If needed, cleanup with manager.stop_scenario() + """ + + + def __init__(self, timeout, debug_mode=False): + """ + Setups up the parameters, which will be filled at load_scenario() + """ + self.scenario = None + self.scenario_tree = None + self.scenario_class = None + self.ego_vehicles = None + self.other_actors = None + + self._debug_mode = debug_mode + self._agent = None + self._running = False + self._timestamp_last_run = 0.0 + self._timeout = float(timeout) + + # Used to detect if the simulation is down + watchdog_timeout = max(5, self._timeout - 2) + self._watchdog = Watchdog(watchdog_timeout) + + # Avoid the agent from freezing the simulation + agent_timeout = watchdog_timeout - 1 + self._agent_watchdog = Watchdog(agent_timeout) + + self.scenario_duration_system = 0.0 + self.scenario_duration_game = 0.0 + self.start_system_time = None + self.end_system_time = None + self.end_game_time = None + + # Register the scenario tick as callback for the CARLA world + # Use the callback_id inside the signal handler to allow external interrupts + signal.signal(signal.SIGINT, self.signal_handler) + + def signal_handler(self, signum, frame): + """ + Terminate scenario ticking when receiving a signal interrupt + """ + self._running = False + + def cleanup(self): + """ + Reset all parameters + """ + self._timestamp_last_run = 0.0 + self.scenario_duration_system = 0.0 + self.scenario_duration_game = 0.0 + self.start_system_time = None + self.end_system_time = None + self.end_game_time = None + + def load_scenario(self, scenario, agent, rep_number): + """ + Load a new scenario + """ + + GameTime.restart() + self._agent = AgentWrapper(agent) + self.scenario_class = scenario + self.scenario = scenario.scenario + self.scenario_tree = self.scenario.scenario_tree + self.ego_vehicles = scenario.ego_vehicles + self.other_actors = scenario.other_actors + self.repetition_number = rep_number + + # To print the scenario tree uncomment the next line + # py_trees.display.render_dot_tree(self.scenario_tree) + CarlaDataProvider.set_ego(self.ego_vehicles[0]) + + self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode) + + def run_scenario(self): + """ + Trigger the start of the scenario and wait for it to finish/fail + """ + self.start_system_time = time.time() + self.start_game_time = GameTime.get_time() + + self._watchdog.start() + self._running = True + + while self._running: + timestamp = None + world = CarlaDataProvider.get_world() + if world: + snapshot = 
world.get_snapshot() + if snapshot: + timestamp = snapshot.timestamp + if timestamp: + self._tick_scenario(timestamp) + + def _tick_scenario(self, timestamp): + """ + Run next tick of scenario and the agent and tick the world. + """ + + if self._timestamp_last_run < timestamp.elapsed_seconds and self._running: + self._timestamp_last_run = timestamp.elapsed_seconds + + self._watchdog.update() + # Update game time and actor information + GameTime.on_carla_tick(timestamp) + CarlaDataProvider.on_carla_tick() + + try: + ego_action = self._agent() + + # Special exception inside the agent that isn't caused by the agent + except SensorReceivedNoData as e: + raise RuntimeError(e) + + except Exception as e: + raise AgentError(e) + + self.ego_vehicles[0].apply_control(ego_action) + + # Tick scenario + self.scenario_tree.tick_once() + + if self._debug_mode: + print("\n") + py_trees.display.print_ascii_tree( + self.scenario_tree, show_status=True) + sys.stdout.flush() + + if self.scenario_tree.status != py_trees.common.Status.RUNNING: + self._running = False + + spectator = CarlaDataProvider.get_world().get_spectator() + ego_trans = self.ego_vehicles[0].get_transform() + spectator.set_transform(carla.Transform(ego_trans.location + carla.Location(z=50), + carla.Rotation(pitch=-90))) + + if self._running and self.get_running_status(): + CarlaDataProvider.get_world().tick(self._timeout) + + def get_running_status(self): + """ + returns: + bool: False if watchdog exception occured, True otherwise + """ + return self._watchdog.get_status() + + def stop_scenario(self): + """ + This function triggers a proper termination of a scenario + """ + self._watchdog.stop() + + self.end_system_time = time.time() + self.end_game_time = GameTime.get_time() + + self.scenario_duration_system = self.end_system_time - self.start_system_time + self.scenario_duration_game = self.end_game_time - self.start_game_time + + if self.get_running_status(): + if self.scenario is not None: + self.scenario.terminate() + + if self._agent is not None: + self._agent.cleanup() + self._agent = None + + self.analyze_scenario() + + def analyze_scenario(self): + """ + Analyzes and prints the results of the route + """ + global_result = '\033[92m'+'SUCCESS'+'\033[0m' + + for criterion in self.scenario.get_criteria(): + if criterion.test_status != "SUCCESS": + global_result = '\033[91m'+'FAILURE'+'\033[0m' + + if self.scenario.timeout_node.timeout: + global_result = '\033[91m'+'FAILURE'+'\033[0m' + + ResultOutputProvider(self, global_result) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/atomic_criteria.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/atomic_criteria.py new file mode 100644 index 00000000..a3718cc6 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenarioatomics/atomic_criteria.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python + +# Copyright (c) 2018-2020 Intel Corporation +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides additional atomic evaluation criteria required +to analyze if a scenario was completed successfully or failed. 
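# Aside: _tick_scenario above sorts agent failures into two classes:
# SensorReceivedNoData (a simulator-side problem) is re-raised as a hard
# RuntimeError, while anything else the agent raises becomes an AgentError
# that the caller can score as "Agent crashed". A minimal sketch of that
# dispatch pattern; the exception names here are stand-ins, not the
# leaderboard classes:

class SensorTimeout(Exception):
    """Stand-in for SensorReceivedNoData: the simulator stopped producing data."""

class AgentFailure(Exception):
    """Stand-in for AgentError: the agent itself misbehaved."""

def call_agent(agent):
    try:
        return agent()
    except SensorTimeout as e:
        raise RuntimeError(e)      # infrastructure fault: abort the run
    except Exception as e:
        raise AgentFailure(e)      # agent fault: charge it to the agent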
+ +Criteria should run continuously to monitor the state of a single actor, multiple +actors or environmental parameters. Hence, a termination is not required. + +The atomic criteria are implemented with py_trees. +""" + +import py_trees +from srunner.scenariomanager.timer import GameTime +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from srunner.scenariomanager.scenarioatomics.atomic_criteria import Criterion +from srunner.scenariomanager.traffic_events import TrafficEvent, TrafficEventType + + +class ActorSpeedAboveThresholdTest(Criterion): + """ + This test will fail if the actor has had its linear velocity lower than a specific value for + a specific amount of time + + Important parameters: + - actor: CARLA actor to be used for this test + - speed_threshold: speed required + - below_threshold_max_time: Maximum time (in seconds) the actor can remain under the speed threshold + - terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test + """ + + def __init__(self, actor, speed_threshold, below_threshold_max_time, + name="ActorSpeedAboveThresholdTest", terminate_on_failure=False): + """ + Class constructor. + """ + super(ActorSpeedAboveThresholdTest, self).__init__(name, actor, 0, terminate_on_failure=terminate_on_failure) + self.logger.debug("%s.__init__()" % (self.__class__.__name__)) + self._actor = actor + self._speed_threshold = speed_threshold + self._below_threshold_max_time = below_threshold_max_time + self._time_last_valid_state = None + + def update(self): + """ + Check if the actor speed is above the speed_threshold + """ + new_status = py_trees.common.Status.RUNNING + + linear_speed = CarlaDataProvider.get_velocity(self._actor) + if linear_speed is not None: + if linear_speed < self._speed_threshold and self._time_last_valid_state: + if (GameTime.get_time() - self._time_last_valid_state) > self._below_threshold_max_time: + # Game over. 
The actor has been "blocked" for too long + self.test_status = "FAILURE" + + # record event + vehicle_location = CarlaDataProvider.get_location(self._actor) + blocked_event = TrafficEvent(event_type=TrafficEventType.VEHICLE_BLOCKED) + ActorSpeedAboveThresholdTest._set_event_message(blocked_event, vehicle_location) + ActorSpeedAboveThresholdTest._set_event_dict(blocked_event, vehicle_location) + self.list_traffic_events.append(blocked_event) + else: + self._time_last_valid_state = GameTime.get_time() + + if self._terminate_on_failure and (self.test_status == "FAILURE"): + new_status = py_trees.common.Status.FAILURE + self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status)) + + return new_status + + @staticmethod + def _set_event_message(event, location): + """ + Sets the message of the event + """ + + event.set_message('Agent got blocked at (x={}, y={}, z={})'.format(round(location.x, 3), + round(location.y, 3), + round(location.z, 3))) + @staticmethod + def _set_event_dict(event, location): + """ + Sets the dictionary of the event + """ + event.set_dict({ + 'x': location.x, + 'y': location.y, + 'z': location.z, + }) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/__init__.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/checkpoint_tools.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/checkpoint_tools.py new file mode 100644 index 00000000..4fec5b08 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/checkpoint_tools.py @@ -0,0 +1,77 @@ +import json +try: + import simplejson as json +except ImportError: + import json +import requests +import os.path + + +def autodetect_proxy(): + proxies = {} + + proxy_https = os.getenv('HTTPS_PROXY', os.getenv('https_proxy', None)) + proxy_http = os.getenv('HTTP_PROXY', os.getenv('http_proxy', None)) + + if proxy_https: + proxies['https'] = proxy_https + if proxy_http: + proxies['http'] = proxy_http + + return proxies + + +def fetch_dict(endpoint): + data = None + if endpoint.startswith(('http:', 'https:', 'ftp:')): + proxies = autodetect_proxy() + + if proxies: + response = requests.get(url=endpoint, proxies=proxies) + else: + response = requests.get(url=endpoint) + + try: + data = response.json() + except json.decoder.JSONDecodeError: + data = {} + else: + data = {} + if os.path.exists(endpoint): + with open(endpoint) as fd: + try: + data = json.load(fd) + except json.JSONDecodeError: + data = {} + + return data + + +def create_default_json_msg(): + msg = { + "sensors": [], + "values": [], + "labels": [], + "entry_status": "", + "eligible": "", + "_checkpoint": { + "progress": [], + "records": [], + "global_record": {} + }, + } + + return msg + + +def save_dict(endpoint, data): + if endpoint.startswith(('http:', 'https:', 'ftp:')): + proxies = autodetect_proxy() + + if proxies: + _ = requests.patch(url=endpoint, headers={'content-type':'application/json'}, data=json.dumps(data, indent=4, sort_keys=True), proxies=proxies) + else: + _ = requests.patch(url=endpoint, headers={'content-type':'application/json'}, data=json.dumps(data, indent=4, sort_keys=True)) + else: + with open(endpoint, 'w') as fd: + json.dump(data, fd, indent=4, sort_keys=True) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/result_writer.py 
b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/result_writer.py new file mode 100644 index 00000000..11fbfad6 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/result_writer.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python + +# Copyright (c) 2018-2019 Intel Corporation +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module contains the result gatherer and write for CARLA scenarios. +It shall be used from the ScenarioManager only. +""" + +from __future__ import print_function + +import time +from tabulate import tabulate + + +class ResultOutputProvider(object): + + """ + This module contains the _result gatherer and write for CARLA scenarios. + It shall be used from the ScenarioManager only. + """ + + def __init__(self, data, global_result): + """ + - data contains all scenario-related information + - global_result is overall pass/fail info + """ + self._data = data + self._global_result = global_result + + self._start_time = time.strftime('%Y-%m-%d %H:%M:%S', + time.localtime(self._data.start_system_time)) + self._end_time = time.strftime('%Y-%m-%d %H:%M:%S', + time.localtime(self._data.end_system_time)) + + print(self.create_output_text()) + + def create_output_text(self): + """ + Creates the output message + """ + + # Create the title + output = "\n" + output += "\033[1m========= Results of {} (repetition {}) ------ {} \033[1m=========\033[0m\n".format( + self._data.scenario_tree.name, self._data.repetition_number, self._global_result) + output += "\n" + + # Simulation part + system_time = round(self._data.scenario_duration_system, 2) + game_time = round(self._data.scenario_duration_game, 2) + ratio = round(self._data.scenario_duration_game / self._data.scenario_duration_system, 3) + + list_statistics = [["Start Time", "{}".format(self._start_time)]] + list_statistics.extend([["End Time", "{}".format(self._end_time)]]) + list_statistics.extend([["Duration (System Time)", "{}s".format(system_time)]]) + list_statistics.extend([["Duration (Game Time)", "{}s".format(game_time)]]) + list_statistics.extend([["Ratio (System Time / Game Time)", "{}".format(ratio)]]) + + output += tabulate(list_statistics, tablefmt='fancy_grid') + output += "\n\n" + + # Criteria part + header = ['Criterion', 'Result', 'Value'] + list_statistics = [header] + + for criterion in self._data.scenario.get_criteria(): + + actual_value = criterion.actual_value + expected_value = criterion.expected_value_success + name = criterion.name + + result = criterion.test_status + + if result == "SUCCESS": + result = '\033[92m'+'SUCCESS'+'\033[0m' + elif result == "FAILURE": + result = '\033[91m'+'FAILURE'+'\033[0m' + + if name == "RouteCompletionTest": + actual_value = str(actual_value) + " %" + elif name == "OutsideRouteLanesTest": + actual_value = str(actual_value) + " %" + elif name == "CollisionTest": + actual_value = str(actual_value) + " times" + elif name == "RunningRedLightTest": + actual_value = str(actual_value) + " times" + elif name == "RunningStopTest": + actual_value = str(actual_value) + " times" + elif name == "InRouteTest": + actual_value = "" + elif name == "AgentBlockedTest": + actual_value = "" + + list_statistics.extend([[name, result, actual_value]]) + + # Timeout + name = "Timeout" + + actual_value = self._data.scenario_duration_game + expected_value = self._data.scenario.timeout + + if self._data.scenario_duration_game < self._data.scenario.timeout: + result = '\033[92m'+'SUCCESS'+'\033[0m' + else: + result = 
'\033[91m'+'FAILURE'+'\033[0m'
+
+        list_statistics.extend([[name, result, '']])
+
+        output += tabulate(list_statistics, tablefmt='fancy_grid')
+        output += "\n"
+
+        return output
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_indexer.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_indexer.py
new file mode 100644
index 00000000..43f21ec7
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_indexer.py
@@ -0,0 +1,72 @@
+from collections import OrderedDict
+from dictor import dictor
+
+import copy
+
+from srunner.scenarioconfigs.route_scenario_configuration import RouteScenarioConfiguration
+
+
+from leaderboard.utils.route_parser import RouteParser
+from leaderboard.utils.checkpoint_tools import fetch_dict, create_default_json_msg, save_dict
+
+
+class RouteIndexer():
+    def __init__(self, routes_file, scenarios_file, repetitions):
+        self._routes_file = routes_file
+        self._scenarios_file = scenarios_file
+        self._repetitions = repetitions
+        self._configs_dict = OrderedDict()
+        self._configs_list = []
+        self.routes_length = []
+        self._index = 0
+
+        # retrieve routes
+        route_configurations = RouteParser.parse_routes_file(self._routes_file, self._scenarios_file, False)
+
+        self.n_routes = len(route_configurations)
+        self.total = self.n_routes*self._repetitions
+
+        for i, config in enumerate(route_configurations):
+            for repetition in range(repetitions):
+                config.index = i * self._repetitions + repetition
+                config.repetition_index = repetition
+                self._configs_dict['{}.{}'.format(config.name, repetition)] = copy.copy(config)
+
+        self._configs_list = list(self._configs_dict.items())
+
+    def peek(self):
+        return not (self._index >= len(self._configs_list))
+
+    def next(self):
+        if self._index >= len(self._configs_list):
+            return None
+
+        key, config = self._configs_list[self._index]
+        self._index += 1
+
+        return config
+
+    def resume(self, endpoint):
+        data = fetch_dict(endpoint)
+
+        if data:
+            checkpoint_dict = dictor(data, '_checkpoint')
+            if checkpoint_dict and 'progress' in checkpoint_dict:
+                progress = checkpoint_dict['progress']
+                if not progress:
+                    current_route = 0
+                else:
+                    current_route, total_routes = progress
+                if current_route <= self.total:
+                    self._index = current_route
+                else:
+                    print('Problem reading checkpoint. Route id {} '
+                          'larger than maximum number of routes {}'.format(current_route, self.total))
+
+    def save_state(self, endpoint):
+        data = fetch_dict(endpoint)
+        if not data:
+            data = create_default_json_msg()
+        data['_checkpoint']['progress'] = [self._index, self.total]
+
+        save_dict(endpoint, data)
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_manipulation.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_manipulation.py
new file mode 100644
index 00000000..ab29ae3f
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_manipulation.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+# Copyright (c) 2018-2019 Intel Labs.
+# authors: German Ros (german.ros@intel.com), Felipe Codevilla (felipe.alcm@gmail.com)
+#
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+Module to manipulate the routes, by making them more or less dense (up to a certain parameter).
+It also contains functions to convert the CARLA world locations to GPS coordinates.
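# Aside: RouteIndexer above flattens (route, repetition) pairs into one
# ordered list and persists a single integer cursor, so resuming only needs
# the saved [index, total] pair from the checkpoint. A self-contained sketch
# of the same cursor arithmetic (function name is ours):

def flatten(n_routes, repetitions):
    # Mirrors config.index = i * repetitions + repetition in __init__ above.
    return [(i, r, i * repetitions + r)
            for i in range(n_routes) for r in range(repetitions)]

entries = flatten(n_routes=2, repetitions=3)
print(entries[-1])   # (1, 2, 5): last route, last repetition
print(len(entries))  # 6 == n_routes * repetitions, the indexer's .total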
+""" + +import math +import xml.etree.ElementTree as ET + +from agents.navigation.global_route_planner import GlobalRoutePlanner +#from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO +from agents.navigation.local_planner import RoadOption + + +def _location_to_gps(lat_ref, lon_ref, location): + """ + Convert from world coordinates to GPS coordinates + :param lat_ref: latitude reference for the current map + :param lon_ref: longitude reference for the current map + :param location: location to translate + :return: dictionary with lat, lon and height + """ + + EARTH_RADIUS_EQUA = 6378137.0 # pylint: disable=invalid-name + scale = math.cos(lat_ref * math.pi / 180.0) + mx = scale * lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0 + my = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0)) + mx += location.x + my -= location.y + + lon = mx * 180.0 / (math.pi * EARTH_RADIUS_EQUA * scale) + lat = 360.0 * math.atan(math.exp(my / (EARTH_RADIUS_EQUA * scale))) / math.pi - 90.0 + z = location.z + + return {'lat': lat, 'lon': lon, 'z': z} + + +def location_route_to_gps(route, lat_ref, lon_ref): + """ + Locate each waypoint of the route into gps, (lat long ) representations. + :param route: + :param lat_ref: + :param lon_ref: + :return: + """ + gps_route = [] + + for transform, connection in route: + gps_point = _location_to_gps(lat_ref, lon_ref, transform.location) + gps_route.append((gps_point, connection)) + + return gps_route + + +def _get_latlon_ref(world): + """ + Convert from waypoints world coordinates to CARLA GPS coordinates + :return: tuple with lat and lon coordinates + """ + xodr = world.get_map().to_opendrive() + tree = ET.ElementTree(ET.fromstring(xodr)) + + # default reference + lat_ref = 42.0 + lon_ref = 2.0 + + for opendrive in tree.iter("OpenDRIVE"): + for header in opendrive.iter("header"): + for georef in header.iter("geoReference"): + if georef.text: + str_list = georef.text.split(' ') + for item in str_list: + if '+lat_0' in item: + lat_ref = float(item.split('=')[1]) + if '+lon_0' in item: + lon_ref = float(item.split('=')[1]) + return lat_ref, lon_ref + + +def downsample_route(route, sample_factor): + """ + Downsample the route by some factor. + :param route: the trajectory , has to contain the waypoints and the road options + :param sample_factor: Maximum distance between samples + :return: returns the ids of the final route that can + """ + + ids_to_sample = [] + prev_option = None + dist = 0 + + for i, point in enumerate(route): + curr_option = point[1] + + # Lane changing + if curr_option in (RoadOption.CHANGELANELEFT, RoadOption.CHANGELANERIGHT): + ids_to_sample.append(i) + dist = 0 + + # When road option changes + elif prev_option != curr_option and prev_option not in (RoadOption.CHANGELANELEFT, RoadOption.CHANGELANERIGHT): + ids_to_sample.append(i) + dist = 0 + + # After a certain max distance + elif dist > sample_factor: + ids_to_sample.append(i) + dist = 0 + + # At the end + elif i == len(route) - 1: + ids_to_sample.append(i) + dist = 0 + + # Compute the distance traveled + else: + curr_location = point[0].location + prev_location = route[i-1][0].location + dist += curr_location.distance(prev_location) + + prev_option = curr_option + + return ids_to_sample + + +def interpolate_trajectory(world, waypoints_trajectory, hop_resolution=1.0): + """ + Given some raw keypoints interpolate a full dense trajectory to be used by the user. 
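# Aside: _location_to_gps above is a spherical-Mercator shift: the map origin
# (lat_ref, lon_ref) is projected to meters, the CARLA x/y offset is added
# (y negated, since CARLA's y axis points opposite to northing), and the sum
# is projected back. A self-contained copy of the same math, exercised with
# the default references that _get_latlon_ref falls back to (42.0, 2.0):

import math

EARTH_RADIUS_EQUA = 6378137.0

def location_to_gps(lat_ref, lon_ref, x, y, z):
    scale = math.cos(lat_ref * math.pi / 180.0)
    mx = scale * lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0
    my = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0))
    mx += x
    my -= y
    lon = mx * 180.0 / (math.pi * EARTH_RADIUS_EQUA * scale)
    lat = 360.0 * math.atan(math.exp(my / (EARTH_RADIUS_EQUA * scale))) / math.pi - 90.0
    return lat, lon, z

# The origin maps back to the reference coordinates, and +x is due east:
print(location_to_gps(42.0, 2.0, 0.0, 0.0, 0.0))    # (42.0, 2.0, 0.0) up to float error
print(location_to_gps(42.0, 2.0, 100.0, 0.0, 0.0))  # longitude slightly above 2.0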
+    returns the full interpolated route, both in GPS coordinates and in its original form,
+    plus the raw waypoint route.
+
+    Args:
+        - world: a reference to the CARLA world so we can use the planner
+        - waypoints_trajectory: the current coarse trajectory
+        - hop_resolution: resolution of the interpolation, i.e. how dense the resulting trajectory will be
+    """
+
+    #dao = GlobalRoutePlannerDAO(world.get_map(), hop_resolution)
+    #grp = GlobalRoutePlanner(dao)
+    grp = GlobalRoutePlanner(world.get_map(), hop_resolution)
+    #grp.setup()
+    # Obtain route plan
+    route = []
+    wp_route = []
+    for i in range(len(waypoints_trajectory) - 1):  # Goes until the one before the last.
+
+        waypoint = waypoints_trajectory[i]
+        waypoint_next = waypoints_trajectory[i + 1]
+        interpolated_trace = grp.trace_route(waypoint, waypoint_next)
+        for wp_tuple in interpolated_trace:
+            route.append((wp_tuple[0].transform, wp_tuple[1]))
+            wp_route.append((wp_tuple[0], wp_tuple[1]))
+
+    lat_ref, lon_ref = _get_latlon_ref(world)
+
+    return location_route_to_gps(route, lat_ref, lon_ref), route, wp_route
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_parser.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_parser.py
new file mode 100644
index 00000000..dabb3779
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/route_parser.py
@@ -0,0 +1,357 @@
+#!/usr/bin/env python
+
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+Module used to parse all the route and scenario configuration parameters.
+"""
+from collections import OrderedDict
+import json
+import math
+import xml.etree.ElementTree as ET
+
+import carla
+from agents.navigation.local_planner import RoadOption
+from srunner.scenarioconfigs.route_scenario_configuration import RouteScenarioConfiguration
+
+# TODO check this threshold, it could be a bit larger but not so large that we cluster scenarios.
+TRIGGER_THRESHOLD = 2.0  # Threshold to say if a trigger position is new or repeated, works for matching positions
+TRIGGER_ANGLE_THRESHOLD = 10  # Threshold to say if two angles can be considered matching when matching transforms.
+
+# for loading predefined weathers while parsing routes
+WEATHERS = {
+    '1': carla.WeatherParameters.ClearNoon,
+    '2': carla.WeatherParameters.ClearSunset,
+    '3': carla.WeatherParameters.CloudyNoon,
+    '4': carla.WeatherParameters.CloudySunset,
+    '5': carla.WeatherParameters.WetNoon,
+    '6': carla.WeatherParameters.WetSunset,
+    '7': carla.WeatherParameters.MidRainyNoon,
+    '8': carla.WeatherParameters.MidRainSunset,
+    '9': carla.WeatherParameters.WetCloudyNoon,
+    '10': carla.WeatherParameters.WetCloudySunset,
+    '11': carla.WeatherParameters.HardRainNoon,
+    '12': carla.WeatherParameters.HardRainSunset,
+    '13': carla.WeatherParameters.SoftRainNoon,
+    '14': carla.WeatherParameters.SoftRainSunset,
+}
+
+
+class RouteParser(object):
+
+    """
+    Pure static class used to parse all the route and scenario configuration parameters.
+    """
+
+    @staticmethod
+    def parse_annotations_file(annotation_filename):
+        """
+        Return the annotations of the positions where the scenarios are going to happen.
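# Aside: parse_annotations_file (whose body follows) flattens the per-town
# list under 'available_scenarios' into one OrderedDict keyed by town. The
# JSON it expects therefore looks like this hand-written miniature; the
# scenario name and coordinates are made up for illustration, the key layout
# matches what scan_route_for_scenarios reads further below:

import json
from collections import OrderedDict

sample = '''
{
  "available_scenarios": [
    {"Town01": [{"scenario_type": "Scenario3",
                 "available_event_configurations": [
                     {"transform": {"x": "110.0", "y": "55.0", "z": "0.0", "yaw": "180.0"}}]}]}
  ]
}
'''

annotation_dict = json.loads(sample, object_pairs_hook=OrderedDict)
final_dict = OrderedDict()
for town_dict in annotation_dict['available_scenarios']:
    final_dict.update(town_dict)
print(list(final_dict.keys()))  # ['Town01']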
+        :param annotation_filename: the filename for the annotations file
+        :return:
+        """
+        with open(annotation_filename, 'r') as f:
+            annotation_dict = json.loads(f.read(), object_pairs_hook=OrderedDict)
+
+        final_dict = OrderedDict()
+
+        for town_dict in annotation_dict['available_scenarios']:
+            final_dict.update(town_dict)
+
+        return final_dict  # the file stores each town's scenarios under a one-element dict keyed by the map name
+
+    @staticmethod
+    def parse_routes_file(route_filename, scenario_file, single_route=None):
+        """
+        Returns a list of route elements.
+        :param route_filename: the path to a set of routes.
+        :param single_route: If set, only this route shall be returned
+        :return: List of dicts containing the waypoints, id and town of the routes
+        """
+
+        list_route_descriptions = []
+        tree = ET.parse(route_filename)
+        for route in tree.iter("route"):
+
+            route_id = route.attrib['id']
+            if single_route and route_id != single_route:
+                continue
+
+            new_config = RouteScenarioConfiguration()
+            new_config.town = route.attrib['town']
+            new_config.name = "RouteScenario_{}".format(route_id)
+            new_config.weather = RouteParser.parse_weather(route)  # default: parse_weather(route)
+            new_config.scenario_file = scenario_file
+
+            waypoint_list = []  # the list of waypoints that can be found on this route
+            for waypoint in route.iter('waypoint'):
+                waypoint_list.append(carla.Location(x=float(waypoint.attrib['x']),
+                                                    y=float(waypoint.attrib['y']),
+                                                    z=float(waypoint.attrib['z'])))
+
+            new_config.trajectory = waypoint_list
+
+            list_route_descriptions.append(new_config)
+
+        return list_route_descriptions
+
+    @staticmethod
+    def parse_weather(route):
+        """
+        Returns a carla.WeatherParameters with the corresponding weather for that route. If the route
+        has no weather attribute, the default one is triggered.
+        """
+
+        route_weather = route.find("weather")
+
+        if route_weather is None:
+            weather = carla.WeatherParameters(sun_altitude_angle=70, cloudiness=30)
+        else:
+            weather = carla.WeatherParameters()
+            for weather_attrib in route.iter("weather"):
+
+                if 'cloudiness' in weather_attrib.attrib:
+                    weather.cloudiness = float(weather_attrib.attrib['cloudiness'])
+                if 'precipitation' in weather_attrib.attrib:
+                    weather.precipitation = float(weather_attrib.attrib['precipitation'])
+                if 'precipitation_deposits' in weather_attrib.attrib:
+                    weather.precipitation_deposits = float(weather_attrib.attrib['precipitation_deposits'])
+                if 'wind_intensity' in weather_attrib.attrib:
+                    weather.wind_intensity = float(weather_attrib.attrib['wind_intensity'])
+                if 'sun_azimuth_angle' in weather_attrib.attrib:
+                    weather.sun_azimuth_angle = float(weather_attrib.attrib['sun_azimuth_angle'])
+                if 'sun_altitude_angle' in weather_attrib.attrib:
+                    weather.sun_altitude_angle = float(weather_attrib.attrib['sun_altitude_angle'])
+                if 'wetness' in weather_attrib.attrib:
+                    weather.wetness = float(weather_attrib.attrib['wetness'])
+                if 'fog_distance' in weather_attrib.attrib:
+                    weather.fog_distance = float(weather_attrib.attrib['fog_distance'])
+                if 'fog_density' in weather_attrib.attrib:
+                    weather.fog_density = float(weather_attrib.attrib['fog_density'])
+                if 'fog_falloff' in weather_attrib.attrib:
+                    weather.fog_falloff = float(weather_attrib.attrib['fog_falloff'])
+
+        return weather
+
+    @staticmethod
+    def parse_preset_weather(route):
+        """
+        Returns one of the 14 preset weather conditions. If the route
+        has no weather attribute, the default one is triggered.
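# Aside: parse_weather above copies any attribute present on the route's
# <weather> node onto a fresh carla.WeatherParameters, so a route can override
# just a few fields. A hedged, CARLA-free sketch of the same attribute-copy
# pattern over a hand-written XML snippet (the dict stands in for the
# WeatherParameters object):

import xml.etree.ElementTree as ET

route = ET.fromstring(
    '<route id="0" town="Town01">'
    '  <weather cloudiness="30" precipitation="60" sun_altitude_angle="45"/>'
    '</route>')

weather = {}  # stand-in for carla.WeatherParameters()
for weather_attrib in route.iter("weather"):
    for key in ('cloudiness', 'precipitation', 'precipitation_deposits',
                'wind_intensity', 'sun_azimuth_angle', 'sun_altitude_angle'):
        if key in weather_attrib.attrib:
            weather[key] = float(weather_attrib.attrib[key])

print(weather)  # {'cloudiness': 30.0, 'precipitation': 60.0, 'sun_altitude_angle': 45.0}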
+ """ + + if 'weather' not in route.attrib: + weather = carla.WeatherParameters(sun_altitude_angle=70, cloudiness=30) + else: + weather = WEATHERS[route.attrib['weather']] + + return weather + + @staticmethod + def check_trigger_position(new_trigger, existing_triggers): + """ + Check if this trigger position already exists or if it is a new one. + :param new_trigger: + :param existing_triggers: + :return: + """ + + for trigger_id in existing_triggers.keys(): + trigger = existing_triggers[trigger_id] + dx = trigger['x'] - new_trigger['x'] + dy = trigger['y'] - new_trigger['y'] + distance = math.sqrt(dx * dx + dy * dy) + + dyaw = (trigger['yaw'] - new_trigger['yaw']) % 360 + if distance < TRIGGER_THRESHOLD \ + and (dyaw < TRIGGER_ANGLE_THRESHOLD or dyaw > (360 - TRIGGER_ANGLE_THRESHOLD)): + return trigger_id + + return None + + @staticmethod + def convert_waypoint_float(waypoint): + """ + Convert waypoint values to float + """ + waypoint['x'] = float(waypoint['x']) + waypoint['y'] = float(waypoint['y']) + waypoint['z'] = float(waypoint['z']) + waypoint['yaw'] = float(waypoint['yaw']) + + @staticmethod + def match_world_location_to_route(world_location, route_description): + """ + We match this location to a given route. + world_location: + route_description: + """ + def match_waypoints(waypoint1, wtransform): + """ + Check if waypoint1 and wtransform are similar + """ + dx = float(waypoint1['x']) - wtransform.location.x + dy = float(waypoint1['y']) - wtransform.location.y + dz = float(waypoint1['z']) - wtransform.location.z + dpos = math.sqrt(dx * dx + dy * dy + dz * dz) + + dyaw = (float(waypoint1['yaw']) - wtransform.rotation.yaw) % 360 + + return dpos < TRIGGER_THRESHOLD \ + and (dyaw < TRIGGER_ANGLE_THRESHOLD or dyaw > (360 - TRIGGER_ANGLE_THRESHOLD)) + + match_position = 0 + # TODO this function can be optimized to run on Log(N) time + for route_waypoint in route_description: + if match_waypoints(world_location, route_waypoint[0]): + return match_position + match_position += 1 + + return None + + @staticmethod + def get_scenario_type(scenario, match_position, trajectory): + """ + Some scenarios have different types depending on the route. 
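# Aside: the yaw comparison in check_trigger_position / match_waypoints above
# reduces the angle difference modulo 360 and then accepts either tail of the
# circle, so headings on both sides of the 0/360 seam still match. A worked
# check with TRIGGER_ANGLE_THRESHOLD = 10:

TRIGGER_ANGLE_THRESHOLD = 10

def yaw_matches(yaw_a, yaw_b):
    dyaw = (yaw_a - yaw_b) % 360
    return dyaw < TRIGGER_ANGLE_THRESHOLD or dyaw > (360 - TRIGGER_ANGLE_THRESHOLD)

print(yaw_matches(359.0, 2.0))    # True: (359 - 2) % 360 = 357 > 350
print(yaw_matches(180.0, 150.0))  # False: 30 degrees apart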
+        :param scenario: the scenario name
+        :param match_position: the matching position for the scenario
+        :param trajectory: the route trajectory the ego is following
+        :return: tag representing this subtype
+
+        Also used to check which scenarios are not viable (such as a scenario
+        that triggers on a turn when the route doesn't turn)
+        WARNING: These tags are used at:
+            - VehicleTurningRoute
+            - SignalJunctionCrossingRoute
+        and changes to these tags will affect them
+        """
+
+        def check_this_waypoint(tuple_wp_turn):
+            """
+            Decides whether or not the waypoint will define the scenario behavior
+            """
+            if RoadOption.LANEFOLLOW == tuple_wp_turn[1]:
+                return False
+            elif RoadOption.CHANGELANELEFT == tuple_wp_turn[1]:
+                return False
+            elif RoadOption.CHANGELANERIGHT == tuple_wp_turn[1]:
+                return False
+            return True
+
+        # Unused tag for the rest of the scenarios;
+        # it can't be None as they are still valid scenarios
+        subtype = 'valid'
+
+        if scenario == 'Scenario4':
+            for tuple_wp_turn in trajectory[match_position:]:
+                if check_this_waypoint(tuple_wp_turn):
+                    if RoadOption.LEFT == tuple_wp_turn[1]:
+                        subtype = 'S4left'
+                    elif RoadOption.RIGHT == tuple_wp_turn[1]:
+                        subtype = 'S4right'
+                    else:
+                        subtype = None
+                    break  # Avoid checking all of them
+                subtype = None
+
+        if scenario == 'Scenario7':
+            for tuple_wp_turn in trajectory[match_position:]:
+                if check_this_waypoint(tuple_wp_turn):
+                    if RoadOption.LEFT == tuple_wp_turn[1]:
+                        subtype = 'S7left'
+                    elif RoadOption.RIGHT == tuple_wp_turn[1]:
+                        subtype = 'S7right'
+                    elif RoadOption.STRAIGHT == tuple_wp_turn[1]:
+                        subtype = 'S7opposite'
+                    else:
+                        subtype = None
+                    break  # Avoid checking all of them
+                subtype = None
+
+        if scenario == 'Scenario8':
+            for tuple_wp_turn in trajectory[match_position:]:
+                if check_this_waypoint(tuple_wp_turn):
+                    if RoadOption.LEFT == tuple_wp_turn[1]:
+                        subtype = 'S8left'
+                    else:
+                        subtype = None
+                    break  # Avoid checking all of them
+                subtype = None
+
+        if scenario == 'Scenario9':
+            for tuple_wp_turn in trajectory[match_position:]:
+                if check_this_waypoint(tuple_wp_turn):
+                    if RoadOption.RIGHT == tuple_wp_turn[1]:
+                        subtype = 'S9right'
+                    else:
+                        subtype = None
+                    break  # Avoid checking all of them
+                subtype = None
+
+        return subtype
+
+    @staticmethod
+    def scan_route_for_scenarios(route_name, trajectory, world_annotations):
+        """
+        Just returns a plain list of possible scenarios that can happen in this route by matching
+        the locations from the scenario into the route description
+
+        :return: A list of scenario definitions with their correspondent parameters
+        """
+
+        # the trigger dictionaries:
+        existent_triggers = OrderedDict()
+        # We have a table of IDs and trigger positions associated
+        possible_scenarios = OrderedDict()
+
+        # Keep track of the trigger ids being added
+        latest_trigger_id = 0
+
+        for town_name in world_annotations.keys():
+            if town_name != route_name:
+                continue
+
+            scenarios = world_annotations[town_name]
+            for scenario in scenarios:  # For each existing scenario
+                scenario_name = scenario["scenario_type"]
+                for event in scenario["available_event_configurations"]:
+                    waypoint = event['transform']  # trigger point of this scenario
+                    RouteParser.convert_waypoint_float(waypoint)
+                    # We matched the trigger point to the route; now check whether the route is affected
+                    match_position = RouteParser.match_world_location_to_route(
+                        waypoint, trajectory)
+                    if match_position is not None:
+                        # We matched a location for this scenario; create a scenario object so it
+                        # can be instantiated later
+                        if 'other_actors' in event:
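A condensed sketch of the subtype resolution for Scenario4: waypoints that carry no junction decision are skipped, and the first decision fixes the tag. RoadOption here is a minimal stand-in for the CARLA planner enum, and the lane-change options handled by check_this_waypoint are omitted for brevity.

    from enum import Enum

    class RoadOption(Enum):  # minimal stand-in for the CARLA planner enum
        LANEFOLLOW = 1
        LEFT = 2
        RIGHT = 3
        STRAIGHT = 4

    def scenario4_subtype(trajectory, match_position):
        """First junction decision after the trigger decides the subtype (None = not viable)."""
        for _, option in trajectory[match_position:]:
            if option == RoadOption.LANEFOLLOW:
                continue              # not a junction decision yet: keep scanning
            if option == RoadOption.LEFT:
                return 'S4left'
            if option == RoadOption.RIGHT:
                return 'S4right'
            return None               # straight or other: Scenario4 cannot trigger here

    traj = [('wp0', RoadOption.LANEFOLLOW), ('wp1', RoadOption.LEFT)]
    print(scenario4_subtype(traj, 0))  # S4left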
+                            other_vehicles = event['other_actors']
+                        else:
+                            other_vehicles = None
+                        scenario_subtype = RouteParser.get_scenario_type(scenario_name, match_position,
+                                                                         trajectory)
+                        if scenario_subtype is None:
+                            continue
+                        scenario_description = {
+                            'name': scenario_name,
+                            'other_actors': other_vehicles,
+                            'trigger_position': waypoint,
+                            'scenario_type': scenario_subtype,  # some scenarios have route-dependent configurations
+                        }
+
+                        trigger_id = RouteParser.check_trigger_position(waypoint, existent_triggers)
+                        if trigger_id is None:
+                            # This trigger does not exist yet: create a new reference in the existing triggers
+                            existent_triggers.update({latest_trigger_id: waypoint})
+                            # Update a reference for this trigger on the possible scenarios
+                            possible_scenarios.update({latest_trigger_id: []})
+                            trigger_id = latest_trigger_id
+                            # Increment the latest trigger
+                            latest_trigger_id += 1
+
+                        possible_scenarios[trigger_id].append(scenario_description)
+
+        return possible_scenarios, existent_triggers
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
new file mode 100644
index 00000000..7ea26d58
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python

+# Copyright (c) 2018-2019 Intel Corporation
+#
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+This module contains a statistics manager for the CARLA AD leaderboard
+"""
+
+from __future__ import print_function
+
+from dictor import dictor
+import math
+import sys
+
+from srunner.scenariomanager.traffic_events import TrafficEventType
+
+from leaderboard.utils.checkpoint_tools import fetch_dict, save_dict, create_default_json_msg
+
+PENALTY_COLLISION_PEDESTRIAN = 0.50
+PENALTY_COLLISION_VEHICLE = 0.60
+PENALTY_COLLISION_STATIC = 0.65
+PENALTY_TRAFFIC_LIGHT = 0.70
+PENALTY_STOP = 0.80
+
+
+class RouteRecord():
+    def __init__(self):
+        self.route_id = None
+        self.index = None
+        self.status = 'Started'
+        self.infractions = {
+            'collisions_pedestrian': [],
+            'collisions_vehicle': [],
+            'collisions_layout': [],
+            'red_light': [],
+            'stop_infraction': [],
+            'outside_route_lanes': [],
+            'route_dev': [],
+            'route_timeout': [],
+            'vehicle_blocked': []
+        }
+
+        self.scores = {
+            'score_route': 0,
+            'score_penalty': 0,
+            'score_composed': 0
+        }
+
+        self.meta = {}
+
+
+def to_route_record(record_dict):
+    record = RouteRecord()
+    for key, value in record_dict.items():
+        setattr(record, key, value)
+
+    return record
+
+
+def compute_route_length(config):
+    trajectory = config.trajectory
+
+    route_length = 0.0
+    previous_location = None
+    for location in trajectory:
+        if previous_location:
+            dist = math.sqrt((location.x - previous_location.x) * (location.x - previous_location.x) +
+                             (location.y - previous_location.y) * (location.y - previous_location.y) +
+                             (location.z - previous_location.z) * (location.z - previous_location.z))
+            route_length += dist
+        previous_location = location
+
+    return route_length
+
+
+class StatisticsManager(object):
+
+    """
+    This is the statistics manager for the CARLA leaderboard.
+    It gathers data at runtime via the scenario evaluation criteria.
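compute_route_length above is a straight polyline sum over the trajectory. A runnable check with made-up coordinates, where SimpleNamespace stands in for carla.Location:

    import math
    from types import SimpleNamespace as Loc

    trajectory = [Loc(x=0.0, y=0.0, z=0.0), Loc(x=3.0, y=4.0, z=0.0), Loc(x=3.0, y=4.0, z=12.0)]

    length = 0.0
    prev = None
    for loc in trajectory:
        if prev is not None:
            length += math.sqrt((loc.x - prev.x) ** 2 + (loc.y - prev.y) ** 2 + (loc.z - prev.z) ** 2)
        prev = loc

    print(length)  # 17.0  (a 3-4-5 segment of length 5, then a vertical 12)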
+ """ + + def __init__(self): + self._master_scenario = None + self._registry_route_records = [] + + def resume(self, endpoint): + data = fetch_dict(endpoint) + + if data and dictor(data, '_checkpoint.records'): + records = data['_checkpoint']['records'] + + for record in records: + self._registry_route_records.append(to_route_record(record)) + + def set_route(self, route_id, index): + + self._master_scenario = None + route_record = RouteRecord() + route_record.route_id = route_id + route_record.index = index + + if index < len(self._registry_route_records): + # the element already exists and therefore we update it + self._registry_route_records[index] = route_record + else: + self._registry_route_records.append(route_record) + + def set_scenario(self, scenario): + """ + Sets the scenario from which the statistics willb e taken + """ + self._master_scenario = scenario + + def compute_route_statistics(self, config, duration_time_system=-1, duration_time_game=-1, failure=""): + """ + Compute the current statistics by evaluating all relevant scenario criteria + """ + index = config.index + + if not self._registry_route_records or index >= len(self._registry_route_records): + raise Exception('Critical error with the route registry.') + + # fetch latest record to fill in + route_record = self._registry_route_records[index] + + target_reached = False + score_penalty = 1.0 + score_route = 0.0 + + route_record.meta['duration_system'] = duration_time_system + route_record.meta['duration_game'] = duration_time_game + route_record.meta['route_length'] = compute_route_length(config) + + route_record.meta['total_steps'] = config.agent.step + + if self._master_scenario: + if self._master_scenario.timeout_node.timeout: + route_record.infractions['route_timeout'].append('Route timeout.') + failure = "Agent timed out" + + for node in self._master_scenario.get_criteria(): + if node.list_traffic_events: + # analyze all traffic events + for event in node.list_traffic_events: + if event.get_type() == TrafficEventType.COLLISION_STATIC: + score_penalty *= PENALTY_COLLISION_STATIC + route_record.infractions['collisions_layout'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.COLLISION_PEDESTRIAN: + score_penalty *= PENALTY_COLLISION_PEDESTRIAN + route_record.infractions['collisions_pedestrian'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.COLLISION_VEHICLE: + score_penalty *= PENALTY_COLLISION_VEHICLE + route_record.infractions['collisions_vehicle'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.OUTSIDE_ROUTE_LANES_INFRACTION: + score_penalty *= (1 - event.get_dict()['percentage'] / 100) + route_record.infractions['outside_route_lanes'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.TRAFFIC_LIGHT_INFRACTION: + score_penalty *= PENALTY_TRAFFIC_LIGHT + route_record.infractions['red_light'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.ROUTE_DEVIATION: + route_record.infractions['route_dev'].append(event.get_message()) + failure = "Agent deviated from the route" + + elif event.get_type() == TrafficEventType.STOP_INFRACTION: + score_penalty *= PENALTY_STOP + route_record.infractions['stop_infraction'].append(event.get_message()) + + elif event.get_type() == TrafficEventType.VEHICLE_BLOCKED: + route_record.infractions['vehicle_blocked'].append(event.get_message()) + failure = "Agent got blocked" + + elif event.get_type() == TrafficEventType.ROUTE_COMPLETED: + score_route = 100.0 + 
+                            target_reached = True
+                        elif event.get_type() == TrafficEventType.ROUTE_COMPLETION:
+                            if not target_reached:
+                                if event.get_dict():
+                                    score_route = event.get_dict()['route_completed']
+                                else:
+                                    score_route = 0
+
+        # update route scores
+        route_record.scores['score_route'] = score_route
+        route_record.scores['score_penalty'] = score_penalty
+        route_record.scores['score_composed'] = max(score_route * score_penalty, 0.0)
+
+        # update status
+        if target_reached:
+            route_record.status = 'Completed'
+        else:
+            route_record.status = 'Failed'
+            if failure:
+                route_record.status += ' - ' + failure
+
+        return route_record
+
+    def compute_global_statistics(self, total_routes):
+        global_record = RouteRecord()
+        global_record.route_id = -1
+        global_record.index = -1
+        global_record.status = 'Completed'
+
+        if self._registry_route_records:
+            for route_record in self._registry_route_records:
+                global_record.scores['score_route'] += route_record.scores['score_route']
+                global_record.scores['score_penalty'] += route_record.scores['score_penalty']
+                global_record.scores['score_composed'] += route_record.scores['score_composed']
+
+                for key in global_record.infractions.keys():
+                    route_length_kms = max(route_record.scores['score_route'] * route_record.meta['route_length'] / 1000.0, 0.001)
+                    if isinstance(global_record.infractions[key], list):
+                        global_record.infractions[key] = len(route_record.infractions[key]) / route_length_kms
+                    else:
+                        global_record.infractions[key] += len(route_record.infractions[key]) / route_length_kms
+
+                if route_record.status != 'Completed':
+                    global_record.status = 'Failed'
+                    if 'exceptions' not in global_record.meta:
+                        global_record.meta['exceptions'] = []
+                    global_record.meta['exceptions'].append((route_record.route_id,
+                                                             route_record.index,
+                                                             route_record.status))
+
+        global_record.scores['score_route'] /= float(total_routes)
+        global_record.scores['score_penalty'] /= float(total_routes)
+        global_record.scores['score_composed'] /= float(total_routes)
+
+        return global_record
+
+    @staticmethod
+    def save_record(route_record, index, endpoint):
+        data = fetch_dict(endpoint)
+        if not data:
+            data = create_default_json_msg()
+
+        stats_dict = route_record.__dict__
+        record_list = data['_checkpoint']['records']
+        if index > len(record_list):
+            print('Error! 
No enough entries in the list') + sys.exit(-1) + elif index == len(record_list): + record_list.append(stats_dict) + else: + record_list[index] = stats_dict + + save_dict(endpoint, data) + + @staticmethod + def save_global_record(route_record, sensors, total_routes, endpoint): + data = fetch_dict(endpoint) + if not data: + data = create_default_json_msg() + + stats_dict = route_record.__dict__ + data['_checkpoint']['global_record'] = stats_dict + data['values'] = ['{:.3f}'.format(stats_dict['scores']['score_composed']), + '{:.3f}'.format(stats_dict['scores']['score_route']), + '{:.3f}'.format(stats_dict['scores']['score_penalty']), + # infractions + '{:.3f}'.format(stats_dict['infractions']['collisions_pedestrian']), + '{:.3f}'.format(stats_dict['infractions']['collisions_vehicle']), + '{:.3f}'.format(stats_dict['infractions']['collisions_layout']), + '{:.3f}'.format(stats_dict['infractions']['red_light']), + '{:.3f}'.format(stats_dict['infractions']['stop_infraction']), + '{:.3f}'.format(stats_dict['infractions']['outside_route_lanes']), + '{:.3f}'.format(stats_dict['infractions']['route_dev']), + '{:.3f}'.format(stats_dict['infractions']['route_timeout']), + '{:.3f}'.format(stats_dict['infractions']['vehicle_blocked']) + ] + + data['labels'] = ['Avg. driving score', + 'Avg. route completion', + 'Avg. infraction penalty', + 'Collisions with pedestrians', + 'Collisions with vehicles', + 'Collisions with layout', + 'Red lights infractions', + 'Stop sign infractions', + 'Off-road infractions', + 'Route deviations', + 'Route timeouts', + 'Agent blocked' + ] + + entry_status = "Finished" + eligible = True + + route_records = data["_checkpoint"]["records"] + progress = data["_checkpoint"]["progress"] + + if progress[1] != total_routes: + raise Exception('Critical error with the route registry.') + + if len(route_records) != total_routes or progress[0] != progress[1]: + entry_status = "Finished with missing data" + eligible = False + else: + for route in route_records: + route_status = route["status"] + if "Agent" in route_status: + entry_status = "Finished with agent errors" + break + + data['entry_status'] = entry_status + data['eligible'] = eligible + + save_dict(endpoint, data) + + @staticmethod + def save_sensors(sensors, endpoint): + data = fetch_dict(endpoint) + if not data: + data = create_default_json_msg() + + if not data['sensors']: + data['sensors'] = sensors + + save_dict(endpoint, data) + + @staticmethod + def save_entry_status(entry_status, eligible, endpoint): + data = fetch_dict(endpoint) + if not data: + data = create_default_json_msg() + + data['entry_status'] = entry_status + data['eligible'] = eligible + save_dict(endpoint, data) + + @staticmethod + def clear_record(endpoint): + if not endpoint.startswith(('http:', 'https:', 'ftp:')): + with open(endpoint, 'w') as fd: + fd.truncate(0) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/requirements.txt b/behavior_metrics/brains/CARLA/TCP/leaderboard/requirements.txt new file mode 100644 index 00000000..702fd25c --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/requirements.txt @@ -0,0 +1,5 @@ +dictor +requests +opencv-python==4.2.0.32 +pygame +tabulate diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scenario_runner b/behavior_metrics/brains/CARLA/TCP/leaderboard/scenario_runner new file mode 160000 index 00000000..d12d8bb7 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scenario_runner @@ -0,0 +1 @@ +Subproject commit d12d8bb788201ebf6f03e55b9c5169f9dcb2ea87 diff --git 
a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/Dockerfile.master b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/Dockerfile.master new file mode 100644 index 00000000..90c74043 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/Dockerfile.master @@ -0,0 +1,96 @@ +FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04 + +ARG HTTP_PROXY +ARG HTTPS_PROXY +ARG http_proxy + +RUN apt-get update && apt-get install --reinstall -y locales && locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US +ENV LC_ALL en_US.UTF-8 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + git \ + curl \ + vim \ + ca-certificates \ + libjpeg-dev \ + libpng16-16 \ + libtiff5 \ + libpng-dev \ + python-dev \ + python3.5 \ + python3.5-dev \ + python-networkx \ + python-setuptools \ + python3-setuptools \ + python-pip \ + python3-pip && \ + pip install --upgrade pip && \ + pip3 install --upgrade pip && \ + rm -rf /var/lib/apt/lists/* + +# installing conda +RUN curl -o ~/miniconda.sh -LO https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda clean -ya && \ + /opt/conda/bin/conda create -n python37 python=3.7 numpy networkx scipy six requests + +RUN packages='py_trees==0.8.3 shapely six dictor requests ephem tabulate' \ + && pip3 install ${packages} + +WORKDIR /workspace +COPY .tmp/PythonAPI /workspace/CARLA/PythonAPI +ENV CARLA_ROOT /workspace/CARLA + +ENV PATH "/workspace/CARLA/PythonAPI/carla/dist/carla-leaderboard.egg":/opt/conda/envs/python37/bin:/opt/conda/envs/bin:$PATH + +# adding CARLA egg to default python environment +RUN pip install --user setuptools py_trees==0.8.3 psutil shapely six dictor requests ephem tabulate + +ENV SCENARIO_RUNNER_ROOT "/workspace/scenario_runner" +ENV LEADERBOARD_ROOT "/workspace/leaderboard" +ENV TEAM_CODE_ROOT "/workspace/team_code" +ENV PYTHONPATH "/workspace/CARLA/PythonAPI/carla/dist/carla-leaderboard.egg":"${SCENARIO_RUNNER_ROOT}":"${CARLA_ROOT}/PythonAPI/carla":"${LEADERBOARD_ROOT}":${PYTHONPATH} + +COPY .tmp/scenario_runner ${SCENARIO_RUNNER_ROOT} +COPY .tmp/leaderboard ${LEADERBOARD_ROOT} +COPY .tmp/team_code ${TEAM_CODE_ROOT} + +RUN mkdir -p /workspace/results +RUN chmod +x /workspace/leaderboard/scripts/run_evaluation.sh + + +######################################################################################################################## +######################################################################################################################## +############ BEGINNING OF USER COMMANDS ############ +######################################################################################################################## +######################################################################################################################## + +ENV TEAM_AGENT ${TEAM_CODE_ROOT}/npc_agent.py +ENV TEAM_CONFIG ${TEAM_CODE_ROOT}/YOUR_CONFIG_FILE +ENV CHALLENGE_TRACK_CODENAME SENSORS + +######################################################################################################################## +######################################################################################################################## +############ END OF USER COMMANDS ############ +######################################################################################################################## 
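These ENV values are consumed at runtime by the evaluator and the team code; for instance, base_agent.py later in this patch reads SAVE_PATH via os.environ.get. An illustrative read in Python, with the variable names taken from this Dockerfile and the defaults being fallbacks only for running outside the container:

    import os

    team_agent = os.environ.get('TEAM_AGENT', 'team_code/npc_agent.py')
    repetitions = int(os.environ.get('REPETITIONS', '1'))
    save_path = os.environ.get('SAVE_PATH', None)  # optional: enables data saving
    print(team_agent, repetitions, save_path)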
+######################################################################################################################## + +ENV SCENARIOS ${LEADERBOARD_ROOT}/data/all_towns_traffic_scenarios_public.json +ENV ROUTES ${LEADERBOARD_ROOT}/data/routes_training.xml +ENV REPETITIONS 1 +ENV CHECKPOINT_ENDPOINT /workspace/results/results.json +ENV DEBUG_CHALLENGE 0 + +ENV HTTP_PROXY "" +ENV HTTPS_PROXY "" +ENV http_proxy "" +ENV https_proxy "" + + +CMD ["/bin/bash"] diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/code_check_and_formatting.sh b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/code_check_and_formatting.sh new file mode 100644 index 00000000..e4b34c73 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/code_check_and_formatting.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +autopep8_params="--in-place --max-line-length=120" +pylint_params="--rcfile=.pylintrc" + +files=`find leaderboard/ -type f -name "*.py"` +for file in ${files[@]} +do + autopep8 $file ${autopep8_params} + pylint --rcfile=.pylintrc ${pylint_params} $file +done diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/data_collection.sh b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/data_collection.sh new file mode 100644 index 00000000..d4f25058 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/data_collection.sh @@ -0,0 +1,45 @@ +#!/bin/bash +export CARLA_ROOT= PATH_TO_CARLA +export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh +export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI +export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla +export PYTHONPATH=$PYTHONPATH:$CARLA_ROOT/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg +export PYTHONPATH=$PYTHONPATH:leaderboard +export PYTHONPATH=$PYTHONPATH:leaderboard/team_code +export PYTHONPATH=$PYTHONPATH:scenario_runner + +export LEADERBOARD_ROOT=leaderboard +export CHALLENGE_TRACK_CODENAME=SENSORS +export PORT=2000 +export TM_PORT=8000 +export DEBUG_CHALLENGE=0 +export REPETITIONS=1 # multiple evaluation runs +export RESUME=True +export DATA_COLLECTION=True + + +# Roach data collection +export ROUTES=leaderboard/data/TCP_training_routes/routes_town01.xml +export TEAM_AGENT=team_code/roach_ap_agent.py +export TEAM_CONFIG=roach/config/config_agent.yaml +export CHECKPOINT_ENDPOINT=data_collect_town01_results.json +export SCENARIOS=leaderboard/data/scenarios/all_towns_traffic_scenarios.json +export SAVE_PATH=data/data_collect_town01_results/ + + + +python3 ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \ +--scenarios=${SCENARIOS} \ +--routes=${ROUTES} \ +--repetitions=${REPETITIONS} \ +--track=${CHALLENGE_TRACK_CODENAME} \ +--checkpoint=${CHECKPOINT_ENDPOINT} \ +--agent=${TEAM_AGENT} \ +--agent-config=${TEAM_CONFIG} \ +--debug=${DEBUG_CHALLENGE} \ +--record=${RECORD_PATH} \ +--resume=${RESUME} \ +--port=${PORT} \ +--trafficManagerPort=${TM_PORT} + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/make_docker.sh b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/make_docker.sh new file mode 100644 index 00000000..85d7b924 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/make_docker.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +if [ -z "$CARLA_ROOT" ] +then + echo "Error $CARLA_ROOT is empty. Set \$CARLA_ROOT as an environment variable first." + exit 1 +fi + +if [ -z "$SCENARIO_RUNNER_ROOT" ] +then echo "Error $SCENARIO_RUNNER_ROOT is empty. Set \$SCENARIO_RUNNER_ROOT as an environment variable first." 
+ exit 1 +fi + +if [ -z "$LEADERBOARD_ROOT" ] +then echo "Error $LEADERBOARD_ROOT is empty. Set \$LEADERBOARD_ROOT as an environment variable first." + exit 1 +fi + +if [ -z "$TEAM_CODE_ROOT" ] +then echo "Error $TEAM_CODE_ROOT is empty. Set \$TEAM_CODE_ROOT as an environment variable first." + exit 1 +fi + +mkdir .tmp + +cp -fr ${CARLA_ROOT}/PythonAPI .tmp +mv .tmp/PythonAPI/carla/dist/carla-*.egg .tmp/PythonAPI/carla/dist/carla-leaderboard.egg +cp -fr ${SCENARIO_RUNNER_ROOT}/ .tmp +cp -fr ${LEADERBOARD_ROOT}/ .tmp +cp -fr ${TEAM_CODE_ROOT}/ .tmp/team_code + +# build docker image +docker build --force-rm -t leaderboard-user -f ${LEADERBOARD_ROOT}/scripts/Dockerfile.master . + +rm -fr .tmp diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/pretty_print_json.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/pretty_print_json.py new file mode 100644 index 00000000..5aeffb95 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/pretty_print_json.py @@ -0,0 +1,111 @@ +# !/usr/bin/env python +# Copyright (c) 2020 Intel Corporation. +# authors: German Ros (german.ros@intel.com) +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +Create a human readable version of the scores provided by the leaderboard. +""" + +from __future__ import print_function + +import argparse +from argparse import RawTextHelpFormatter +from dictor import dictor +import json +from tabulate import tabulate + + +def prettify_json(args): + with open(args.file) as fd: + json_dict = json.load(fd) + + if not json_dict: + print('[Error] The file [{}] could not be parsed.'.format(args.file)) + return -1 + + progress = dictor(json_dict, '_checkpoint.progress') + records_table = dictor(json_dict, '_checkpoint.records') + sensors = dictor(json_dict, 'sensors') + labels_scores = dictor(json_dict, 'labels') + scores = dictor(json_dict, 'values') + + # compose output + output = "" + + if progress: + output += '* {}% ({}/{}) routes completed\n'.format(100.0*progress[0]/float(progress[1]), + progress[0], + progress[1]) + if sensors: + output += '* The agent used the following sensors: {}\n\n'.format(', '.join(sensors)) + + if scores and labels_scores: + metrics = list(zip(*[labels_scores[0:3], scores[0:3]])) + infractions = list(zip(*[labels_scores[3:], scores[3:]])) + + output += '=== Global average metrics: ===\n' + output += tabulate(metrics, tablefmt=args.format) + output += '\n\n' + output += '=== Total infractions: ===\n' + output += tabulate(infractions, tablefmt=args.format) + output += '\n\n' + + if records_table: + header = ['metric', 'value', 'additional information'] + list_statistics = [header] + total_duration_game = 0 + total_duration_system = 0 + total_route_length = 0 + for route in records_table: + route_completed_kms = 0.01 * route['scores']['score_route'] * route['meta']['route_length'] / 1000.0 + metrics_route = [[key, '{:.3f}'.format(values), ''] for key, values in route['scores'].items()] + infractions_route = [[key, '{:.3f} ({} occurrences)'.format(len(values)/route_completed_kms, len(values)), + '\n'.join(values)] for key, values in route['infractions'].items()] + + times = [['duration game', '{:.3f}'.format(route['meta']['duration_game']), 'seconds'], + ['duration system', '{:.3f}'.format(route['meta']['duration_system']), 'seconds']] + + route_completed_length = [ ['distance driven', '{:.3f}'.format(route_completed_kms), 'Km']] + + total_duration_game += route['meta']['duration_game'] + total_duration_system += 
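pretty_print_json above converts the stored completion percentage and route length into driven kilometres before normalising infraction counts. The arithmetic, with hypothetical numbers:

    score_route = 75.0       # percent, from route['scores']['score_route']
    route_length = 2000.0    # metres, from route['meta']['route_length']
    route_completed_kms = 0.01 * score_route * route_length / 1000.0
    print(route_completed_kms)  # 1.5 km driven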
route['meta']['duration_system'] + total_route_length += route_completed_kms + + list_statistics.extend([['{}'.format(route['route_id']), '', '']]) + list_statistics.extend([*metrics_route, *infractions_route, *times, *route_completed_length]) + list_statistics.extend([['', '', '']]) + + list_statistics.extend([['total duration_game', '{:.3f}'.format(total_duration_game), 'seconds']]) + list_statistics.extend([['total duration_system', '{:.3f}'.format(total_duration_system), 'seconds']]) + list_statistics.extend([['total distance driven', '{:.3f}'.format(total_route_length), 'Km']]) + + output += '==== Per-route analysis: ===\n'.format() + output += tabulate(list_statistics, tablefmt=args.format) + + if args.output: + with open(args.output, 'w') as fd: + fd.write(output) + else: + print(output) + + return 0 + + +def main(): + description = 'Create a human readable version of the scores provided by the leaderboard.\n' + # general parameters + parser = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter) + parser.add_argument('-f', '--file', help='JSON file containing the results of the leaderboard', required=True) + parser.add_argument('--format', default='fancy_grid', + help='Format in which the table will be printed, e.g.: fancy_grid, latex, github, html, jira') + parser.add_argument('-o', '--output', help='Output file to print the results into') + arguments = parser.parse_args() + + return prettify_json(arguments) + + +if __name__ == '__main__': + main() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh new file mode 100644 index 00000000..1d8a7435 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh @@ -0,0 +1,43 @@ +#!/bin/bash +export CARLA_ROOT= PATH_TO_CARLA +export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh +export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI +export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla +export PYTHONPATH=$PYTHONPATH:$CARLA_ROOT/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg +export PYTHONPATH=$PYTHONPATH:leaderboard +export PYTHONPATH=$PYTHONPATH:leaderboard/team_code +export PYTHONPATH=$PYTHONPATH:scenario_runner + +export LEADERBOARD_ROOT=leaderboard +export CHALLENGE_TRACK_CODENAME=SENSORS +export PORT=2000 +export TM_PORT=8000 +export DEBUG_CHALLENGE=0 +export REPETITIONS=3 # multiple evaluation runs +export RESUME=True + + +# TCP evaluation +export ROUTES=leaderboard/data/evaluation_routes/routes_lav_valid.xml +export TEAM_AGENT=team_code/tcp_agent.py +export TEAM_CONFIG= PATH_TO_MODEL_CKPT +export CHECKPOINT_ENDPOINT=results_TCP.json +export SCENARIOS=leaderboard/data/scenarios/all_towns_traffic_scenarios.json +export SAVE_PATH=data/results_TCP/ + + +python3 ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \ +--scenarios=${SCENARIOS} \ +--routes=${ROUTES} \ +--repetitions=${REPETITIONS} \ +--track=${CHALLENGE_TRACK_CODENAME} \ +--checkpoint=${CHECKPOINT_ENDPOINT} \ +--agent=${TEAM_AGENT} \ +--agent-config=${TEAM_CONFIG} \ +--debug=${DEBUG_CHALLENGE} \ +--record=${RECORD_PATH} \ +--resume=${RESUME} \ +--port=${PORT} \ +--trafficManagerPort=${TM_PORT} + + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/set_new_scenarios.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/set_new_scenarios.py new file mode 100644 index 00000000..9b2ed4cb --- /dev/null +++ 
b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/set_new_scenarios.py @@ -0,0 +1,218 @@ +import argparse +import json +import math +from argparse import RawTextHelpFormatter +from leaderboard.utils.checkpoint_tools import fetch_dict +import carla +import os + +SCENARIO_COLOR = { + "Scenario1": [carla.Color(255, 0, 0), "Red"], # Red + "Scenario2": [carla.Color(0, 255, 0), "Green"], # Green + "Scenario3": [carla.Color(0, 0, 255), "Blue"], # Blue + "Scenario4": [carla.Color(255, 100, 0), "Orange"], # Orange + "Scenario5": [carla.Color(0, 255, 100), "Blueish green"], # Blueish green + "Scenario6": [carla.Color(100, 0, 255), "Purple"], # Purple + "Scenario7": [carla.Color(255, 100, 255), "Pink"], # Pink + "Scenario8": [carla.Color(255, 255, 100), "Yellow"], # Yellow + "Scenario9": [carla.Color(100, 255, 255), "Light Blue"], # Light Blue + "Scenario10": [carla.Color(100, 100, 100), "Gray"] # Gray +} + +def apart_enough(world, _waypoint, scenario_waypoint): + """ + Uses the same condition as in route_scenario to see if they will + be differentiated + """ + TRIGGER_THRESHOLD = 4.0 + TRIGGER_ANGLE_THRESHOLD = 10 + + dx = float(_waypoint["x"]) - scenario_waypoint.transform.location.x + dy = float(_waypoint["y"]) - scenario_waypoint.transform.location.y + distance = math.sqrt(dx * dx + dy * dy) + + dyaw = float(_waypoint["yaw"]) - scenario_waypoint.transform.rotation.yaw + dist_angle = math.sqrt(dyaw * dyaw) + + if distance < TRIGGER_THRESHOLD and dist_angle < TRIGGER_ANGLE_THRESHOLD: + # if distance < TRIGGER_THRESHOLD: + world.debug.draw_point(scenario_waypoint.transform.location + carla.Location(z=1), + size=float(0.15), color=carla.Color(255, 0, 0)) + else: + world.debug.draw_point(scenario_waypoint.transform.location + carla.Location(z=1), + size=float(0.15), color=carla.Color(0, 255, 0)) + +def save_from_wp(endpoint, wp): + """ + Creates a mini json with the data from the scenario location. + used to copy paste it to the .json + """ + with open(endpoint, mode='w') as fd: + + entry = {} + transform = { + "x": str(round(wp.transform.location.x, 2)), + "y": str(round(wp.transform.location.y, 2)), + "z": "1.0", + "yaw": str(round(wp.transform.rotation.yaw, 0)), + "pitch": str(round(wp.transform.rotation.pitch, 0)), + } + entry["transform"] = transform + entry["other_actors"] = {} + json.dump(entry, fd, indent=4) + +def save_from_dict(endpoint, wp): + """ + Creates a mini json with the data from the scenario waypoint. 
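For reference, the mini JSON that save_from_wp and save_from_dict emit for a single trigger point looks like this; the coordinates are made up, and note that every value is serialised as a string, matching the annotations format:

    import json

    entry = {
        "transform": {"x": "103.56", "y": "-5.2", "z": "1.0", "yaw": "90.0", "pitch": "0.0"},
        "other_actors": {}
    }
    print(json.dumps(entry, indent=4))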
+ used to copy paste it to the .json + """ + with open(endpoint, mode='w') as fd: + + entry = {} + transform = { + "x": str(round(float(wp["x"]), 2)), + "y": str(round(float(wp["y"]), 2)), + "z": "1.0", + "yaw": str(round(float(wp["yaw"]), 0)), + "pitch": str(round(float(wp["pitch"]), 0)), + } + entry["transform"] = transform + entry["other_actors"] = {} + json.dump(entry, fd, indent=4) + +def draw_scenarios(world, scenarios, args): + """ + Draws all the points related to args.scenarios + """ + z = 3 + + if scenarios["scenario_type"] in args.scenarios: + number = float(scenarios["scenario_type"][8:]) + color = SCENARIO_COLOR[scenarios["scenario_type"]][0] + + event_list = scenarios["available_event_configurations"] + for i in range(len(event_list)): + event = event_list[i] + _waypoint = event['transform'] # trigger point of this scenario + location = carla.Location(float(_waypoint["x"]), float(_waypoint["y"]), float(_waypoint["z"])) + + scenario_location = location + carla.Location(z=number / z) + world.debug.draw_point(scenario_location, size=float(0.15), color=color) + world.debug.draw_string(scenario_location + carla.Location(z=0.1), text=str(i+1), color=carla.Color(0, 0, 0), life_time=1000) + + if args.debug: + save_from_dict(args.endpoint, _waypoint) + spectator = world.get_spectator() + spectator.set_transform(carla.Transform(location + carla.Location(z=50), + carla.Rotation(pitch=-90))) + print(" Scenario [{}/{}]. Press Enter for the next scenario".format(i+1, len(event_list))) + input() + world.wait_for_tick() + +def modify_junction_scenarios(world, scenarios, args): + """ + Used to move scenario trigger points: + 1) a certain distance to the front (follows the lane) + 2) a certain distance to the back (does not follow the lane) + """ + + if scenarios["scenario_type"] in args.scenarios: + event_list = scenarios["available_event_configurations"] + + for i in range(len(event_list)): + event = event_list[i] + _waypoint = event['transform'] # trigger point of this scenario + location = carla.Location(float(_waypoint["x"]), float(_waypoint["y"]), float(_waypoint["z"])) + rotation = carla.Rotation(float(0), float(_waypoint["pitch"]), float(_waypoint["yaw"])) + world.debug.draw_point(location, size=float(0.15), color=carla.Color(0, 255, 255)) + world.debug.draw_string(location + carla.Location(x=1), text=str(i+1), color=carla.Color(0, 0, 0)) + + # # Case 1) + # DISTANCE = 10 + # new_waypoint = world.get_map().get_waypoint(location) + # scenario_waypoint = new_waypoint.next(DISTANCE)[0] + + # Case 2) + DISTANCE = 5 + new_waypoint = world.get_map().get_waypoint(location) + wp_vec = new_waypoint.transform.get_forward_vector() + new_location = new_waypoint.transform.location - wp_vec*DISTANCE + scenario_waypoint = world.get_map().get_waypoint(new_location) + + # Drawing and waiting for input + apart_enough(world, _waypoint, scenario_waypoint) + save_from_wp(args.endpoint, scenario_waypoint) + + spectator = world.get_spectator() + spectator.set_transform(carla.Transform(scenario_waypoint.transform.location + carla.Location(z=70), + carla.Rotation(pitch=-90))) + + print(" Scenario [{}/{}]. Press Enter for the next scenario".format(i+1, len(event_list))) + input() + world.wait_for_tick() + +def main(): + """ + Used to help with the visualization of the scenario trigger points, as well as its + modifications. + --town: Selects the town + --scenario: The scenario that will be printed. Use the number of the scenarios 1 2 3 ... 
+ --modify: Used to modify the trigger_points of the given scenario in args.scenarios. + debug is auto-enabled here. It will be shown red if they aren't apart enough or green, if they are + --debug: If debug is selected, the points will be shown one by one, and stored at --endpoint, + in case some copy-pasting is required. + """ + + # general parameters + parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) + parser.add_argument('--town', default='Town08') + parser.add_argument('--debug', action='store_true') + parser.add_argument('--reload', action='store_true') + parser.add_argument('--inipoint', default="") + parser.add_argument('--endpoint', default="set_new_scenarios.json") + parser.add_argument('--scenarios', nargs='+', default='Scenario7') + parser.add_argument('--modify', action='store_true') + parser.add_argument('--host', default='localhost', help='IP of the host server (default: localhost)') + parser.add_argument('--port', default='2000', help='TCP port to listen to (default: 2000)') + + args = parser.parse_args() + + # 0) Set the world + client = carla.Client(args.host, int(args.port)) + client.set_timeout(20) + if args.reload: + world = client.load_world(args.town) + else: + world = client.get_world() + + settings = world.get_settings() + settings.fixed_delta_seconds = None + settings.synchronous_mode = False + world.apply_settings(settings) + + # 1) Read the json file + data = fetch_dict(args.inipoint) + data = data["available_scenarios"][0] + + town_data = data[args.town] + + new_args_scenario = [] + for ar_sc in args.scenarios: + new_args_scenario.append("Scenario" + ar_sc) + args.scenarios = new_args_scenario + + for scenarios in town_data: + + if args.modify: + modify_junction_scenarios(world, scenarios, args) + else: + draw_scenarios(world, scenarios, args) + + print(" ---------------------------- ") + for ar_sc in args.scenarios: + print(" {} is colored as {}".format(ar_sc, SCENARIO_COLOR[ar_sc][1])) + print(" ---------------------------- ") + + +if __name__ == '__main__': + main() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/auto_pilot.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/auto_pilot.py new file mode 100644 index 00000000..84a4d54f --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/auto_pilot.py @@ -0,0 +1,379 @@ +import os +import time +import datetime +import pathlib +import json +import random + +import numpy as np +import cv2 +import carla +from PIL import Image + +from team_code.map_agent import MapAgent +from team_code.pid_controller import PIDController + + +# WEATHERS = { +# 'ClearNoon': carla.WeatherParameters.ClearNoon, +# 'ClearSunset': carla.WeatherParameters.ClearSunset, + +# 'CloudyNoon': carla.WeatherParameters.CloudyNoon, +# 'CloudySunset': carla.WeatherParameters.CloudySunset, + +# 'WetNoon': carla.WeatherParameters.WetNoon, +# 'WetSunset': carla.WeatherParameters.WetSunset, + +# 'MidRainyNoon': carla.WeatherParameters.MidRainyNoon, +# 'MidRainSunset': carla.WeatherParameters.MidRainSunset, + +# 'WetCloudyNoon': carla.WeatherParameters.WetCloudyNoon, +# 'WetCloudySunset': carla.WeatherParameters.WetCloudySunset, + +# 'HardRainNoon': carla.WeatherParameters.HardRainNoon, +# 'HardRainSunset': carla.WeatherParameters.HardRainSunset, + +# 'SoftRainNoon': carla.WeatherParameters.SoftRainNoon, +# 'SoftRainSunset': carla.WeatherParameters.SoftRainSunset, +# } +# WEATHERS_IDS = list(WEATHERS) + +WEATHERS = { + 'ClearNoon': carla.WeatherParameters.ClearNoon, + 
'ClearSunset': carla.WeatherParameters.ClearSunset, + 'WetNoon': carla.WeatherParameters.WetNoon, + # 'WetSunset': carla.WeatherParameters.WetSunset, + 'HardRainNoon': carla.WeatherParameters.HardRainNoon, + # 'SoftRainSunset': carla.WeatherParameters.SoftRainSunset, +} +WEATHERS_IDS = list(WEATHERS) + + +def get_entry_point(): + return 'AutoPilot' + + +def _numpy(carla_vector, normalize=False): + result = np.float32([carla_vector.x, carla_vector.y]) + + if normalize: + return result / (np.linalg.norm(result) + 1e-4) + + return result + + +def _location(x, y, z): + return carla.Location(x=float(x), y=float(y), z=float(z)) + + +def _orientation(yaw): + return np.float32([np.cos(np.radians(yaw)), np.sin(np.radians(yaw))]) + + +def get_collision(p1, v1, p2, v2): + A = np.stack([v1, -v2], 1) + b = p2 - p1 + + if abs(np.linalg.det(A)) < 1e-3: + return False, None + + x = np.linalg.solve(A, b) + collides = all(x >= 0) and all(x <= 1) # how many seconds until collision + + return collides, p1 + x[0] * v1 + + +def check_episode_has_noise(lat_noise_percent, long_noise_percent): + lat_noise = False + long_noise = False + if random.randint(0, 101) < lat_noise_percent: + lat_noise = True + + if random.randint(0, 101) < long_noise_percent: + long_noise = True + + return lat_noise, long_noise + + +class AutoPilot(MapAgent): + + # for stop signs + PROXIMITY_THRESHOLD = 30.0 # meters + SPEED_THRESHOLD = 0.1 + WAYPOINT_STEP = 1.0 # meters + + def setup(self, path_to_conf_file): + super().setup(path_to_conf_file) + + def _init(self): + super()._init() + + self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40) + self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40) + + # for stop signs + self._target_stop_sign = None # the stop sign affecting the ego vehicle + self._stop_completed = False # if the ego vehicle has completed the stop sign + self._affected_by_stop = False # if the ego vehicle is influenced by a stop sign + + def _get_angle_to(self, pos, theta, target): + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)], + ]) + + aim = R.T.dot(target - pos) + angle = -np.degrees(np.arctan2(-aim[1], aim[0])) + angle = 0.0 if np.isnan(angle) else angle + + return angle + + def _get_control(self, target, far_target, tick_data): + pos = self._get_position(tick_data) + theta = tick_data['compass'] + speed = tick_data['speed'] + + # Steering. + angle_unnorm = self._get_angle_to(pos, theta, target) + angle = angle_unnorm / 90 + + steer = self._turn_controller.step(angle) + steer = np.clip(steer, -1.0, 1.0) + steer = round(steer, 3) + + # Acceleration. 
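_get_angle_to rotates the target point into the vehicle frame and measures the signed heading error in degrees; the steering branch above then normalises it by 90 before feeding the PID controller. A standalone sketch of the same math with sample numbers:

    import numpy as np

    def angle_to(pos, theta, target):
        """Signed angle (degrees) from the vehicle's heading to a target point."""
        rot = np.array([[np.cos(theta), -np.sin(theta)],
                        [np.sin(theta),  np.cos(theta)]])
        aim = rot.T.dot(target - pos)               # target expressed in the vehicle frame
        return -np.degrees(np.arctan2(-aim[1], aim[0]))

    # Heading along +x (theta = 0), target 45 degrees off to one side:
    print(angle_to(np.array([0.0, 0.0]), 0.0, np.array([3.0, 3.0])))  # 45.0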
+ angle_far_unnorm = self._get_angle_to(pos, theta, far_target) + should_slow = abs(angle_far_unnorm) > 45.0 or abs(angle_unnorm) > 5.0 + target_speed = 4.0 if should_slow else 7.0 + brake = self._should_brake() + target_speed = target_speed if not brake else 0.0 + + self.should_slow = int(should_slow) + self.should_brake = int(brake) + self.angle = angle + self.angle_unnorm = angle_unnorm + self.angle_far_unnorm = angle_far_unnorm + + delta = np.clip(target_speed - speed, 0.0, 0.25) + throttle = self._speed_controller.step(delta) + throttle = np.clip(throttle, 0.0, 0.75) + + if brake: + steer *= 0.5 + throttle = 0.0 + + return steer, throttle, brake, target_speed + + def run_step(self, input_data, timestamp): + if not self.initialized: + self._init() + + # change weather for visual diversity + if self.step % 10 == 0: + index = random.choice(range(len(WEATHERS))) + self.weather_id = WEATHERS_IDS[index] + weather = WEATHERS[WEATHERS_IDS[index]] + print (self.weather_id, weather) + self._world.set_weather(weather) + + data = self.tick(input_data) + gps = self._get_position(data) + + near_node, near_command = self._waypoint_planner.run_step(gps) + far_node, far_command = self._command_planner.run_step(gps) + + steer, throttle, brake, target_speed = self._get_control(near_node, far_node, data) + + control = carla.VehicleControl() + control.steer = steer + 1e-2 * np.random.randn() + control.throttle = throttle + control.brake = float(brake) + + if self.step % 10 == 0 and self.save_path is not None: + self.save(near_node, far_node, near_command, steer, throttle, brake, target_speed, data) + + return control + + def _should_brake(self): + actors = self._world.get_actors() + + vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*')) + light = self._is_light_red(actors.filter('*traffic_light*')) + walker = self._is_walker_hazard(actors.filter('*walker*')) + stop_sign = self._is_stop_sign_hazard(actors.filter('*stop*')) + + self.is_vehicle_present = 1 if vehicle is not None else 0 + self.is_red_light_present = 1 if light is not None else 0 + self.is_pedestrian_present = 1 if walker is not None else 0 + self.is_stop_sign_present = 1 if stop_sign is not None else 0 + + return any(x is not None for x in [vehicle, light, walker, stop_sign]) + + def _point_inside_boundingbox(self, point, bb_center, bb_extent): + A = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y - bb_extent.y) + B = carla.Vector2D(bb_center.x + bb_extent.x, bb_center.y - bb_extent.y) + D = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y + bb_extent.y) + M = carla.Vector2D(point.x, point.y) + + AB = B - A + AD = D - A + AM = M - A + am_ab = AM.x * AB.x + AM.y * AB.y + ab_ab = AB.x * AB.x + AB.y * AB.y + am_ad = AM.x * AD.x + AM.y * AD.y + ad_ad = AD.x * AD.x + AD.y * AD.y + + return am_ab > 0 and am_ab < ab_ab and am_ad > 0 and am_ad < ad_ad + + def _get_forward_speed(self, transform=None, velocity=None): + """ Convert the vehicle transform directly to forward speed """ + if not velocity: + velocity = self._vehicle.get_velocity() + if not transform: + transform = self._vehicle.get_transform() + + vel_np = np.array([velocity.x, velocity.y, velocity.z]) + pitch = np.deg2rad(transform.rotation.pitch) + yaw = np.deg2rad(transform.rotation.yaw) + orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)]) + speed = np.dot(vel_np, orientation) + return speed + + def _is_actor_affected_by_stop(self, actor, stop, multi_step=20): + """ + Check if the given actor is affected by the stop + """ 
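The geometric core of the stop-sign test is _point_inside_boundingbox, defined earlier in this file: project the vector AM onto the box edges AB and AD and require both projections to fall strictly inside. A 2-D restatement with plain tuples instead of carla.Vector2D:

    import numpy as np

    def point_inside_box(point, center, extent):
        """2-D version of the AB/AD projection test used by _point_inside_boundingbox."""
        a = np.array([center[0] - extent[0], center[1] - extent[1]])  # corner A
        b = np.array([center[0] + extent[0], center[1] - extent[1]])  # corner B
        d = np.array([center[0] - extent[0], center[1] + extent[1]])  # corner D
        m = np.array(point)
        ab, ad, am = b - a, d - a, m - a
        return 0 < am.dot(ab) < ab.dot(ab) and 0 < am.dot(ad) < ad.dot(ad)

    print(point_inside_box((1.0, 0.5), (0.0, 0.0), (2.0, 1.0)))  # True
    print(point_inside_box((3.0, 0.0), (0.0, 0.0), (2.0, 1.0)))  # False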
+ affected = False + # first we run a fast coarse test + current_location = actor.get_location() + stop_location = stop.get_transform().location + if stop_location.distance(current_location) > self.PROXIMITY_THRESHOLD: + return affected + + stop_t = stop.get_transform() + transformed_tv = stop_t.transform(stop.trigger_volume.location) + + # slower and accurate test based on waypoint's horizon and geometric test + list_locations = [current_location] + waypoint = self._world.get_map().get_waypoint(current_location) + for _ in range(multi_step): + if waypoint: + waypoint = waypoint.next(self.WAYPOINT_STEP)[0] + if not waypoint: + break + list_locations.append(waypoint.transform.location) + + for actor_location in list_locations: + if self._point_inside_boundingbox(actor_location, transformed_tv, stop.trigger_volume.extent): + affected = True + + return affected + + def _is_stop_sign_hazard(self, stop_sign_list): + if self._affected_by_stop: + if not self._stop_completed: + current_speed = self._get_forward_speed() + if current_speed < self.SPEED_THRESHOLD: + self._stop_completed = True + return None + else: + return self._target_stop_sign + else: + # reset if the ego vehicle is outside the influence of the current stop sign + if not self._is_actor_affected_by_stop(self._vehicle, self._target_stop_sign): + self._affected_by_stop = False + self._stop_completed = False + self._target_stop_sign = None + return None + + ve_tra = self._vehicle.get_transform() + ve_dir = ve_tra.get_forward_vector() + + wp = self._world.get_map().get_waypoint(ve_tra.location) + wp_dir = wp.transform.get_forward_vector() + + dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z + + if dot_ve_wp > 0: # Ignore all when going in a wrong lane + for stop_sign in stop_sign_list: + if self._is_actor_affected_by_stop(self._vehicle, stop_sign): + # this stop sign is affecting the vehicle + self._affected_by_stop = True + self._target_stop_sign = stop_sign + return self._target_stop_sign + + return None + + def _is_light_red(self, lights_list): + if self._vehicle.get_traffic_light_state() != carla.libcarla.TrafficLightState.Green: + affecting = self._vehicle.get_traffic_light() + + for light in self._traffic_lights: + if light.id == affecting.id: + return affecting + + return None + + def _is_walker_hazard(self, walkers_list): + z = self._vehicle.get_location().z + p1 = _numpy(self._vehicle.get_location()) + v1 = 10.0 * _orientation(self._vehicle.get_transform().rotation.yaw) + + for walker in walkers_list: + v2_hat = _orientation(walker.get_transform().rotation.yaw) + s2 = np.linalg.norm(_numpy(walker.get_velocity())) + + if s2 < 0.05: + v2_hat *= s2 + + p2 = -3.0 * v2_hat + _numpy(walker.get_location()) + v2 = 8.0 * v2_hat + + collides, collision_point = get_collision(p1, v1, p2, v2) + + if collides: + return walker + + return None + + def _is_vehicle_hazard(self, vehicle_list): + z = self._vehicle.get_location().z + + o1 = _orientation(self._vehicle.get_transform().rotation.yaw) + p1 = _numpy(self._vehicle.get_location()) + s1 = max(10, 3.0 * np.linalg.norm(_numpy(self._vehicle.get_velocity()))) # increases the threshold distance + v1_hat = o1 + v1 = s1 * v1_hat + + for target_vehicle in vehicle_list: + if target_vehicle.id == self._vehicle.id: + continue + + o2 = _orientation(target_vehicle.get_transform().rotation.yaw) + p2 = _numpy(target_vehicle.get_location()) + s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity()))) + v2_hat = o2 + v2 = s2 * v2_hat + + p2_p1 = p2 - p1 + distance 
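The walker check relies on get_collision, defined near the top of this file: it solves p1 + x0*v1 = p2 + x1*v2 as a 2x2 linear system and reports a hazard only when both parameters land in [0, 1]. Restated with concrete numbers (ego heading +x at 10 m/s, a walker 5 m ahead crossing at 8 m/s):

    import numpy as np

    def get_collision(p1, v1, p2, v2):
        """Solve p1 + x0*v1 == p2 + x1*v2; a hit needs both x in [0, 1]."""
        a = np.stack([v1, -v2], 1)
        b = p2 - p1
        if abs(np.linalg.det(a)) < 1e-3:   # (anti-)parallel velocities: no crossing
            return False, None
        x = np.linalg.solve(a, b)
        collides = all(x >= 0) and all(x <= 1)
        return collides, p1 + x[0] * v1

    print(get_collision(np.float32([0, 0]), np.float32([10, 0]),
                        np.float32([5, -4]), np.float32([0, 8])))
    # (True, array([5., 0.], dtype=float32)): paths cross half a second from now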
= np.linalg.norm(p2_p1) + p2_p1_hat = p2_p1 / (distance + 1e-4) + + angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat))) + angle_between_heading = np.degrees(np.arccos(o1.dot(o2))) + + # to consider -ve angles too + angle_to_car = min(angle_to_car, 360.0 - angle_to_car) + angle_between_heading = min(angle_between_heading, 360.0 - angle_between_heading) + + if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1): + continue + elif angle_to_car > 30.0: + continue + elif distance > s1: + continue + + return target_vehicle + + return None + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/base_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/base_agent.py new file mode 100644 index 00000000..a7f7ffcd --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/base_agent.py @@ -0,0 +1,937 @@ +import time +import os +import datetime +import pathlib +import json + +import cv2 +import carla + +from leaderboard.autoagents import autonomous_agent +from team_code.planner import RoutePlanner + +import numpy as np +from PIL import Image, ImageDraw + + +SAVE_PATH = os.environ.get('SAVE_PATH', None) + + +class BaseAgent(autonomous_agent.AutonomousAgent): + def setup(self, path_to_conf_file): + self.track = autonomous_agent.Track.SENSORS + self.config_path = path_to_conf_file + self.step = -1 + self.wall_start = time.time() + self.initialized = False + + self._sensor_data = { + 'width': 400, + 'height': 300, + 'fov': 100 + } + + self._3d_bb_distance = 50 + self.weather_id = None + + self.save_path = None + + if SAVE_PATH is not None: + now = datetime.datetime.now() + string = pathlib.Path(os.environ['ROUTES']).stem + '_' + string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second))) + + print (string) + + self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string + self.save_path.mkdir(parents=True, exist_ok=False) + + for sensor in self.sensors(): + if hasattr(sensor, 'save') and sensor['save']: + (self.save_path / sensor['id']).mkdir() + + (self.save_path / '3d_bbs').mkdir(parents=True, exist_ok=True) + (self.save_path / 'affordances').mkdir(parents=True, exist_ok=True) + (self.save_path / 'measurements').mkdir(parents=True, exist_ok=True) + (self.save_path / 'lidar').mkdir(parents=True, exist_ok=True) + (self.save_path / 'semantic_lidar').mkdir(parents=True, exist_ok=True) + (self.save_path / 'topdown').mkdir(parents=True, exist_ok=True) + + for pos in ['front', 'left', 'right', 'rear']: + for sensor_type in ['rgb', 'seg', 'depth', '2d_bbs']: + name = sensor_type + '_' + pos + (self.save_path / name).mkdir() + + def _init(self): + self._command_planner = RoutePlanner(7.5, 25.0, 257) + self._command_planner.set_route(self._global_plan, True) + + self.initialized = True + + self._sensor_data['calibration'] = self._get_camera_to_car_calibration(self._sensor_data) + + self._sensors = self.sensor_interface._sensors_objects + + + + def _get_position(self, tick_data): + gps = tick_data['gps'] + gps = (gps - self._command_planner.mean) * self._command_planner.scale + + return gps + + def sensors(self): + return [ + { + 'type': 'sensor.camera.rgb', + 'x': -1.5, 'y': 0.0, 'z': 2.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 900, 'height': 256, 'fov': 100, + 'id': 'rgb_front' + }, + { + 'type': 'sensor.camera.semantic_segmentation', + 'x': 1.3, 'y': 0.0, 'z': 2.3, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': self._sensor_data['width'], 'height': 
self._sensor_data['height'], 'fov': self._sensor_data['fov'], + 'id': 'seg_front' + }, + { + 'type': 'sensor.camera.depth', + 'x': 1.3, 'y': 0.0, 'z': 2.3, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + 'id': 'depth_front' + }, + # { + # 'type': 'sensor.camera.rgb', + # 'x': -1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'rgb_rear' + # }, + # { + # 'type': 'sensor.camera.semantic_segmentation', + # 'x': -1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'seg_rear' + # }, + # { + # 'type': 'sensor.camera.depth', + # 'x': -1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'depth_rear' + # }, + # { + # 'type': 'sensor.camera.rgb', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'rgb_left' + # }, + # { + # 'type': 'sensor.camera.semantic_segmentation', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'seg_left' + # }, + # { + # 'type': 'sensor.camera.depth', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'depth_left' + # }, + # { + # 'type': 'sensor.camera.rgb', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'rgb_right' + # }, + # { + # 'type': 'sensor.camera.semantic_segmentation', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'seg_right' + # }, + # { + # 'type': 'sensor.camera.depth', + # 'x': 1.3, 'y': 0.0, 'z': 2.3, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, + # 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], + # 'id': 'depth_right' + # }, + # { + # 'type': 'sensor.lidar.ray_cast', + # 'x': 1.3, 'y': 0.0, 'z': 2.5, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0, + # 'id': 'lidar' + # }, + # { + # 'type': 'sensor.lidar.ray_cast_semantic', + # 'x': 1.3, 'y': 0.0, 'z': 2.5, + # 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0, + # 'id': 'semantic_lidar' + # }, + { + 'type': 'sensor.other.imu', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.05, + 'id': 'imu' + }, + { + 'type': 'sensor.other.gnss', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.01, + 'id': 'gps' + }, + { + 'type': 'sensor.speedometer', + 'reading_frequency': 20, + 'id': 'speed' + } + ] + + def tick(self, input_data): + self.step += 1 + + # affordances = self._get_affordances() + + # traffic_lights = 
self._find_obstacle('*traffic_light*') + # stop_signs = self._find_obstacle('*stop*') + + depth = {} + seg = {} + + # bb_3d = self._get_3d_bbs(max_distance=self._3d_bb_distance) + + bb_2d = {} + + # for pos in ['front', 'left', 'right', 'rear']: + # seg_cam = 'seg_' + pos + # depth_cam = 'depth_' + pos + # _segmentation = np.copy(input_data[seg_cam][1][:, :, 2]) + + # depth[pos] = self._get_depth(input_data[depth_cam][1][:, :, :3]) + + # self._change_seg_tl(_segmentation, depth[pos], traffic_lights) + # self._change_seg_stop(_segmentation, depth[pos], stop_signs, seg_cam) + + # bb_2d[pos] = self._get_2d_bbs(seg_cam, affordances, bb_3d, _segmentation) + + # #self._draw_2d_bbs(_segmentation, bb_2d[pos]) + + # seg[pos] = _segmentation + + + rgb_front = cv2.cvtColor(input_data['rgb_front'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB) + gps = input_data['gps'][1][:2] + speed = input_data['speed'][1]['speed'] + compass = input_data['imu'][1][-1] + + # depth_front = cv2.cvtColor(input_data['depth_front'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # depth_left = cv2.cvtColor(input_data['depth_left'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # depth_right = cv2.cvtColor(input_data['depth_right'][1][:, :, :3], cv2.COLOR_BGR2RGB) + # depth_rear = cv2.cvtColor(input_data['depth_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB) + + weather = self._weather_to_dict(self._world.get_weather()) + + # return { + # 'rgb_front': rgb_front, + # 'seg_front': seg['front'], + # 'depth_front': depth_front, + # '2d_bbs_front': bb_2d['front'], + # 'rgb_rear': rgb_rear, + # 'seg_rear': seg['rear'], + # 'depth_rear': depth_rear, + # '2d_bbs_rear': bb_2d['rear'], + # 'rgb_left': rgb_left, + # 'seg_left': seg['left'], + # 'depth_left': depth_left, + # '2d_bbs_left': bb_2d['left'], + # 'rgb_right': rgb_right, + # 'seg_right': seg['right'], + # 'depth_right': depth_right, + # '2d_bbs_right': bb_2d['right'], + # 'lidar' : input_data['lidar'][1], + # 'semantic_lidar': input_data['semantic_lidar'][1], + # 'gps': gps, + # 'speed': speed, + # 'compass': compass, + # 'weather': weather, + # 'affordances': affordances, + # '3d_bbs': bb_3d + # } + + return { + 'rgb_front': rgb_front, + + 'gps': gps, + 'speed': speed, + 'compass': compass, + 'weather': weather, + } + + + def save(self, near_node, far_node, near_command, steer, throttle, brake, target_speed, tick_data): + frame = self.step // 10 + + pos = self._get_position(tick_data) + theta = tick_data['compass'] + speed = tick_data['speed'] + weather = tick_data['weather'] + + data = { + 'x': pos[0], + 'y': pos[1], + 'theta': theta, + 'speed': speed, + 'target_speed': target_speed, + 'x_command': far_node[0], + 'y_command': far_node[1], + 'command': near_command.value, + 'steer': steer, + 'throttle': throttle, + 'brake': brake, + 'weather': weather, + 'weather_id': self.weather_id, + 'near_node_x': near_node[0], + 'near_node_y': near_node[1], + 'far_node_x': far_node[0], + 'far_node_y': far_node[1], + 'is_vehicle_present': self.is_vehicle_present, + 'is_pedestrian_present': self.is_pedestrian_present, + 'is_red_light_present': self.is_red_light_present, + 'is_stop_sign_present': self.is_stop_sign_present, + 'should_slow': self.should_slow, + 'should_brake': self.should_brake, + 'angle': self.angle, + 'angle_unnorm': self.angle_unnorm, + 'angle_far_unnorm': 
self.angle_far_unnorm, + } + + measurements_file = self.save_path / 'measurements' / ('%04d.json' % frame) + f = open(measurements_file, 'w') + json.dump(data, f, indent=4) + f.close() + + # for pos in ['front', 'left', 'right', 'rear']: + for pos in ['front']: + name = 'rgb_' + pos + Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame)) + # for sensor_type in ['seg', 'depth']: + # name = sensor_type + '_' + pos + # Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame)) + # for sensor_type in ['2d_bbs']: + # name = sensor_type + '_' + pos + # np.save(self.save_path / name / ('%04d.npy' % frame), tick_data[name], allow_pickle=True) + + # Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' / ('%04d.png' % frame)) + # np.save(self.save_path / 'lidar' / ('%04d.npy' % frame), tick_data['lidar'], allow_pickle=True) + # np.save(self.save_path / 'semantic_lidar' / ('%04d.npy' % frame), tick_data['semantic_lidar'], allow_pickle=True) + # np.save(self.save_path / '3d_bbs' / ('%04d.npy' % frame), tick_data['3d_bbs'], allow_pickle=True) + # np.save(self.save_path / 'affordances' / ('%04d.npy' % frame), tick_data['affordances'], allow_pickle=True) + + + def _weather_to_dict(self, carla_weather): + weather = { + 'cloudiness': carla_weather.cloudiness, + 'precipitation': carla_weather.precipitation, + 'precipitation_deposits': carla_weather.precipitation_deposits, + 'wind_intensity': carla_weather.wind_intensity, + 'sun_azimuth_angle': carla_weather.sun_azimuth_angle, + 'sun_altitude_angle': carla_weather.sun_altitude_angle, + 'fog_density': carla_weather.fog_density, + 'fog_distance': carla_weather.fog_distance, + 'wetness': carla_weather.wetness, + 'fog_falloff': carla_weather.fog_falloff, + } + + return weather + + def _create_bb_points(self, bb): + """ + Returns 3D bounding box world coordinates. 
+ """ + + cords = np.zeros((8, 4)) + extent = bb[1] + loc = bb[0] + cords[0, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] - extent[2], 1]) + cords[1, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] - extent[2], 1]) + cords[2, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] - extent[2], 1]) + cords[3, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] - extent[2], 1]) + cords[4, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] + extent[2], 1]) + cords[5, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] + extent[2], 1]) + cords[6, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] + extent[2], 1]) + cords[7, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] + extent[2], 1]) + return cords + + def _translate_tl_state(self, state): + + if state == carla.TrafficLightState.Red: + return 0 + elif state == carla.TrafficLightState.Yellow: + return 1 + elif state == carla.TrafficLightState.Green: + return 2 + elif state == carla.TrafficLightState.Off: + return 3 + elif state == carla.TrafficLightState.Unknown: + return 4 + else: + return None + + + def _get_affordances(self): + + # affordance tl + affordances = {} + affordances["traffic_light"] = None + + affecting = self._vehicle.get_traffic_light() + if affecting is not None: + for light in self._traffic_lights: + if light.id == affecting.id: + affordances["traffic_light"] = self._translate_tl_state(self._vehicle.get_traffic_light_state()) + + affordances["stop_sign"] = self._affected_by_stop + + return affordances + + def _get_3d_bbs(self, max_distance=50): + + bounding_boxes = { + "traffic_lights": [], + "stop_signs": [], + "vehicles": [], + "pedestrians": [] + } + + bounding_boxes['traffic_lights'] = self._find_obstacle_3dbb('*traffic_light*', max_distance) + bounding_boxes['stop_signs'] = self._find_obstacle_3dbb('*stop*', max_distance) + bounding_boxes['vehicles'] = self._find_obstacle_3dbb('*vehicle*', max_distance) + bounding_boxes['pedestrians'] = self._find_obstacle_3dbb('*walker*', max_distance) + + return bounding_boxes + + def _get_2d_bbs(self, seg_cam, affordances, bb_3d, seg_img): + """Returns a dict of all 2d boundingboxes given a camera position, affordances and 3d bbs + + Args: + seg_cam ([type]): [description] + affordances ([type]): [description] + bb_3d ([type]): [description] + + Returns: + [type]: [description] + """ + + bounding_boxes = { + "traffic_light": list(), + "stop_sign": list(), + "vehicles": list(), + "pedestrians": list() + } + + + if affordances['stop_sign']: + baseline = self._get_2d_bb_baseline(self._target_stop_sign) + bb = self._baseline_to_box(baseline, seg_cam) + + if bb is not None: + bounding_boxes["stop_sign"].append(bb) + + if affordances['traffic_light'] is not None: + baseline = self._get_2d_bb_baseline(self._vehicle.get_traffic_light(), distance=8) + + tl_bb = self._baseline_to_box(baseline, seg_cam, height=.5) + + if tl_bb is not None: + bounding_boxes["traffic_light"].append({ + "bb": tl_bb, + "state": self._translate_tl_state(self._vehicle.get_traffic_light_state()) + }) + + for vehicle in bb_3d["vehicles"]: + + trig_loc_world = self._create_bb_points(vehicle).T + cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False) + + cords_x_y_z = np.array(cords_x_y_z)[:3, :] + veh_bb = self._coords_to_2d_bb(cords_x_y_z) + + if veh_bb is not None: + if np.any(seg_img[veh_bb[0][1]:veh_bb[1][1],veh_bb[0][0]:veh_bb[1][0]] == 10): + 
bounding_boxes["vehicles"].append(veh_bb) + + for pedestrian in bb_3d["pedestrians"]: + + trig_loc_world = self._create_bb_points(pedestrian).T + cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False) + + cords_x_y_z = np.array(cords_x_y_z)[:3, :] + + ped_bb = self._coords_to_2d_bb(cords_x_y_z) + + if ped_bb is not None: + if np.any(seg_img[ped_bb[0][1]:ped_bb[1][1],ped_bb[0][0]:ped_bb[1][0]] == 4): + bounding_boxes["pedestrians"].append(ped_bb) + + return bounding_boxes + + def _draw_2d_bbs(self, seg_img, bbs): + """For debugging only + + Args: + seg_img ([type]): [description] + bbs ([type]): [description] + """ + + for bb_type in bbs: + + _region = np.zeros(seg_img.shape) + + if bb_type == "traffic_light": + for bb in bbs[bb_type]: + _region = np.zeros(seg_img.shape) + box = bb['bb'] + _region[box[0][1]:box[1][1],box[0][0]:box[1][0]] = 1 + seg_img[(_region == 1)] = 23 + else: + + for bb in bbs[bb_type]: + + _region[bb[0][1]:bb[1][1],bb[0][0]:bb[1][0]] = 1 + + if bb_type == "stop_sign": + seg_img[(_region == 1)] = 26 + elif bb_type == "vehicles": + seg_img[(_region == 1)] = 10 + elif bb_type == "pedestrians": + seg_img[(_region == 1)] = 4 + + def _find_obstacle_3dbb(self, obstacle_type, max_distance=50): + """Returns a list of 3d bounding boxes of type obstacle_type. + If the object does have a bounding box, this is returned. Otherwise a bb + of size 0.5,0.5,2 is returned at the origin of the object. + + Args: + obstacle_type (String): Regular expression + max_distance (int, optional): max search distance. Returns all bbs in this radius. Defaults to 50. + + Returns: + List: List of Boundingboxes + """ + obst = list() + + _actors = self._world.get_actors() + _obstacles = _actors.filter(obstacle_type) + + for _obstacle in _obstacles: + distance_to_car = _obstacle.get_transform().location.distance(self._vehicle.get_location()) + + if 0 < distance_to_car <= max_distance: + + if hasattr(_obstacle, 'bounding_box'): + loc = _obstacle.bounding_box.location + _obstacle.get_transform().transform(loc) + + extent = _obstacle.bounding_box.extent + _rotation_matrix = self.get_matrix(carla.Transform(carla.Location(0,0,0), _obstacle.get_transform().rotation)) + + rotated_extent = np.squeeze(np.array((np.array([[extent.x, extent.y, extent.z, 1]]) @ _rotation_matrix)[:3])) + + bb = np.array([ + [loc.x, loc.y, loc.z], + [rotated_extent[0], rotated_extent[1], rotated_extent[2]] + ]) + + else: + loc = _obstacle.get_transform().location + bb = np.array([ + [loc.x, loc.y, loc.z], + [0.5, 0.5, 2] + ]) + + obst.append(bb) + + return obst + + def _get_2d_bb_baseline(self, obstacle, distance=2, cam='seg_front'): + """Returns 2 coordinates for the baseline for 2d bbs in world coordinates + (distance behind trigger volume, as seen from camera) + + Args: + obstacle (Actor): obstacle with + distance (int, optional): Distance behind trigger volume. Defaults to 2. 
+ + Returns: + np.ndarray: Baseline + """ + trigger = obstacle.trigger_volume + bb = self._create_2d_bb_points(trigger) + trig_loc_world = self._trig_to_world(bb, obstacle, trigger) + #self._draw_line(trig_loc_world[:,0], trig_loc_world[:,3], 0.7, color=(0, 255, 255)) + + + cords_x_y_z = np.array(self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam))) + indices = (-cords_x_y_z[0]).argsort() + + # check crooked up boxes + if self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[1]]) < self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[2]]): + cords = cords_x_y_z[:, [indices[0],indices[2]]] + np.array([[distance],[0],[0],[0]]) + else: + cords = cords_x_y_z[:, [indices[0],indices[1]]] + np.array([[distance],[0],[0],[0]]) + + sensor_world_matrix = self.get_matrix(self._get_sensor_position(cam)) + baseline = np.dot(sensor_world_matrix, cords) + + return baseline + + def _baseline_to_box(self, baseline, cam, height=1): + """Transforms a baseline (in world coords) into a 2d box (in sensor coords) + + Args: + baseline ([type]): [description] + cam ([type]): [description] + height (int, optional): Box height. Defaults to 1. + + Returns: + [type]: Box in sensor coords + """ + cords_x_y_z = np.array(self._world_to_sensor(baseline, self._get_sensor_position(cam))[:3, :]) + + cords = np.hstack((cords_x_y_z, np.fliplr(cords_x_y_z + np.array([[0],[0],[height]])))) + + return self._coords_to_2d_bb(cords) + + + def _coords_to_2d_bb(self, cords): + """Returns coords of a 2d box given points in sensor coords + + Args: + cords ([type]): [description] + + Returns: + [type]: [description] + """ + cords_y_minus_z_x = np.vstack((cords[1, :], -cords[2, :], cords[0, :])) + + bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T + + camera_bbox = np.vstack([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]]).T + + if np.any(camera_bbox[:,2] > 0): + + camera_bbox = np.array(camera_bbox) + _positive_bb = camera_bbox[camera_bbox[:,2] > 0] + + min_x = int(np.clip(np.min(_positive_bb[:,0]), 0, self._sensor_data['width'])) + min_y = int(np.clip(np.min(_positive_bb[:,1]), 0, self._sensor_data['height'])) + max_x = int(np.clip(np.max(_positive_bb[:,0]), 0, self._sensor_data['width'])) + max_y = int(np.clip(np.max(_positive_bb[:,1]), 0, self._sensor_data['height'])) + + return [(min_x,min_y),(max_x,max_y)] + else: + return None + + + def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6): + """Adds a stop class to the segmentation image + + Args: + seg_img ([type]): [description] + depth_img ([type]): [description] + stop_signs ([type]): [description] + cam ([type]): [description] + _region_size (int, optional): [description]. Defaults to 6. 
+ """ + for stop in stop_signs: + + _dist = self._get_distance(stop.get_transform().location) + + _region = np.abs(depth_img - _dist) + + seg_img[(_region < _region_size) & (seg_img == 12)] = 26 + + # lane markings + trigger = stop.trigger_volume + + _trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger) + _x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0] + + if _x > 0: # stop is in front of camera + + bb = self._create_2d_bb_points(trigger, 4) + trig_loc_world = self._trig_to_world(bb, stop, trigger) + cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True) + + #if cords_x_y_z.size: + cords_x_y_z = cords_x_y_z[:3, :] + cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]]) + bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T + + camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1) + + if np.any(camera_bbox[:,2] > 0): + + camera_bbox = np.array(camera_bbox) + + polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))] + + img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0) + ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1) + _region = np.array(img) + + #seg_img[(_region == 1)] = 27 + seg_img[(_region == 1) & (seg_img == 6)] = 27 + + + def _trig_to_world(self, bb, parent, trigger): + """Transforms the trigger coordinates to world coordinates + + Args: + bb ([type]): [description] + parent ([type]): [description] + trigger ([type]): [description] + + Returns: + [type]: [description] + """ + bb_transform = carla.Transform(trigger.location) + bb_vehicle_matrix = self.get_matrix(bb_transform) + vehicle_world_matrix = self.get_matrix(parent.get_transform()) + bb_world_matrix = vehicle_world_matrix @ bb_vehicle_matrix + world_cords = bb_world_matrix @ bb.T + return world_cords + + def _create_2d_bb_points(self, actor_bb, scale_factor=1): + """ + Returns 2D floor bounding box for an actor. + """ + + cords = np.zeros((4, 4)) + extent = actor_bb.extent + x = extent.x * scale_factor + y = extent.y * scale_factor + z = extent.z * scale_factor + cords[0, :] = np.array([x, y, 0, 1]) + cords[1, :] = np.array([-x, y, 0, 1]) + cords[2, :] = np.array([-x, -y, 0, 1]) + cords[3, :] = np.array([x, -y, 0, 1]) + return cords + + def _get_sensor_position(self, cam): + """returns the sensor position and rotation + + Args: + cam ([type]): [description] + + Returns: + [type]: [description] + """ + sensor_transform = self._sensors[cam].get_transform() + + return sensor_transform + + def _world_to_sensor(self, cords, sensor, move_cords=False): + """ + Transforms world coordinates to sensor. 
+ """ + + sensor_world_matrix = self.get_matrix(sensor) + world_sensor_matrix = np.linalg.inv(sensor_world_matrix) + sensor_cords = np.dot(world_sensor_matrix, cords) + + if move_cords: + _num_cords = range(sensor_cords.shape[1]) + modified_cords = np.array([]) + for i in _num_cords: + if sensor_cords[0,i] < 0: + for j in _num_cords: + if sensor_cords[0,j] > 0: + _direction = sensor_cords[:,i] - sensor_cords[:,j] + _distance = -sensor_cords[0,j] / _direction[0] + new_cord = sensor_cords[:,j] + _distance[0,0] * _direction * 0.9999 + modified_cords = np.hstack([modified_cords, new_cord]) if modified_cords.size else new_cord + else: + modified_cords = np.hstack([modified_cords, sensor_cords[:,i]]) if modified_cords.size else sensor_cords[:,i] + + return modified_cords + else: + return sensor_cords + + def get_matrix(self, transform): + """ + Creates matrix from carla transform. + """ + + rotation = transform.rotation + location = transform.location + c_y = np.cos(np.radians(rotation.yaw)) + s_y = np.sin(np.radians(rotation.yaw)) + c_r = np.cos(np.radians(rotation.roll)) + s_r = np.sin(np.radians(rotation.roll)) + c_p = np.cos(np.radians(rotation.pitch)) + s_p = np.sin(np.radians(rotation.pitch)) + matrix = np.matrix(np.identity(4)) + matrix[0, 3] = location.x + matrix[1, 3] = location.y + matrix[2, 3] = location.z + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + return matrix + + def _change_seg_tl(self, seg_img, depth_img, traffic_lights, _region_size=4): + """Adds 3 traffic light classes (green, yellow, red) to the segmentation image + + Args: + seg_img ([type]): [description] + depth_img ([type]): [description] + traffic_lights ([type]): [description] + _region_size (int, optional): [description]. Defaults to 4. 
+ """ + for tl in traffic_lights: + _dist = self._get_distance(tl.get_transform().location) + + _region = np.abs(depth_img - _dist) + + if tl.get_state() == carla.TrafficLightState.Red: + state = 23 + elif tl.get_state() == carla.TrafficLightState.Yellow: + state = 24 + elif tl.get_state() == carla.TrafficLightState.Green: + state = 25 + else: #none of the states above, do not change class + state = 18 + + #seg_img[(_region >= _region_size)] = 0 + seg_img[(_region < _region_size) & (seg_img == 18)] = state + + + def _get_dist(self, p1, p2): + """Returns the distance between p1 and p2 + + Args: + target ([type]): [description] + + Returns: + [type]: [description] + """ + + distance = np.sqrt( + (p1[0] - p2[0]) ** 2 + + (p1[1] - p2[1]) ** 2 + + (p1[2] - p2[2]) ** 2) + + return distance + + def _get_distance(self, target): + """Returns the distance from the (rgb_front) camera to the target + + Args: + target ([type]): [description] + + Returns: + [type]: [description] + """ + sensor_transform = self._sensors['rgb_front'].get_transform() + + distance = np.sqrt( + (sensor_transform.location.x - target.x) ** 2 + + (sensor_transform.location.y - target.y) ** 2 + + (sensor_transform.location.z - target.z) ** 2) + + return distance + + + def _get_depth(self, data): + """Transforms the depth image into meters + + Args: + data ([type]): [description] + + Returns: + [type]: [description] + """ + + data = data.astype(np.float32) + + normalized = np.dot(data, [65536.0, 256.0, 1.0]) + normalized /= (256 * 256 * 256 - 1) + in_meters = 1000 * normalized + + return in_meters + + def _find_obstacle(self, obstacle_type='*traffic_light*'): + """Find all actors of a certain type that are close to the vehicle + + Args: + obstacle_type (str, optional): [description]. Defaults to '*traffic_light*'. + + Returns: + [type]: [description] + """ + obst = list() + + _actors = self._world.get_actors() + _obstacles = _actors.filter(obstacle_type) + + + for _obstacle in _obstacles: + trigger = _obstacle.trigger_volume + + _obstacle.get_transform().transform(trigger.location) + + distance_to_car = trigger.location.distance(self._vehicle.get_location()) + + a = np.sqrt( + trigger.extent.x ** 2 + + trigger.extent.y ** 2 + + trigger.extent.z ** 2) + b = np.sqrt( + self._vehicle.bounding_box.extent.x ** 2 + + self._vehicle.bounding_box.extent.y ** 2 + + self._vehicle.bounding_box.extent.z ** 2) + + s = a + b + 10 + + + if distance_to_car <= s: + # the actor is affected by this obstacle. 
+ obst.append(_obstacle) + + return obst + + def _get_camera_to_car_calibration(self, sensor): + """returns the calibration matrix for the given sensor + + Args: + sensor ([type]): [description] + + Returns: + [type]: [description] + """ + calibration = np.identity(3) + calibration[0, 2] = sensor["width"] / 2.0 + calibration[1, 2] = sensor["height"] / 2.0 + calibration[0, 0] = calibration[1, 1] = sensor["width"] / (2.0 * np.tan(sensor["fov"] * np.pi / 360.0)) + return calibration \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/map_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/map_agent.py new file mode 100644 index 00000000..d056edc2 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/map_agent.py @@ -0,0 +1,179 @@ +import numpy as np +from PIL import Image, ImageDraw + +from srunner.scenariomanager.carla_data_provider import CarlaDataProvider + +from team_code.base_agent import BaseAgent +from team_code.planner import RoutePlanner + + +class MapAgent(BaseAgent): + def sensors(self): + result = super().sensors() + result.append({ + 'type': 'sensor.camera.semantic_segmentation', + 'x': 0.0, 'y': 0.0, 'z': 100.0, + 'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0, + 'width': 512, 'height': 512, 'fov': 5 * 10.0, + 'id': 'map' + }) + + return result + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): + super().set_global_plan(global_plan_gps, global_plan_world_coord) + + self._plan_HACK = global_plan_world_coord + self._plan_gps_HACK = global_plan_gps + + def _init(self): + super()._init() + + self._vehicle = CarlaDataProvider.get_hero_actor() + self._world = self._vehicle.get_world() + + self._waypoint_planner = RoutePlanner(4.0, 50) + self._waypoint_planner.set_route(self._plan_gps_HACK, True) + + self._traffic_lights = list() + + def tick(self, input_data): + self._actors = self._world.get_actors() + self._traffic_lights = get_nearby_lights(self._vehicle, self._actors.filter('*traffic_light*')) + self._stop_signs = get_nearby_lights(self._vehicle, self._actors.filter('*stop*')) + + topdown = input_data['map'][1][:, :, 2] + topdown = draw_traffic_lights(topdown, self._vehicle, self._traffic_lights) + topdown = draw_stop_signs(topdown, self._vehicle, self._stop_signs) + + result = super().tick(input_data) + result['topdown'] = topdown + + return result + + +def get_nearby_lights(vehicle, lights, pixels_per_meter=5.5, size=512, radius=5): + result = list() + + transform = vehicle.get_transform() + pos = transform.location + theta = np.radians(90 + transform.rotation.yaw) + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)], + ]) + + for light in lights: + delta = light.get_transform().location - pos + + target = R.T.dot([delta.x, delta.y]) + target *= pixels_per_meter + target += size // 2 + + if min(target) < 0 or max(target) >= size: + continue + + trigger = light.trigger_volume + light.get_transform().transform(trigger.location) + dist = trigger.location.distance(vehicle.get_location()) + a = np.sqrt( + trigger.extent.x ** 2 + + trigger.extent.y ** 2 + + trigger.extent.z ** 2) + b = np.sqrt( + vehicle.bounding_box.extent.x ** 2 + + vehicle.bounding_box.extent.y ** 2 + + vehicle.bounding_box.extent.z ** 2) + + if dist > a + b: + continue + + result.append(light) + + return result + + +def draw_traffic_lights(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5): + image = Image.fromarray(image) + draw = ImageDraw.Draw(image) + + 
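# get_nearby_lights above and both draw_* helpers here apply the same
# ego-centric top-down mapping: rotate the world-frame offset by
# theta = 90 deg + yaw, scale by pixels_per_meter, and shift to the image
# centre. The same mapping on its own (function name and defaults are ours,
# matching the arguments used in this file):

import numpy as np

def world_to_bev_pixel(delta_xy, yaw_deg, pixels_per_meter=5.5, size=512):
    theta = np.radians(90 + yaw_deg)
    R = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta),  np.cos(theta)]])
    # rotate into the ego frame, metres -> pixels, origin -> image centre
    return R.T.dot(delta_xy) * pixels_per_meter + size // 2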
transform = vehicle.get_transform() + pos = transform.location + theta = np.radians(90 + transform.rotation.yaw) + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)], + ]) + + for light in lights: + delta = light.get_transform().location - pos + + target = R.T.dot([delta.x, delta.y]) + target *= pixels_per_meter + target += size // 2 + + if min(target) < 0 or max(target) >= size: + continue + + trigger = light.trigger_volume + light.get_transform().transform(trigger.location) + dist = trigger.location.distance(vehicle.get_location()) + a = np.sqrt( + trigger.extent.x ** 2 + + trigger.extent.y ** 2 + + trigger.extent.z ** 2) + b = np.sqrt( + vehicle.bounding_box.extent.x ** 2 + + vehicle.bounding_box.extent.y ** 2 + + vehicle.bounding_box.extent.z ** 2) + + if dist > a + b: + continue + + x, y = target + draw.ellipse((x-radius, y-radius, x+radius, y+radius), 23 + light.state.real) # 13 changed to 23 for carla 0.9.10 + + return np.array(image) + + +def draw_stop_signs(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5): + image = Image.fromarray(image) + draw = ImageDraw.Draw(image) + + transform = vehicle.get_transform() + pos = transform.location + theta = np.radians(90 + transform.rotation.yaw) + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)], + ]) + + for light in lights: + delta = light.get_transform().location - pos + + target = R.T.dot([delta.x, delta.y]) + target *= pixels_per_meter + target += size // 2 + + if min(target) < 0 or max(target) >= size: + continue + + trigger = light.trigger_volume + light.get_transform().transform(trigger.location) + dist = trigger.location.distance(vehicle.get_location()) + a = np.sqrt( + trigger.extent.x ** 2 + + trigger.extent.y ** 2 + + trigger.extent.z ** 2) + b = np.sqrt( + vehicle.bounding_box.extent.x ** 2 + + vehicle.bounding_box.extent.y ** 2 + + vehicle.bounding_box.extent.z ** 2) + + if dist > a + b: + continue + + x, y = target + draw.ellipse((x-radius, y-radius, x+radius, y+radius), 26) + + return np.array(image) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/pid_controller.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/pid_controller.py new file mode 100644 index 00000000..df36a0a5 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/pid_controller.py @@ -0,0 +1,28 @@ +from collections import deque + +import numpy as np + + +class PIDController(object): + def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20): + self._K_P = K_P + self._K_I = K_I + self._K_D = K_D + + self._window = deque([0 for _ in range(n)], maxlen=n) + self._max = 0.0 + self._min = 0.0 + + def step(self, error): + self._window.append(error) + self._max = max(self._max, abs(error)) + self._min = -abs(self._max) + + if len(self._window) >= 2: + integral = np.mean(self._window) + derivative = (self._window[-1] - self._window[-2]) + else: + integral = 0.0 + derivative = 0.0 + + return self._K_P * error + self._K_I * integral + self._K_D * derivative diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py new file mode 100644 index 00000000..6aff90a2 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py @@ -0,0 +1,113 @@ +import os +from collections import deque + +import numpy as np + + +DEBUG = int(os.environ.get('HAS_DISPLAY', 0)) + + +class Plotter(object): + def __init__(self, 
size): + self.size = size + self.clear() + self.title = str(self.size) + + def clear(self): + from PIL import Image, ImageDraw + + self.img = Image.fromarray(np.zeros((self.size, self.size, 3), dtype=np.uint8)) + self.draw = ImageDraw.Draw(self.img) + + def dot(self, pos, node, color=(255, 255, 255), r=2): + x, y = 5.5 * (pos - node) + x += self.size / 2 + y += self.size / 2 + + self.draw.ellipse((x-r, y-r, x+r, y+r), color) + + def show(self): + if not DEBUG: + return + + import cv2 + + cv2.imshow(self.title, cv2.cvtColor(np.array(self.img), cv2.COLOR_BGR2RGB)) + cv2.waitKey(1) + + +class RoutePlanner(object): + def __init__(self, min_distance, max_distance, debug_size=256): + self.route = deque() + self.min_distance = min_distance + self.max_distance = max_distance + + # self.mean = np.array([49.0, 8.0]) # for carla 9.9 + # self.scale = np.array([111324.60662786, 73032.1570362]) # for carla 9.9 + self.mean = np.array([0.0, 0.0]) # for carla 9.10 + self.scale = np.array([111324.60662786, 111319.490945]) # for carla 9.10 + + self.debug = Plotter(debug_size) + + def set_route(self, global_plan, gps=False, global_plan_world = None): + self.route.clear() + + if global_plan_world: + for (pos, cmd), (pos_word, _ )in zip(global_plan, global_plan_world): + if gps: + pos = np.array([pos['lat'], pos['lon']]) + pos -= self.mean + pos *= self.scale + else: + pos = np.array([pos.location.x, pos.location.y]) + pos -= self.mean + + self.route.append((pos, cmd, pos_word)) + else: + for pos, cmd in global_plan: + if gps: + pos = np.array([pos['lat'], pos['lon']]) + pos -= self.mean + pos *= self.scale + else: + pos = np.array([pos.location.x, pos.location.y]) + pos -= self.mean + + self.route.append((pos, cmd)) + + def run_step(self, gps): + self.debug.clear() + + if len(self.route) == 1: + return self.route[0] + + to_pop = 0 + farthest_in_range = -np.inf + cumulative_distance = 0.0 + + for i in range(1, len(self.route)): + if cumulative_distance > self.max_distance: + break + + cumulative_distance += np.linalg.norm(self.route[i][0] - self.route[i-1][0]) + distance = np.linalg.norm(self.route[i][0] - gps) + + if distance <= self.min_distance and distance > farthest_in_range: + farthest_in_range = distance + to_pop = i + + r = 255 * int(distance > self.min_distance) + g = 255 * int(self.route[i][1].value == 4) + b = 255 + self.debug.dot(gps, self.route[i][0], (r, g, b)) + + for _ in range(to_pop): + if len(self.route) > 2: + self.route.popleft() + + self.debug.dot(gps, self.route[0][0], (0, 255, 0)) + self.debug.dot(gps, self.route[1][0], (255, 0, 0)) + self.debug.dot(gps, gps, (0, 0, 255)) + self.debug.show() + + return self.route[1] diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/roach_ap_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/roach_ap_agent.py new file mode 100644 index 00000000..ac8467f7 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/roach_ap_agent.py @@ -0,0 +1,636 @@ +import os +import json +import datetime +import pathlib +import time +import cv2 + +import torch +import carla +import numpy as np +from PIL import Image + +from leaderboard.autoagents import autonomous_agent +import numpy as np +from omegaconf import OmegaConf + +from roach.criteria import run_stop_sign +from roach.obs_manager.birdview.chauffeurnet import ObsManager +from roach.utils.config_utils import load_entry_point +import roach.utils.transforms as trans_utils +from roach.utils.traffic_light import TrafficLightHandler + +from 
srunner.scenariomanager.carla_data_provider import CarlaDataProvider +from leaderboard.utils.route_manipulation import downsample_route +from agents.navigation.local_planner import RoadOption + +from team_code.planner import RoutePlanner + + +SAVE_PATH = os.environ.get('SAVE_PATH', None) + +def get_entry_point(): + return 'ROACHAgent' + +def _numpy(carla_vector, normalize=False): + result = np.float32([carla_vector.x, carla_vector.y]) + + if normalize: + return result / (np.linalg.norm(result) + 1e-4) + + return result + + +def _location(x, y, z): + return carla.Location(x=float(x), y=float(y), z=float(z)) + + +def get_xyz(_): + return [_.x, _.y, _.z] + + +def _orientation(yaw): + return np.float32([np.cos(np.radians(yaw)), np.sin(np.radians(yaw))]) + + +def get_collision(p1, v1, p2, v2): + A = np.stack([v1, -v2], 1) + b = p2 - p1 + + if abs(np.linalg.det(A)) < 1e-3: + return False, None + + x = np.linalg.solve(A, b) + collides = all(x >= 0) and all(x <= 1) # how many seconds until collision + + return collides, p1 + x[0] * v1 + + +class ROACHAgent(autonomous_agent.AutonomousAgent): + def setup(self, path_to_conf_file, ckpt="roach/log/ckpt_11833344.pth"): + self._render_dict = None + self.supervision_dict = None + self._ckpt = ckpt + cfg = OmegaConf.load(path_to_conf_file) + cfg = OmegaConf.to_container(cfg) + self.cfg = cfg + self._obs_configs = cfg['obs_configs'] + self._train_cfg = cfg['training'] + self._policy_class = load_entry_point(cfg['policy']['entry_point']) + self._policy_kwargs = cfg['policy']['kwargs'] + if self._ckpt is None: + self._policy = None + else: + self._policy, self._train_cfg['kwargs'] = self._policy_class.load(self._ckpt) + self._policy = self._policy.eval() + self._wrapper_class = load_entry_point(cfg['env_wrapper']['entry_point']) + self._wrapper_kwargs = cfg['env_wrapper']['kwargs'] + + self.track = autonomous_agent.Track.SENSORS + self.config_path = path_to_conf_file + self.step = -1 + self.wall_start = time.time() + self.initialized = False + + self._3d_bb_distance = 50 + + self.prev_lidar = None + + self.save_path = None + if SAVE_PATH is not None: + now = datetime.datetime.now() + string = pathlib.Path(os.environ['ROUTES']).stem + '_' + string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second))) + + + self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string + self.save_path.mkdir(parents=True, exist_ok=False) + + (self.save_path / 'rgb').mkdir() + (self.save_path / 'measurements').mkdir() + (self.save_path / 'supervision').mkdir() + (self.save_path / 'bev').mkdir() + + def _init(self): + self._waypoint_planner = RoutePlanner(4.0, 50) + self._waypoint_planner.set_route(self._plan_gps_HACK, True) + + self._command_planner = RoutePlanner(7.5, 25.0, 257) + self._command_planner.set_route(self._global_plan, True) + + self._route_planner = RoutePlanner(4.0, 50.0) + self._route_planner.set_route(self._global_plan, True) + + self._world = CarlaDataProvider.get_world() + self._map = self._world.get_map() + self._ego_vehicle = CarlaDataProvider.get_ego() + self._last_route_location = self._ego_vehicle.get_location() + self._criteria_stop = run_stop_sign.RunStopSign(self._world) + self.birdview_obs_manager = ObsManager(self.cfg['obs_configs']['birdview'], self._criteria_stop) + self.birdview_obs_manager.attach_ego_vehicle(self._ego_vehicle) + + self.navigation_idx = -1 + + + # for stop signs + self._target_stop_sign = None # the stop sign affecting the ego vehicle + self._stop_completed = False # if the ego vehicle has 
completed the stop sign + self._affected_by_stop = False # if the ego vehicle is influenced by a stop sign + + TrafficLightHandler.reset(self._world) + print("initialized") + + self.initialized = True + + def _get_angle_to(self, pos, theta, target): + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)], + ]) + + aim = R.T.dot(target - pos) + angle = -np.degrees(np.arctan2(-aim[1], aim[0])) + angle = 0.0 if np.isnan(angle) else angle + + return angle + + + def _truncate_global_route_till_local_target(self, windows_size=5): + ev_location = self._ego_vehicle.get_location() + closest_idx = 0 + for i in range(len(self._global_route)-1): + if i > windows_size: + break + + loc0 = self._global_route[i][0].transform.location + loc1 = self._global_route[i+1][0].transform.location + + wp_dir = loc1 - loc0 + wp_veh = ev_location - loc0 + dot_ve_wp = wp_veh.x * wp_dir.x + wp_veh.y * wp_dir.y + wp_veh.z * wp_dir.z + + if dot_ve_wp > 0: + closest_idx = i+1 + if closest_idx > 0: + self._last_route_location = carla.Location(self._global_route[0][0].transform.location) + + self._global_route = self._global_route[closest_idx:] + + def _get_position(self, tick_data): + gps = tick_data['gps'] + gps = (gps - self._command_planner.mean) * self._command_planner.scale + + return gps + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp_route): + """ + Set the plan (route) for the agent + """ + self._global_route = wp_route + ds_ids = downsample_route(global_plan_world_coord, 50) + self._global_plan = [global_plan_gps[x] for x in ds_ids] + self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] + + self._plan_gps_HACK = global_plan_gps + self._plan_HACK = global_plan_world_coord + + def sensors(self): + return [ + { + 'type': 'sensor.camera.rgb', + 'x': -1.5, 'y': 0.0, 'z':2.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 900, 'height': 256, 'fov': 100, + 'id': 'rgb' + }, + { + 'type': 'sensor.other.imu', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.05, + 'id': 'imu' + }, + { + 'type': 'sensor.other.gnss', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.01, + 'id': 'gps' + }, + { + 'type': 'sensor.speedometer', + 'reading_frequency': 20, + 'id': 'speed' + } + ] + + def tick(self, input_data, timestamp): + self._truncate_global_route_till_local_target() + + birdview_obs = self.birdview_obs_manager.get_observation(self._global_route) + control = self._ego_vehicle.get_control() + throttle = np.array([control.throttle], dtype=np.float32) + steer = np.array([control.steer], dtype=np.float32) + brake = np.array([control.brake], dtype=np.float32) + gear = np.array([control.gear], dtype=np.float32) + + + ev_transform = self._ego_vehicle.get_transform() + vel_w = self._ego_vehicle.get_velocity() + vel_ev = trans_utils.vec_global_to_ref(vel_w, ev_transform.rotation) + vel_xy = np.array([vel_ev.x, vel_ev.y], dtype=np.float32) + + + self._criteria_stop.tick(self._ego_vehicle, timestamp) + + state_list = [] + state_list.append(throttle) + state_list.append(steer) + state_list.append(brake) + state_list.append(gear) + state_list.append(vel_xy) + state = np.concatenate(state_list) + obs_dict = { + 'state': state.astype(np.float32), + 'birdview': birdview_obs['masks'], + } + + rgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB) + + gps = input_data['gps'][1][:2] + speed = input_data['speed'][1]['speed'] + 
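# Note the shape of the observation assembled above for the Roach policy:
# 'state' is a 6-d float32 vector [throttle, steer, brake, gear, vel_x,
# vel_y] (each control field wrapped as a 1-element array before the
# concatenate), and 'birdview' carries the rendered bird's-eye-view masks
# from the ObsManager; the rgb image is kept only for logging and never
# reaches the policy itself.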
compass = input_data['imu'][1][-1] + + target_gps, target_command = self.get_target_gps(input_data['gps'][1], compass) + + weather = self._weather_to_dict(self._world.get_weather()) + + result = { + 'rgb': rgb, + 'gps': gps, + 'speed': speed, + 'compass': compass, + 'weather': weather, + } + next_wp, next_cmd = self._route_planner.run_step(self._get_position(result)) + + result['next_command'] = next_cmd.value + result['x_target'] = next_wp[0] + result['y_target'] = next_wp[1] + + + return result, obs_dict, birdview_obs['rendered'], target_gps, target_command + + def im_render(self, render_dict): + im_birdview = render_dict['rendered'] + h, w, c = im_birdview.shape + im = np.zeros([h, w*2, c], dtype=np.uint8) + im[:h, :w] = im_birdview + + action_str = np.array2string(render_dict['action'], precision=2, separator=',', suppress_small=True) + + + txt_1 = f'a{action_str}' + im = cv2.putText(im, txt_1, (3, 24), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) + + debug_texts = [ + 'should_brake: ' + render_dict['should_brake'], + ] + for i, txt in enumerate(debug_texts): + im = cv2.putText(im, txt, (w, (i+2)*12), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1) + return im + + @torch.no_grad() + def run_step(self, input_data, timestamp): + if not self.initialized: + self._init() + + self.step += 1 + + if self.step < 20: + + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + self.last_control = control + return control + + if self.step % 2 != 0: + return self.last_control + tick_data, policy_input, rendered, target_gps, target_command = self.tick(input_data, timestamp) + + gps = self._get_position(tick_data) + + near_node, near_command = self._waypoint_planner.run_step(gps) + far_node, far_command = self._command_planner.run_step(gps) + + actions, values, log_probs, mu, sigma, features = self._policy.forward( + policy_input, deterministic=True, clip_action=True) + control = self.process_act(actions) + + render_dict = {"rendered": rendered, "action": actions} + + # additional collision detection to enhance safety + should_brake = self.collision_detect() + only_ap_brake = True if (control.brake <= 0 and should_brake) else False + if should_brake: + control.steer = control.steer * 0.5 + control.throttle = 0.0 + control.brake = 1.0 + render_dict = {"rendered": rendered, "action": actions, "should_brake":str(should_brake),} + + render_img = self.im_render(render_dict) + + supervision_dict = { + 'action': np.array([control.throttle, control.steer, control.brake], dtype=np.float32), + 'value': values[0], + 'action_mu': mu[0], + 'action_sigma': sigma[0], + 'features': features[0], + 'speed': tick_data['speed'], + 'target_gps': target_gps, + 'target_command': target_command, + 'should_brake': should_brake, + 'only_ap_brake': only_ap_brake, + } + if SAVE_PATH is not None and self.step % 10 == 0: + self.save(near_node, far_node, near_command, far_command, tick_data, supervision_dict, render_img, should_brake) + + steer = control.steer + control.steer = steer + 1e-2 * np.random.randn() + self.last_control = control + return control + + def collision_detect(self): + actors = self._world.get_actors() + + vehicle = self._is_vehicle_hazard(actors.filter('*vehicle*')) + walker = self._is_walker_hazard(actors.filter('*walker*')) + + + self.is_vehicle_present = 1 if vehicle is not None else 0 + self.is_pedestrian_present = 1 if walker is not None else 0 + + return any(x is not None for x in [vehicle, walker]) + + def _is_walker_hazard(self, walkers_list): + 
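# Both hazard checks in this class reduce to get_collision(p1, v1, p2, v2)
# defined near the top of this file: it solves p1 + t1*v1 == p2 + t2*v2 as
# the 2x2 system [v1 -v2] @ [t1, t2]^T = p2 - p1 (skipping near-singular
# cases via the determinant guard) and reports a collision when both
# parameters land in [0, 1]; since v1 and v2 are velocities scaled to a
# lookahead horizon, that means the paths cross within the horizon. A tiny
# worked example with hypothetical numbers:

import numpy as np

p1, v1 = np.float32([0, 0]), np.float32([10, 0])  # ego, 10 m lookahead along +x
p2, v2 = np.float32([5, -4]), np.float32([0, 8])  # walker crossing from the right
t = np.linalg.solve(np.stack([v1, -v2], 1), p2 - p1)
assert all(t >= 0) and all(t <= 1)                # t = [0.5, 0.5]: paths meet at (5, 0)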
z = self._ego_vehicle.get_location().z + p1 = _numpy(self._ego_vehicle.get_location()) + v1 = 10.0 * _orientation(self._ego_vehicle.get_transform().rotation.yaw) + + for walker in walkers_list: + v2_hat = _orientation(walker.get_transform().rotation.yaw) + s2 = np.linalg.norm(_numpy(walker.get_velocity())) + + if s2 < 0.05: + v2_hat *= s2 + + p2 = -3.0 * v2_hat + _numpy(walker.get_location()) + v2 = 8.0 * v2_hat + + collides, collision_point = get_collision(p1, v1, p2, v2) + + if collides: + return walker + + return None + + def _is_vehicle_hazard(self, vehicle_list): + z = self._ego_vehicle.get_location().z + + o1 = _orientation(self._ego_vehicle.get_transform().rotation.yaw) + p1 = _numpy(self._ego_vehicle.get_location()) + s1 = max(10, 3.0 * np.linalg.norm(_numpy(self._ego_vehicle.get_velocity()))) # increases the threshold distance + v1_hat = o1 + v1 = s1 * v1_hat + + for target_vehicle in vehicle_list: + if target_vehicle.id == self._ego_vehicle.id: + continue + + o2 = _orientation(target_vehicle.get_transform().rotation.yaw) + p2 = _numpy(target_vehicle.get_location()) + s2 = max(5.0, 2.0 * np.linalg.norm(_numpy(target_vehicle.get_velocity()))) + v2_hat = o2 + v2 = s2 * v2_hat + + p2_p1 = p2 - p1 + distance = np.linalg.norm(p2_p1) + p2_p1_hat = p2_p1 / (distance + 1e-4) + + angle_to_car = np.degrees(np.arccos(v1_hat.dot(p2_p1_hat))) + angle_between_heading = np.degrees(np.arccos(o1.dot(o2))) + + # to consider -ve angles too + angle_to_car = min(angle_to_car, 360.0 - angle_to_car) + angle_between_heading = min(angle_between_heading, 360.0 - angle_between_heading) + + if angle_between_heading > 60.0 and not (angle_to_car < 15 and distance < s1): + continue + elif angle_to_car > 30.0: + continue + elif distance > s1: + continue + + return target_vehicle + + return None + + def save(self, near_node, far_node, near_command, far_command, tick_data, supervision_dict, render_img, should_brake): + frame = self.step // 10 - 2 + + Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame)) + Image.fromarray(render_img).save(self.save_path / 'bev' / ('%04d.png' % frame)) + pos = self._get_position(tick_data) + theta = tick_data['compass'] + speed = tick_data['speed'] + + data = { + 'x': pos[0], + 'y': pos[1], + 'theta': theta, + 'speed': speed, + 'x_command_far': far_node[0], + 'y_command_far': far_node[1], + 'command_far': far_command.value, + 'x_command_near': near_node[0], + 'y_command_near': near_node[1], + 'command_near': near_command.value, + 'should_brake': should_brake, + 'x_target': tick_data['x_target'], + 'y_target': tick_data['y_target'], + 'target_command': tick_data['next_command'], + } + outfile = open(self.save_path / 'measurements' / ('%04d.json' % frame), 'w') + json.dump(data, outfile, indent=4) + outfile.close() + with open(self.save_path / 'supervision' / ('%04d.npy' % frame), 'wb') as f: + np.save(f, supervision_dict) + + + def get_target_gps(self, gps, compass): + # target gps + def gps_to_location(gps): + lat, lon, z = gps + lat = float(lat) + lon = float(lon) + z = float(z) + + location = carla.Location(z=z) + xy = (gps[:2] - self._command_planner.mean) * self._command_planner.scale + location.x = xy[0] + location.y = -xy[1] + return location + global_plan_gps = self._global_plan + next_gps, _ = global_plan_gps[self.navigation_idx+1] + next_gps = np.array([next_gps['lat'], next_gps['lon'], next_gps['z']]) + next_vec_in_global = gps_to_location(next_gps) - gps_to_location(gps) + ref_rot_in_global = carla.Rotation(yaw=np.rad2deg(compass)-90.0) + 
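# gps_to_location above mirrors RoutePlanner: subtract the planner mean from
# (lat, lon), multiply by its metres-per-degree scale, and negate y to get
# back to CARLA's left-handed world frame; the -90 deg offset lines the
# imu's compass reading up with CARLA's yaw convention. The index advance
# below then fires once the next waypoint is both close (< 12 m) and
# already behind the ego (negative x in the ego frame).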
loc_in_ev = trans_utils.vec_global_to_ref(next_vec_in_global, ref_rot_in_global) + + if np.sqrt(loc_in_ev.x**2+loc_in_ev.y**2) < 12.0 and loc_in_ev.x < 0.0: + self.navigation_idx += 1 + + self.navigation_idx = min(self.navigation_idx, len(global_plan_gps)-2) + + _, road_option_0 = global_plan_gps[max(0, self.navigation_idx)] + gps_point, road_option_1 = global_plan_gps[self.navigation_idx+1] + gps_point = np.array([gps_point['lat'], gps_point['lon'], gps_point['z']]) + + if (road_option_0 in [RoadOption.CHANGELANELEFT, RoadOption.CHANGELANERIGHT]) \ + and (road_option_1 not in [RoadOption.CHANGELANELEFT, RoadOption.CHANGELANERIGHT]): + road_option = road_option_1 + else: + road_option = road_option_0 + + return np.array(gps_point, dtype=np.float32), np.array([road_option.value], dtype=np.int8) + + + def process_act(self, action): + + # acc, steer = action.astype(np.float64) + acc = action[0][0] + steer = action[0][1] + if acc >= 0.0: + throttle = acc + brake = 0.0 + else: + throttle = 0.0 + brake = np.abs(acc) + + throttle = np.clip(throttle, 0, 1) + steer = np.clip(steer, -1, 1) + brake = np.clip(brake, 0, 1) + control = carla.VehicleControl(throttle=throttle, steer=steer, brake=brake) + return control + + def _weather_to_dict(self, carla_weather): + weather = { + 'cloudiness': carla_weather.cloudiness, + 'precipitation': carla_weather.precipitation, + 'precipitation_deposits': carla_weather.precipitation_deposits, + 'wind_intensity': carla_weather.wind_intensity, + 'sun_azimuth_angle': carla_weather.sun_azimuth_angle, + 'sun_altitude_angle': carla_weather.sun_altitude_angle, + 'fog_density': carla_weather.fog_density, + 'fog_distance': carla_weather.fog_distance, + 'wetness': carla_weather.wetness, + 'fog_falloff': carla_weather.fog_falloff, + } + + return weather + + + def _get_3d_bbs(self, max_distance=50): + + bounding_boxes = { + "traffic_lights": [], + "stop_signs": [], + "vehicles": [], + "pedestrians": [] + } + + bounding_boxes['traffic_lights'] = self._find_obstacle_3dbb('*traffic_light*', max_distance) + bounding_boxes['stop_signs'] = self._find_obstacle_3dbb('*stop*', max_distance) + bounding_boxes['vehicles'] = self._find_obstacle_3dbb('*vehicle*', max_distance) + bounding_boxes['pedestrians'] = self._find_obstacle_3dbb('*walker*', max_distance) + + return bounding_boxes + + + def _find_obstacle_3dbb(self, obstacle_type, max_distance=50): + """Returns a list of 3d bounding boxes of type obstacle_type. + If the object does have a bounding box, this is returned. Otherwise a bb + of size 0.5,0.5,2 is returned at the origin of the object. + + Args: + obstacle_type (String): Regular expression + max_distance (int, optional): max search distance. Returns all bbs in this radius. Defaults to 50. 
+ + Returns: + List: List of Boundingboxes + """ + obst = list() + + _actors = self._world.get_actors() + _obstacles = _actors.filter(obstacle_type) + + for _obstacle in _obstacles: + distance_to_car = _obstacle.get_transform().location.distance(self._ego_vehicle.get_location()) + + if 0 < distance_to_car <= max_distance: + + if hasattr(_obstacle, 'bounding_box'): + loc = _obstacle.bounding_box.location + _obstacle.get_transform().transform(loc) + + extent = _obstacle.bounding_box.extent + _rotation_matrix = self.get_matrix(carla.Transform(carla.Location(0,0,0), _obstacle.get_transform().rotation)) + + rotated_extent = np.squeeze(np.array((np.array([[extent.x, extent.y, extent.z, 1]]) @ _rotation_matrix)[:3])) + + bb = np.array([ + [loc.x, loc.y, loc.z], + [rotated_extent[0], rotated_extent[1], rotated_extent[2]] + ]) + + else: + loc = _obstacle.get_transform().location + bb = np.array([ + [loc.x, loc.y, loc.z], + [0.5, 0.5, 2] + ]) + + obst.append(bb) + + return obst + + def get_matrix(self, transform): + """ + Creates matrix from carla transform. + """ + + rotation = transform.rotation + location = transform.location + c_y = np.cos(np.radians(rotation.yaw)) + s_y = np.sin(np.radians(rotation.yaw)) + c_r = np.cos(np.radians(rotation.roll)) + s_r = np.sin(np.radians(rotation.roll)) + c_p = np.cos(np.radians(rotation.pitch)) + s_p = np.sin(np.radians(rotation.pitch)) + matrix = np.matrix(np.identity(4)) + matrix[0, 3] = location.x + matrix[1, 3] = location.y + matrix[2, 3] = location.z + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + return matrix + diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py new file mode 100644 index 00000000..8c11cda6 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py @@ -0,0 +1,272 @@ +import os +import json +import datetime +import pathlib +import time +import cv2 +import carla +from collections import deque +import math +from collections import OrderedDict + +import torch +import carla +import numpy as np +from PIL import Image +from torchvision import transforms as T + +from leaderboard.autoagents import autonomous_agent + +from TCP.model import TCP +from TCP.config import GlobalConfig +from team_code.planner import RoutePlanner + + +SAVE_PATH = os.environ.get('SAVE_PATH', None) + + +def get_entry_point(): + return 'TCPAgent' + + +class TCPAgent(autonomous_agent.AutonomousAgent): + def setup(self, path_to_conf_file): + self.track = autonomous_agent.Track.SENSORS + self.alpha = 0.3 + self.status = 0 + self.steer_step = 0 + self.last_moving_status = 0 + self.last_moving_step = -1 + self.last_steers = deque() + + self.config_path = path_to_conf_file + self.step = -1 + self.wall_start = time.time() + self.initialized = False + + self.config = GlobalConfig() + self.net = TCP(self.config) + + + ckpt = torch.load(path_to_conf_file) + ckpt = ckpt["state_dict"] + new_state_dict = OrderedDict() + for key, value in ckpt.items(): + new_key = key.replace("model.","") + new_state_dict[new_key] = value + self.net.load_state_dict(new_state_dict, strict = False) + self.net.cuda() + self.net.eval() + + self.takeover = False + self.stop_time = 0 + self.takeover_time = 0 + + 
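# The rename loop above exists because the checkpoint was presumably written
# by a training wrapper (PyTorch Lightning style) that stores the network as
# self.model, so every weight key carries a "model." prefix; strict=False
# then tolerates any keys that still do not line up. An equivalent one-liner
# under the same assumption:

from collections import OrderedDict
new_state_dict = OrderedDict((k.replace("model.", ""), v) for k, v in ckpt.items())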
self.save_path = None + self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) + + self.last_steers = deque() + if SAVE_PATH is not None: + now = datetime.datetime.now() + string = pathlib.Path(os.environ['ROUTES']).stem + '_' + string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second))) + + print (string) + + self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string + self.save_path.mkdir(parents=True, exist_ok=False) + + (self.save_path / 'rgb').mkdir() + (self.save_path / 'meta').mkdir() + (self.save_path / 'bev').mkdir() + + def _init(self): + self._route_planner = RoutePlanner(4.0, 50.0) + self._route_planner.set_route(self._global_plan, True) + + self.initialized = True + + def _get_position(self, tick_data): + gps = tick_data['gps'] + gps = (gps - self._route_planner.mean) * self._route_planner.scale + + return gps + + def sensors(self): + return [ + { + 'type': 'sensor.camera.rgb', + 'x': -1.5, 'y': 0.0, 'z':2.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 900, 'height': 256, 'fov': 100, + 'id': 'rgb' + }, + { + 'type': 'sensor.camera.rgb', + 'x': 0.0, 'y': 0.0, 'z': 50.0, + 'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0, + 'width': 512, 'height': 512, 'fov': 5 * 10.0, + 'id': 'bev' + }, + { + 'type': 'sensor.other.imu', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.05, + 'id': 'imu' + }, + { + 'type': 'sensor.other.gnss', + 'x': 0.0, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.01, + 'id': 'gps' + }, + { + 'type': 'sensor.speedometer', + 'reading_frequency': 20, + 'id': 'speed' + } + ] + + def tick(self, input_data): + self.step += 1 + + rgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB) + bev = cv2.cvtColor(input_data['bev'][1][:, :, :3], cv2.COLOR_BGR2RGB) + gps = input_data['gps'][1][:2] + speed = input_data['speed'][1]['speed'] + compass = input_data['imu'][1][-1] + + if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames + compass = 0.0 + + result = { + 'rgb': rgb, + 'gps': gps, + 'speed': speed, + 'compass': compass, + 'bev': bev + } + + pos = self._get_position(result) + result['gps'] = pos + next_wp, next_cmd = self._route_planner.run_step(pos) + result['next_command'] = next_cmd.value + + + theta = compass + np.pi/2 + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)] + ]) + + local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) + local_command_point = R.T.dot(local_command_point) + result['target_point'] = tuple(local_command_point) + + return result + @torch.no_grad() + def run_step(self, input_data, timestamp): + if not self.initialized: + self._init() + tick_data = self.tick(input_data) + if self.step < self.config.seq_len: + rgb = self._im_transform(tick_data['rgb']).unsqueeze(0) + + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + + return control + + gt_velocity = torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32) + command = tick_data['next_command'] + if command < 0: + command = 4 + command -= 1 + assert command in [0, 1, 2, 3, 4, 5] + cmd_one_hot = [0] * 6 + cmd_one_hot[command] = 1 + cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) + speed = torch.FloatTensor([float(tick_data['speed'])]).view(1,1).to('cuda', dtype=torch.float32) + speed = speed / 12 + rgb = 
self._im_transform(tick_data['rgb']).unsqueeze(0).to('cuda', dtype=torch.float32) + + tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]), + torch.FloatTensor([tick_data['target_point'][1]])] + target_point = torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32) + state = torch.cat([speed, target_point, cmd_one_hot], 1) + + pred= self.net(rgb, state, target_point) + + steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, tick_data['next_command'], gt_velocity, target_point) + + steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, target_point) + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 + + self.pid_metadata = metadata_traj + control = carla.VehicleControl() + + if self.status == 0: + self.alpha = 0.3 + self.pid_metadata['agent'] = 'traj' + control.steer = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) + control.throttle = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) + control.brake = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + else: + self.alpha = 0.3 + self.pid_metadata['agent'] = 'ctrl' + control.steer = np.clip(self.alpha*steer_traj + (1-self.alpha)*steer_ctrl, -1, 1) + control.throttle = np.clip(self.alpha*throttle_traj + (1-self.alpha)*throttle_ctrl, 0, 0.75) + control.brake = np.clip(self.alpha*brake_traj + (1-self.alpha)*brake_ctrl, 0, 1) + + + self.pid_metadata['steer_ctrl'] = float(steer_ctrl) + self.pid_metadata['steer_traj'] = float(steer_traj) + self.pid_metadata['throttle_ctrl'] = float(throttle_ctrl) + self.pid_metadata['throttle_traj'] = float(throttle_traj) + self.pid_metadata['brake_ctrl'] = float(brake_ctrl) + self.pid_metadata['brake_traj'] = float(brake_traj) + + if control.brake > 0.5: + control.throttle = float(0) + + if len(self.last_steers) >= 20: + self.last_steers.popleft() + self.last_steers.append(abs(float(control.steer))) + #chech whether ego is turning + # num of steers larger than 0.1 + num = 0 + for s in self.last_steers: + if s > 0.10: + num += 1 + if num > 10: + self.status = 1 + self.steer_step += 1 + + else: + self.status = 0 + + self.pid_metadata['status'] = self.status + + if SAVE_PATH is not None and self.step % 10 == 0: + self.save(tick_data) + return control + + def save(self, tick_data): + frame = self.step // 10 + + Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame)) + + Image.fromarray(tick_data['bev']).save(self.save_path / 'bev' / ('%04d.png' % frame)) + + outfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w') + json.dump(self.pid_metadata, outfile, indent=4) + outfile.close() + + def destroy(self): + del self.net + torch.cuda.empty_cache() \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/TCP/model.py b/behavior_metrics/brains/CARLA/TCP/model.py new file mode 100644 index 00000000..6881c851 --- /dev/null +++ b/behavior_metrics/brains/CARLA/TCP/model.py @@ -0,0 +1,344 @@ +from collections import deque +import numpy as np +import torch +from torch import nn +from brains.CARLA.TCP.resnet import * + + +class PIDController(object): + def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20): + self._K_P = K_P + self._K_I = K_I + self._K_D = K_D + + self._window = deque([0 for _ in range(n)], maxlen=n) + self._max = 0.0 + self._min = 0.0 + + def step(self, error): + self._window.append(error) + self._max = max(self._max, abs(error)) + 
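# This controller keeps a fixed window of the last n errors: the integral
# term is the window mean, so windup is bounded by construction, and the
# derivative is simply the newest difference. A quick feel for it with
# hypothetical gains (the window starts as twenty zeros):
#
#   pid = PIDController(K_P=1.0, K_I=0.5, K_D=0.1, n=20)
#   pid.step(1.0)  # mean 1/20, diff 1 -> 1.0 + 0.5*0.05 + 0.1*1 = 1.125
#   pid.step(1.0)  # mean 2/20, diff 0 -> 1.0 + 0.5*0.10 + 0.1*0 = 1.05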
self._min = -abs(self._max) + + if len(self._window) >= 2: + integral = np.mean(self._window) + derivative = (self._window[-1] - self._window[-2]) + else: + integral = 0.0 + derivative = 0.0 + + return self._K_P * error + self._K_I * integral + self._K_D * derivative + +class TCP(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + + self.turn_controller = PIDController(K_P=config.turn_KP, K_I=config.turn_KI, K_D=config.turn_KD, n=config.turn_n) + self.speed_controller = PIDController(K_P=config.speed_KP, K_I=config.speed_KI, K_D=config.speed_KD, n=config.speed_n) + + self.perception = resnet34(pretrained=True) + + self.measurements = nn.Sequential( + nn.Linear(1+2+6, 128), + nn.ReLU(inplace=True), + nn.Linear(128, 128), + nn.ReLU(inplace=True), + ) + + self.join_traj = nn.Sequential( + nn.Linear(128+1000, 512), + nn.ReLU(inplace=True), + nn.Linear(512, 512), + nn.ReLU(inplace=True), + nn.Linear(512, 256), + nn.ReLU(inplace=True), + ) + + self.join_ctrl = nn.Sequential( + nn.Linear(128+512, 512), + nn.ReLU(inplace=True), + nn.Linear(512, 512), + nn.ReLU(inplace=True), + nn.Linear(512, 256), + nn.ReLU(inplace=True), + ) + + self.speed_branch = nn.Sequential( + nn.Linear(1000, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 256), + nn.Dropout2d(p=0.5), + nn.ReLU(inplace=True), + nn.Linear(256, 1), + ) + + self.value_branch_traj = nn.Sequential( + nn.Linear(256, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 256), + nn.Dropout2d(p=0.5), + nn.ReLU(inplace=True), + nn.Linear(256, 1), + ) + self.value_branch_ctrl = nn.Sequential( + nn.Linear(256, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 256), + nn.Dropout2d(p=0.5), + nn.ReLU(inplace=True), + nn.Linear(256, 1), + ) + # shared branches_neurons + dim_out = 2 + + self.policy_head = nn.Sequential( + nn.Linear(256, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 256), + nn.Dropout2d(p=0.5), + nn.ReLU(inplace=True), + ) + self.decoder_ctrl = nn.GRUCell(input_size=256+4, hidden_size=256) + self.output_ctrl = nn.Sequential( + nn.Linear(256, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 256), + nn.ReLU(inplace=True), + ) + self.dist_mu = nn.Sequential(nn.Linear(256, dim_out), nn.Softplus()) + self.dist_sigma = nn.Sequential(nn.Linear(256, dim_out), nn.Softplus()) + + + self.decoder_traj = nn.GRUCell(input_size=4, hidden_size=256) + self.output_traj = nn.Linear(256, 2) + + self.init_att = nn.Sequential( + nn.Linear(128, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 29*8), + nn.Softmax(1) + ) + + self.wp_att = nn.Sequential( + nn.Linear(256+256, 256), + nn.ReLU(inplace=True), + nn.Linear(256, 29*8), + nn.Softmax(1) + ) + + self.merge = nn.Sequential( + nn.Linear(512+256, 512), + nn.ReLU(inplace=True), + nn.Linear(512, 256), + ) + + + def forward(self, img, state, target_point): + feature_emb, cnn_feature = self.perception(img) + outputs = {} + outputs['pred_speed'] = self.speed_branch(feature_emb) + measurement_feature = self.measurements(state) + + j_traj = self.join_traj(torch.cat([feature_emb, measurement_feature], 1)) + outputs['pred_value_traj'] = self.value_branch_traj(j_traj) + outputs['pred_features_traj'] = j_traj + z = j_traj + output_wp = list() + traj_hidden_state = list() + + # initial input variable to GRU + x = torch.zeros(size=(z.shape[0], 2), dtype=z.dtype).type_as(z) + + # autoregressive generation of output waypoints + for _ in range(self.config.pred_len): + x_in = torch.cat([x, target_point], dim=1) + z = self.decoder_traj(x_in, z) + traj_hidden_state.append(z) + dx = self.output_traj(z) + 
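# dx is a residual: each GRU step refines the previous waypoint rather than
# predicting an absolute position, so the trajectory starts at the ego
# origin (x initialised to zeros above) and accumulates pred_len offsets,
# with every step re-conditioned on the constant target point through x_in.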
x = dx + x + output_wp.append(x) + + pred_wp = torch.stack(output_wp, dim=1) + outputs['pred_wp'] = pred_wp + + traj_hidden_state = torch.stack(traj_hidden_state, dim=1) + init_att = self.init_att(measurement_feature).view(-1, 1, 8, 29) + feature_emb = torch.sum(cnn_feature*init_att, dim=(2, 3)) + j_ctrl = self.join_ctrl(torch.cat([feature_emb, measurement_feature], 1)) + outputs['pred_value_ctrl'] = self.value_branch_ctrl(j_ctrl) + outputs['pred_features_ctrl'] = j_ctrl + policy = self.policy_head(j_ctrl) + outputs['mu_branches'] = self.dist_mu(policy) + outputs['sigma_branches'] = self.dist_sigma(policy) + + x = j_ctrl + mu = outputs['mu_branches'] + sigma = outputs['sigma_branches'] + future_feature, future_mu, future_sigma = [], [], [] + + # initial hidden variable to GRU + h = torch.zeros(size=(x.shape[0], 256), dtype=x.dtype).type_as(x) + + for _ in range(self.config.pred_len): + x_in = torch.cat([x, mu, sigma], dim=1) + h = self.decoder_ctrl(x_in, h) + wp_att = self.wp_att(torch.cat([h, traj_hidden_state[:, _]], 1)).view(-1, 1, 8, 29) + new_feature_emb = torch.sum(cnn_feature*wp_att, dim=(2, 3)) + merged_feature = self.merge(torch.cat([h, new_feature_emb], 1)) + dx = self.output_ctrl(merged_feature) + x = dx + x + + policy = self.policy_head(x) + mu = self.dist_mu(policy) + sigma = self.dist_sigma(policy) + future_feature.append(x) + future_mu.append(mu) + future_sigma.append(sigma) + + + outputs['future_feature'] = future_feature + outputs['future_mu'] = future_mu + outputs['future_sigma'] = future_sigma + return outputs + + def process_action(self, pred, command, speed, target_point): + action = self._get_action_beta(pred['mu_branches'].view(1,2), pred['sigma_branches'].view(1,2)) + acc, steer = action.cpu().numpy()[0].astype(np.float64) + if acc >= 0.0: + throttle = acc + brake = 0.0 + else: + throttle = 0.0 + brake = np.abs(acc) + + throttle = np.clip(throttle, 0, 1) + steer = np.clip(steer, -1, 1) + brake = np.clip(brake, 0, 1) + + metadata = { + 'speed': float(speed.cpu().numpy().astype(np.float64)), + 'steer': float(steer), + 'throttle': float(throttle), + 'brake': float(brake), + 'command': command, + 'target_point': tuple(target_point[0].data.cpu().numpy().astype(np.float64)), + } + return steer, throttle, brake, metadata + + def _get_action_beta(self, alpha, beta): + x = torch.zeros_like(alpha) + x[:, 1] += 0.5 + mask1 = (alpha > 1) & (beta > 1) + x[mask1] = (alpha[mask1]-1)/(alpha[mask1]+beta[mask1]-2) + + mask2 = (alpha <= 1) & (beta > 1) + x[mask2] = 0.0 + + mask3 = (alpha > 1) & (beta <= 1) + x[mask3] = 1.0 + + # mean + mask4 = (alpha <= 1) & (beta <= 1) + x[mask4] = alpha[mask4]/torch.clamp((alpha[mask4]+beta[mask4]), min=1e-5) + + x = x * 2 - 1 + + return x + + def control_pid(self, waypoints, velocity, target): + ''' Predicts vehicle control with a PID controller. 
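+        Steering aims at the predicted waypoint whose midpoint norm is closest
+        to config.aim_dist, falling back to the route target point when the
+        predicted waypoints disagree with it; desired speed is derived from
+        the spacing between consecutive waypoints.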
+ Args: + waypoints (tensor): output of self.plan() + velocity (tensor): speedometer input + ''' + assert(waypoints.size(0)==1) + waypoints = waypoints[0].data.cpu().numpy() + target = target.squeeze().data.cpu().numpy() + + # flip y (forward is negative in our waypoints) + waypoints[:,1] *= -1 + target[1] *= -1 + + # iterate over vectors between predicted waypoints + num_pairs = len(waypoints) - 1 + best_norm = 1e5 + desired_speed = 0 + aim = waypoints[0] + for i in range(num_pairs): + # magnitude of vectors, used for speed + desired_speed += np.linalg.norm( + waypoints[i+1] - waypoints[i]) * 2.0 / num_pairs + + # norm of vector midpoints, used for steering + norm = np.linalg.norm((waypoints[i+1] + waypoints[i]) / 2.0) + if abs(self.config.aim_dist-best_norm) > abs(self.config.aim_dist-norm): + aim = waypoints[i] + best_norm = norm + + aim_last = waypoints[-1] - waypoints[-2] + + angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90 + angle_last = np.degrees(np.pi / 2 - np.arctan2(aim_last[1], aim_last[0])) / 90 + angle_target = np.degrees(np.pi / 2 - np.arctan2(target[1], target[0])) / 90 + + # choice of point to aim for steering, removing outlier predictions + # use target point if it has a smaller angle or if error is large + # predicted point otherwise + # (reduces noise in eg. straight roads, helps with sudden turn commands) + use_target_to_aim = np.abs(angle_target) < np.abs(angle) + use_target_to_aim = use_target_to_aim or (np.abs(angle_target-angle_last) > self.config.angle_thresh and target[1] < self.config.dist_thresh) + if use_target_to_aim: + angle_final = angle_target + else: + angle_final = angle + + steer = self.turn_controller.step(angle_final) + steer = np.clip(steer, -1.0, 1.0) + + speed = velocity[0].data.cpu().numpy() + brake = desired_speed < self.config.brake_speed or (speed / desired_speed) > self.config.brake_ratio + + delta = np.clip(desired_speed - speed, 0.0, self.config.clip_delta) + throttle = self.speed_controller.step(delta) + throttle = np.clip(throttle, 0.0, self.config.max_throttle) + throttle = throttle if not brake else 0.0 + + metadata = { + 'speed': float(speed.astype(np.float64)), + 'steer': float(steer), + 'throttle': float(throttle), + 'brake': float(brake), + 'wp_4': tuple(waypoints[3].astype(np.float64)), + 'wp_3': tuple(waypoints[2].astype(np.float64)), + 'wp_2': tuple(waypoints[1].astype(np.float64)), + 'wp_1': tuple(waypoints[0].astype(np.float64)), + 'aim': tuple(aim.astype(np.float64)), + 'target': tuple(target.astype(np.float64)), + 'desired_speed': float(desired_speed.astype(np.float64)), + 'angle': float(angle.astype(np.float64)), + 'angle_last': float(angle_last.astype(np.float64)), + 'angle_target': float(angle_target.astype(np.float64)), + 'angle_final': float(angle_final.astype(np.float64)), + 'delta': float(delta.astype(np.float64)), + } + + return steer, throttle, brake, metadata + + + def get_action(self, mu, sigma): + action = self._get_action_beta(mu.view(1,2), sigma.view(1,2)) + acc, steer = action[:, 0], action[:, 1] + if acc >= 0.0: + throttle = acc + brake = torch.zeros_like(acc) + else: + throttle = torch.zeros_like(acc) + brake = torch.abs(acc) + + throttle = torch.clamp(throttle, 0, 1) + steer = torch.clamp(steer, -1, 1) + brake = torch.clamp(brake, 0, 1) + + return throttle, steer, brake \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/TCP/resnet.py b/behavior_metrics/brains/CARLA/TCP/resnet.py new file mode 100644 index 00000000..41399ff2 --- /dev/null +++ 
b/behavior_metrics/brains/CARLA/TCP/resnet.py @@ -0,0 +1,389 @@ +import torch +from torch import Tensor +import torch.nn as nn +from torch.hub import load_state_dict_from_url +from typing import Type, Any, Callable, Union, List, Optional + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', + 'wide_resnet50_2', 'wide_resnet101_2'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion: int = 1 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
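+    # Channel flow per block: 1x1 reduce -> 3x3 (which carries the stride in the
+    # V1.5 variant) -> 1x1 expand to planes * expansion output channels.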
+ + expansion: int = 4 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__( + self, + block: Type[Union[BasicBlock, Bottleneck]], + layers: List[int], + num_classes: int = 1000, + zero_init_residual: bool = False, + groups: int = 1, + width_per_group: int = 64, + replace_stride_with_dilation: Optional[List[bool]] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
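+        # (Starting each block as an identity mapping eases optimisation of
+        # very deep networks early in training.)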
+        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
+                elif isinstance(m, BasicBlock):
+                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
+
+    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
+                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
+        norm_layer = self._norm_layer
+        downsample = None
+        previous_dilation = self.dilation
+        if dilate:
+            self.dilation *= stride
+            stride = 1
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                norm_layer(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                            self.base_width, previous_dilation, norm_layer))
+        self.inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes, groups=self.groups,
+                                base_width=self.base_width, dilation=self.dilation,
+                                norm_layer=norm_layer))
+
+        return nn.Sequential(*layers)
+
+    def _forward_impl(self, x: Tensor):  # returns (logits, layer4 feature map)
+        # See note [TorchScript super()]
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x_layer4 = self.layer4(x)
+
+        x = self.avgpool(x_layer4)
+        x = torch.flatten(x, 1)
+        x = self.fc(x)
+
+        return x, x_layer4
+
+    def forward(self, x: Tensor):  # returns (logits, layer4 feature map)
+        return self._forward_impl(x)
+
+
+def _resnet(
+    arch: str,
+    block: Type[Union[BasicBlock, Bottleneck]],
+    layers: List[int],
+    pretrained: bool,
+    progress: bool,
+    **kwargs: Any
+) -> ResNet:
+    model = ResNet(block, layers, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
+
+
+def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNet-18 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
+                   **kwargs)
+
+
+def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNet-34 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNet-50 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNet-101 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNet-152 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNeXt-50 32x4d model from
+    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['groups'] = 32
+    kwargs['width_per_group'] = 4
+    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
+                   pretrained, progress, **kwargs)
+
+
+def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""ResNeXt-101 32x8d model from
+    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['groups'] = 32
+    kwargs['width_per_group'] = 8
+    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
+                   pretrained, progress, **kwargs)
+
+
+def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""Wide ResNet-50-2 model from
+    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
+
+    The model is the same as ResNet except for the bottleneck number of channels
+    which is twice larger in every block. The number of channels in outer 1x1
+    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
+    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['width_per_group'] = 64 * 2
+    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
+                   pretrained, progress, **kwargs)
+
+
+def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+    r"""Wide ResNet-101-2 model from
+    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
+
+    The model is the same as ResNet except for the bottleneck number of channels
+    which is twice larger in every block. The number of channels in outer 1x1
+    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
+    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP.py b/behavior_metrics/brains/CARLA/brain_carla_TCP.py new file mode 100644 index 00000000..c780d978 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP.py @@ -0,0 +1,293 @@ +from brains.CARLA.TCP.model import TCP +from brains.CARLA.TCP.config import GlobalConfig +from collections import OrderedDict + +from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control, calculate_delta_yaw +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from brains.CARLA.utils.high_level_command import HighLevelCommandLoader +from os import path + +import numpy as np + +import torch +import time +import math +import carla + +from utils.logger import logger +import importlib +from torchvision import transforms as T +from brains.CARLA.TCP.leaderboard.team_code.planner import RoutePlanner +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import downsample_route +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import interpolate_trajectory +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_indexer import RouteIndexer + + +PRETRAINED_MODELS = '/home/SergioPaniego/Descargas/' + +#MODEL_DIR = 'best_model.ckpt' # Inside download folder. + +class Brain: + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + self.motors = actuators.get_motor('motors_0') + self.camera_rgb = sensors.get_camera('camera_0') # rgb front view camera + self.camera_seg = sensors.get_camera('camera_2') # segmentation camera + self.imu = sensors.get_imu('imu_0') # imu + self.handler = handler + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + + client = carla.Client('localhost', 2000) + client.set_timeout(100.0) + world = client.get_world() + self.map = world.get_map() + + print('-----------------------') + print('-----------------------') + print(PRETRAINED_MODELS + model) + print(PRETRAINED_MODELS + model) + print('-----------------------') + print('-----------------------') + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + else: + self.config = GlobalConfig() + self.net = TCP(self.config).to(self.device) + + ckpt = torch.load(PRETRAINED_MODELS + model,map_location=self.device) + ckpt = ckpt["state_dict"] + new_state_dict = OrderedDict() + for key, value in ckpt.items(): + new_key = key.replace("model.","") + new_state_dict[new_key] = value + self.net.load_state_dict(new_state_dict, strict = False) + + #self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + self.net.cuda() + self.net.eval() + + self.vehicle = None + while self.vehicle is None: + for vehicle in world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == 'ego_vehicle': + self.vehicle = vehicle + break + if self.vehicle is None: + print("Waiting for vehicle with role_name 'ego_vehicle'") + time.sleep(1) # sleep for 1 second before checking again + + #app_configuration = Config(config_data['config'][0]) + route_counter = 
0 + TEST_ROUTES = [{ + "map": "Town02", + "start": "-3.68, -288.22, 0.5, 0.0, 0.0, 90.0", + "end": "41.39, -212.98, 0.5, 0.0, 0.0, -90.0", + "distance": 158, + "commands": ["Right", "Right"] + }] + spawn_point = TEST_ROUTES[route_counter]['start'] + target_point = TEST_ROUTES[route_counter]['end'].split(', ') + target_point = (float(target_point[0]), float(target_point[1])) + start_point = spawn_point.split(', ') + start_point = (float(start_point[0]), float(start_point[1])) + logger.info(f'-------from {start_point} to {target_point}-------') + self.target_point = target_point + + self.target_point = [torch.FloatTensor([target_point[0]]), + torch.FloatTensor([target_point[1]])] + self.target_point = torch.stack(self.target_point, dim=1).to('cuda', dtype=torch.float32) + + #self.target_point = torch.tensor(self.target_point).view(-1, 1).to('cuda', dtype=torch.float32) + + self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) + + + repetitions = 1 + routes = '/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml' + scenarios = '/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/brains/CARLA/TCP/leaderboard/data/scenarios/town01_all_scenarios.json' + route_indexer = RouteIndexer(routes, scenarios, repetitions) + #route_indexer = RouteIndexer(args.routes, args.scenarios, args.repetitions) + # setup + config = route_indexer.next() + + # prepare route's trajectory (interpolate and add the GPS route) + gps_route, route = interpolate_trajectory(world, config.trajectory) + + self.route = route + self.set_global_plan(gps_route, self.route) + self._init_route_planner() + + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): + """ + Set the plan (route) for the agent + """ + ds_ids = downsample_route(global_plan_world_coord, 50) + self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] + self._global_plan = [global_plan_gps[x] for x in ds_ids] + + + def _init_route_planner(self): + self._route_planner = RoutePlanner(4.0, 50.0) + self._route_planner.set_route(self._global_plan, True) + + self.initialized = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + + def _get_position(self, tick_data): + gps = tick_data['gps'] + gps = (gps - self._route_planner.mean) * self._route_planner.scale + + return gps + + @torch.no_grad() + def execute(self): + """Main loop of the brain. 
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + + # VOID = -1 + # LEFT = 1 + # RIGHT = 2 + # STRAIGHT = 3 + # LANEFOLLOW = 4 + # CHANGELANELEFT = 5 + # CHANGELANERIGHT = 6 + command = 4 + if command < 0: + command = 4 + command -= 1 + assert command in [0, 1, 2, 3, 4, 5] + cmd_one_hot = [0] * 6 + cmd_one_hot[command] = 1 + cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) + + rgb = self.camera_rgb.getImage().data + #import cv2 + #rgb = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_BGR2RGB) + seg_image = self.camera_seg.getImage().data + + self.update_frame('frame_0', rgb) + self.update_frame('frame_1', seg_image) + + velocity = self.vehicle.get_velocity() + speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) + speed = 3.6 * speed_m_s #m/s to km/h + gt_velocity = torch.FloatTensor([speed]).to('cuda', dtype=torch.float32) + speed = torch.tensor(speed).view(-1, 1).to('cuda', dtype=torch.float32) + speed = speed / 12 + + print(speed) + #print(type(speed)) + #print(speed.dtype) + # expected Tensor as element 1 in argument 0, but got tuple + print(self.target_point) + #print(type(self.target_point)) + print(cmd_one_hot) + #print(type(cmd_one_hot)) + + imu_data = self.imu.getIMU().data + print(imu_data) + compass = imu_data + + + #gps = input_data['gps'][1][:2] + #compass = input_data['imu'][1][-1] + + if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames + compass = 0.0 + + result = { + 'rgb': rgb, + #'gps': gps, + 'speed': speed, + 'compass': compass, + #'bev': bev + } + + pos = self._get_position(result) + #result['gps'] = pos + next_wp, next_cmd = self._route_planner.run_step(pos) + result['next_command'] = next_cmd.value + + + theta = compass + np.pi/2 + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)] + ]) + + local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) + local_command_point = R.T.dot(local_command_point) + result['target_point'] = tuple(local_command_point) + + + + state = torch.cat([speed, self.target_point, cmd_one_hot], 1) + + print(state) + + rgb = self._im_transform(rgb).unsqueeze(0).to('cuda', dtype=torch.float32) + + print(rgb.shape) + + pred = self.net(rgb, state, self.target_point) + + #print(pred) + #print(type(pred)) + + + steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, command, gt_velocity, self.target_point) + + print(steer_ctrl, throttle_ctrl, brake_ctrl, metadata) + + steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, self.target_point) + + print(steer_traj, throttle_traj, brake_traj, metadata_traj) + + print() + print() + print() + + self.alpha = 0.3 + steer_ctrl = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) + throttle_ctrl = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) + brake_ctrl = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + + if brake_ctrl > 0.5: + throttle_ctrl = float(0) + + print(steer_ctrl, throttle_ctrl, brake_ctrl) + + + self.motors.sendThrottle(throttle_ctrl) + self.motors.sendSteer(steer_ctrl) + self.motors.sendBrake(brake_ctrl) + diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch index f3d9ce7c..a6fe01e5 100644 --- 
a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch +++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch @@ -34,8 +34,8 @@ - - + + diff --git a/behavior_metrics/configs/CARLA/default_carla_torch.yml b/behavior_metrics/configs/CARLA/default_carla_torch.yml index 1cc1148e..995949a9 100644 --- a/behavior_metrics/configs/CARLA/default_carla_torch.yml +++ b/behavior_metrics/configs/CARLA/default_carla_torch.yml @@ -26,6 +26,10 @@ Behaviors: Speedometer_0: Name: 'speedometer_0' Topic: '/carla/ego_vehicle/speedometer' + IMU: + IMU_0: + Name: 'imu_0' + Topic: '/carla/ego_vehicle/imu' Actuators: CARLA_Motors: Motors_0: @@ -33,11 +37,11 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_segmentation_based_imitation_learning.py' + BrainPath: 'brains/CARLA/brain_carla_TCP.py' PilotTimeCycle: 50 AsyncMode: True Parameters: - Model: 'pilotnet_v8.0.pth' + Model: 'best_model.ckpt' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True @@ -49,6 +53,7 @@ Behaviors: Simulation: World: configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch RandomSpawnPoint: False + TestSuite: 'Town02_two_turns' Dataset: In: '/tmp/my_bag.bag' Out: '' diff --git a/behavior_metrics/robot/interfaces/imu.py b/behavior_metrics/robot/interfaces/imu.py new file mode 100644 index 00000000..b94e16a1 --- /dev/null +++ b/behavior_metrics/robot/interfaces/imu.py @@ -0,0 +1,87 @@ +import rospy +from std_msgs.msg import Float32 +import threading + + +def imuMsg2IMU(imuMsg): + + imu = IMU() + + imu.data = imuMsg.data + now = rospy.get_rostime() + imu.timeStamp = now.secs + (now.nsecs * 1e-9) + + return imu + + +class IMU(): + + def __init__(self): + + self.data = 0 # X coord [meters] + self.timeStamp = 0 # Time stamp [s] + + def __str__(self): + s = "IMU: {\n x: " + str(self.x) + "\n timeStamp: " + str(self.timeStamp) + "\n}" + return s + + +class ListenerIMU: + ''' + ROS IMU Subscriber. IMU Client to Receive imu from ROS nodes. + ''' + def __init__(self, topic): + ''' + ListenerIMU Constructor. + + @param topic: ROS topic to subscribe + @type topic: String + + ''' + self.topic = topic + self.data = IMU() + self.sub = None + self.lock = threading.Lock() + self.start() + + def __callback(self, imu): + ''' + Callback function to receive and save IMU. + + @param odom: ROS Odometry received + + @type odom: Odometry + + ''' + imu = imuMsg2IMU(imu) + + self.lock.acquire() + self.data = imu + self.lock.release() + + def stop(self): + ''' + Stops (Unregisters) the client. + + ''' + self.sub.unregister() + + def start(self): + ''' + Starts (Subscribes) the client. + + ''' + self.sub = rospy.Subscriber(self.topic, Float32, self.__callback) + + def getIMU(self): + ''' + Returns last IMU. 
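+
+        The last message is read while holding the listener lock, so it is
+        safe to call from outside the ROS callback thread.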
+
+        @return last IMU saved
+
+        '''
+        self.lock.acquire()
+        imu = self.data
+        self.lock.release()
+
+        return imu
diff --git a/behavior_metrics/robot/sensors.py b/behavior_metrics/robot/sensors.py
index 8d71cc5d..cd15f82e 100644
--- a/behavior_metrics/robot/sensors.py
+++ b/behavior_metrics/robot/sensors.py
@@ -21,6 +21,7 @@ except ModuleNotFoundError as ex:
     logger.error('CARLA is not supported')
 from robot.interfaces.speedometer import ListenerSpeedometer
+from robot.interfaces.imu import ListenerIMU
 
 __author__ = 'fqez'
 __contributors__ = []
@@ -72,6 +73,12 @@ def __init__(self, sensors_config):
         if speedometer_conf:
             self.speedometer = self.__create_sensor(speedometer_conf, 'speedometer')
 
+        # Load imu
+        imu_conf = sensors_config.get('IMU', None)
+        self.imu = None
+        if imu_conf:
+            self.imu = self.__create_sensor(imu_conf, 'imu')
+
     def __create_sensor(self, sensor_config, sensor_type):
         """Fill the sensor dictionary with instances of the sensor_type and sensor_config"""
         sensor_dict = {}
@@ -88,6 +95,8 @@ def __create_sensor(self, sensor_config, sensor_type):
             sensor_dict[name] = BirdEyeView()
         elif sensor_type == 'speedometer':
             sensor_dict[name] = ListenerSpeedometer(topic)
+        elif sensor_type == 'imu':
+            sensor_dict[name] = ListenerIMU(topic)
 
         return sensor_dict
 
@@ -106,6 +115,8 @@ def __get_sensor(self, sensor_name, sensor_type):
                 sensor = self.bird_eye_view[sensor_name]
             elif sensor_type == 'speedometer':
                 sensor = self.speedometer[sensor_name]
+            elif sensor_type == 'imu':
+                sensor = self.imu[sensor_name]
         except KeyError:
             return "[ERROR] No existing camera with {} name.".format(sensor_name)
 
@@ -154,6 +165,17 @@ def get_bird_eye_view(self, bird_eye_view_name):
             robot.interfaces.birdeyeview.BirdEyeView instance -- birdeyeview instance
         """
         return self.__get_sensor(bird_eye_view_name, 'bird_eye_view')
+
+    def get_imu(self, imu_name):
+        """Retrieve a specific existing IMU listener
+
+        Arguments:
+            imu_name {str} -- Name of the IMU to be retrieved
+
+        Returns:
+            robot.interfaces.imu.ListenerIMU instance -- IMU listener instance
+        """
+        return self.__get_sensor(imu_name, 'imu')
 
     def kill(self):
         """Destroy all the running sensors"""

From 57ddcacb7e42b159900ed0dcd0d9a6bbea94c0f6 Mon Sep 17 00:00:00 2001
From: sergiopaniego
Date: Wed, 20 Dec 2023 09:51:15 +0100
Subject: [PATCH 02/11] ORIG_HEAD

---
 .../brains/CARLA/brain_carla_TCP.py           | 181 ++++++++++++++----
 .../configs/CARLA/default_carla_torch.yml     |   6 +-
 behavior_metrics/robot/interfaces/gnss.py     |  98 ++++++++++
 behavior_metrics/robot/interfaces/imu.py      |  28 ++-
 behavior_metrics/robot/interfaces/pose3d.py   |   2 +-
 behavior_metrics/robot/sensors.py             |  22 +++
 6 files changed, 290 insertions(+), 47 deletions(-)
 create mode 100644 behavior_metrics/robot/interfaces/gnss.py

diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP.py b/behavior_metrics/brains/CARLA/brain_carla_TCP.py
index c780d978..45b7bd12 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_TCP.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_TCP.py
@@ -22,10 +22,10 @@ from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import interpolate_trajectory
 from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_indexer import RouteIndexer
+from collections import deque
-PRETRAINED_MODELS = '/home/SergioPaniego/Descargas/'
-#MODEL_DIR = 'best_model.ckpt' # Inside download folder.
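+# Pretrained weights are now resolved from the project-wide models directory
+# rather than a user-specific download path.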
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' class Brain: @@ -34,6 +34,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.camera_rgb = sensors.get_camera('camera_0') # rgb front view camera self.camera_seg = sensors.get_camera('camera_2') # segmentation camera self.imu = sensors.get_imu('imu_0') # imu + self.gnss = sensors.get_gnss('gnss_0') # gnss self.handler = handler self.inference_times = [] self.gpu_inference = config['GPU'] @@ -82,6 +83,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): time.sleep(1) # sleep for 1 second before checking again #app_configuration = Config(config_data['config'][0]) + ''' route_counter = 0 TEST_ROUTES = [{ "map": "Town02", @@ -103,17 +105,32 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.target_point = torch.stack(self.target_point, dim=1).to('cuda', dtype=torch.float32) #self.target_point = torch.tensor(self.target_point).view(-1, 1).to('cuda', dtype=torch.float32) - + ''' self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) repetitions = 1 - routes = '/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town01.xml' - scenarios = '/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/brains/CARLA/TCP/leaderboard/data/scenarios/town01_all_scenarios.json' + routes = 'brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml' + scenarios = 'brains/CARLA/TCP/leaderboard/data/scenarios/town02_all_scenarios.json' route_indexer = RouteIndexer(routes, scenarios, repetitions) - #route_indexer = RouteIndexer(args.routes, args.scenarios, args.repetitions) # setup config = route_indexer.next() + ''' + We currently hard-code the initial and target points + ''' + print('----- TRAJECTORY ------') + print(config) + print(config.trajectory) + print(config.trajectory[0].x, config.trajectory[0].y) + config.trajectory[0].x = 55.3 + config.trajectory[0].y = -105.6 + + config.trajectory[1].x = 2.0 + config.trajectory[1].y = -105.6 + print(config.trajectory[0].x, config.trajectory[0].y) + print() + print(config.trajectory[1].x, config.trajectory[1].y) + print('----- TRAJECTORY ------') # prepare route's trajectory (interpolate and add the GPS route) gps_route, route = interpolate_trajectory(world, config.trajectory) @@ -122,6 +139,9 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.set_global_plan(gps_route, self.route) self._init_route_planner() + self.last_steers = deque() + self.status = 0 + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): """ @@ -130,12 +150,19 @@ def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): ds_ids = downsample_route(global_plan_world_coord, 50) self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] self._global_plan = [global_plan_gps[x] for x in ds_ids] + print('-----GLOBAL PLAN -----') + print(self._global_plan) def _init_route_planner(self): self._route_planner = RoutePlanner(4.0, 50.0) self._route_planner.set_route(self._global_plan, True) + gps = np.array([self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat']]) + gps = (gps - self._route_planner.mean) * self._route_planner.scale + print('-----GPS----') + print(gps) + self.initialized = True @@ -163,16 +190,18 @@ def update_frame(self, frame_id, 
data): def _get_position(self, tick_data): - gps = tick_data['gps'] + gps = self.gnss.getGNSS() + gps = np.array([gps.longitude, gps.latitude]) gps = (gps - self._route_planner.mean) * self._route_planner.scale - + print('-----GPS-----') + print(gps) + print('-----GPS-----') return gps @torch.no_grad() def execute(self): """Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)""" - # VOID = -1 # LEFT = 1 # RIGHT = 2 @@ -190,8 +219,6 @@ def execute(self): cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) rgb = self.camera_rgb.getImage().data - #import cv2 - #rgb = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_BGR2RGB) seg_image = self.camera_seg.getImage().data self.update_frame('frame_0', rgb) @@ -204,22 +231,12 @@ def execute(self): speed = torch.tensor(speed).view(-1, 1).to('cuda', dtype=torch.float32) speed = speed / 12 - print(speed) - #print(type(speed)) - #print(speed.dtype) - # expected Tensor as element 1 in argument 0, but got tuple - print(self.target_point) - #print(type(self.target_point)) - print(cmd_one_hot) - #print(type(cmd_one_hot)) - - imu_data = self.imu.getIMU().data - print(imu_data) - compass = imu_data - - - #gps = input_data['gps'][1][:2] - #compass = input_data['imu'][1][-1] + imu_data = self.imu.getIMU() + #compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z, imu_data.compass.w]) + compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) + print('----------compass--------------') + print(compass) + compass = compass[-1] if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames compass = 0.0 @@ -235,9 +252,9 @@ def execute(self): pos = self._get_position(result) #result['gps'] = pos next_wp, next_cmd = self._route_planner.run_step(pos) + next_wp = [next_wp[1], next_wp[0]] result['next_command'] = next_cmd.value - theta = compass + np.pi/2 R = np.array([ [np.cos(theta), -np.sin(theta)], @@ -245,49 +262,133 @@ def execute(self): ]) local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) - local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + + #local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + #local_command_point = R.T.dot(local_command_point) + + result['target_point'] = tuple(local_command_point) + result['target_point'] = [torch.FloatTensor([result['target_point'][0]]), + torch.FloatTensor([result['target_point'][1]])] + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + + + + ''' + + Local point seems to be incorrect and R.T.dot is commented while in the original code it's used. 
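+
+    A plausible cause (our assumption, not verified): next_wp is swapped to
+    (y, x) above while pos is built from GPS read as (lon, lat), so the two
+    frames may already disagree before any rotation is applied. A quick
+    offline check, using the compass and local point values from the logs
+    below:
+
+        import numpy as np
+        theta = -0.999806824289457 + np.pi / 2
+        R = np.array([[np.cos(theta), -np.sin(theta)],
+                      [np.sin(theta),  np.cos(theta)]])
+        print(R.T.dot([-38.3868739, 0.0180376]))  # roughly [-32.3, 20.8]
+
+    i.e. applying the rotation moves the point far from the raw difference
+    vector, which matches the large behaviour change seen when it is enabled.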
+ ''' + + print('----------compass--------------') + ''' + -0.999806824289457 + ''' + print(compass) + #print('--------- POS ---------------') + ''' + [ 43.17920366 -105.57289901] + ''' + #print(pos) + print('-----------LOCAL COMMAND POINT ---------------') + ''' + [-3.83868739e+01 1.80376380e-02] + ''' + print(local_command_point) + #print('-------NEXT WAYPOINT-----------') + ''' + [4.7923297947398655, -105.55486136806194] + ''' + #print(next_wp) + print('-------Target point-----------') + print(self.target_point) + ''' + -------Target point----------- + tensor([[-3.8387e+01, 1.8038e-02]], device='cuda:0') + ''' + print('--------RESULT----------') + #print(result) + print('------------------') state = torch.cat([speed, self.target_point, cmd_one_hot], 1) + print('------STATE-----') print(state) rgb = self._im_transform(rgb).unsqueeze(0).to('cuda', dtype=torch.float32) - print(rgb.shape) + #print(rgb.shape) pred = self.net(rgb, state, self.target_point) - #print(pred) - #print(type(pred)) + print('-----PRED------') + print(pred.keys()) + print(pred['pred_wp']) + print('------COMMAND----') + print(command) steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, command, gt_velocity, self.target_point) + print('------ steer_ctrl, throttle_ctrl, brake_ctrl, metadata-------') print(steer_ctrl, throttle_ctrl, brake_ctrl, metadata) steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, self.target_point) - print(steer_traj, throttle_traj, brake_traj, metadata_traj) + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 - print() - print() - print() - self.alpha = 0.3 - steer_ctrl = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) - throttle_ctrl = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) - brake_ctrl = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + print('------steer_traj, throttle_traj, brake_traj, metadata_traj-----') + print(steer_traj, throttle_traj, brake_traj, metadata_traj) + + + if self.status == 0: + print('LOG 1***********************************************************************************************') + self.alpha = 0.3 + steer_ctrl = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) + throttle_ctrl = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) + brake_ctrl = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + else: + print('LOG 2***********************************************************************************************') + self.alpha = 0.3 + #self.pid_metadata['agent'] = 'ctrl' + steer_ctrl = np.clip(self.alpha*steer_traj + (1-self.alpha)*steer_ctrl, -1, 1) + throttle_ctrl = np.clip(self.alpha*throttle_traj + (1-self.alpha)*throttle_ctrl, 0, 0.75) + brake_ctrl = np.clip(self.alpha*brake_traj + (1-self.alpha)*brake_ctrl, 0, 1) if brake_ctrl > 0.5: throttle_ctrl = float(0) + print('-------------steer_ctrl, throttle_ctrl, brake_ctrl----------') print(steer_ctrl, throttle_ctrl, brake_ctrl) + print() + print() + print() self.motors.sendThrottle(throttle_ctrl) self.motors.sendSteer(steer_ctrl) self.motors.sendBrake(brake_ctrl) + if len(self.last_steers) >= 20: + self.last_steers.popleft() + self.last_steers.append(abs(float(steer_ctrl))) + + num = 0 + for s in self.last_steers: + if s > 0.10: + num += 1 + if num > 10: + self.status = 1 + self.steer_step += 1 + + else: + self.status = 0 + diff --git 
a/behavior_metrics/configs/CARLA/default_carla_torch.yml b/behavior_metrics/configs/CARLA/default_carla_torch.yml index 995949a9..f6e88293 100644 --- a/behavior_metrics/configs/CARLA/default_carla_torch.yml +++ b/behavior_metrics/configs/CARLA/default_carla_torch.yml @@ -30,6 +30,10 @@ Behaviors: IMU_0: Name: 'imu_0' Topic: '/carla/ego_vehicle/imu' + GNSS: + GNSS_0: + Name: 'gnss_0' + Topic: '/carla/ego_vehicle/gnss' Actuators: CARLA_Motors: Motors_0: @@ -41,7 +45,7 @@ Behaviors: PilotTimeCycle: 50 AsyncMode: True Parameters: - Model: 'best_model.ckpt' + Model: 'tcp_best_model.ckpt' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True diff --git a/behavior_metrics/robot/interfaces/gnss.py b/behavior_metrics/robot/interfaces/gnss.py new file mode 100644 index 00000000..3361f66d --- /dev/null +++ b/behavior_metrics/robot/interfaces/gnss.py @@ -0,0 +1,98 @@ +import rospy +from sensor_msgs.msg import NavSatFix +import threading + + +def gnssMsg2GNSS(gnssMsg): + + gnss = GNSS() + + #print(gnssMsg) + + gnss.latitude = gnssMsg.latitude + gnss.longitude = gnssMsg.longitude + gnss.altitude = gnssMsg.altitude + + #msg.latitude = data[0] + # msg.longitude = data[1] + # msg.altitude = data[2] + + now = rospy.get_rostime() + gnss.timeStamp = now.secs + (now.nsecs * 1e-9) + + return gnss + + +class GNSS(): + + def __init__(self): + + self.latitude = 0 + self.longitude = 0 + self.altitude = 0 + self.timeStamp = 0 # Time stamp [s] + + def __str__(self): + s = "GNSS: {\n latitude: " + str(self.latitude) + "\n }\n longitude: " + str(self.longitude) + "\n }\n altitude: " + str(self.altitude) + "\n }\n timeStamp: " + str(self.timeStamp) + "\n}" + return s + + +class ListenerGNSS: + ''' + ROS GNSS Subscriber. GNSS Client to Receive gnss from ROS nodes. + ''' + def __init__(self, topic): + ''' + ListenerGNSS Constructor. + + @param topic: ROS topic to subscribe + @type topic: String + + ''' + self.topic = topic + self.data = GNSS() + self.sub = None + self.lock = threading.Lock() + self.start() + + def __callback(self, gnss): + ''' + Callback function to receive and save GNSS. + + @param odom: ROS Odometry received + + @type odom: Odometry + + ''' + gnss = gnssMsg2GNSS(gnss) + + self.lock.acquire() + self.data = gnss + self.lock.release() + + def stop(self): + ''' + Stops (Unregisters) the client. + + ''' + self.sub.unregister() + + def start(self): + ''' + Starts (Subscribes) the client. + + ''' + self.sub = rospy.Subscriber(self.topic, NavSatFix, self.__callback) + + def getGNSS(self): + ''' + Returns last GNSS. 
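+
+        The NavSatFix fields are copied into a plain GNSS object by the
+        subscriber callback; reads are guarded by the listener lock.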
+ + @return last GNSS saved + + ''' + self.lock.acquire() + gnss = self.data + self.lock.release() + + return gnss diff --git a/behavior_metrics/robot/interfaces/imu.py b/behavior_metrics/robot/interfaces/imu.py index b94e16a1..5afe9a88 100644 --- a/behavior_metrics/robot/interfaces/imu.py +++ b/behavior_metrics/robot/interfaces/imu.py @@ -1,5 +1,5 @@ import rospy -from std_msgs.msg import Float32 +from sensor_msgs.msg import Imu import threading @@ -7,7 +7,10 @@ def imuMsg2IMU(imuMsg): imu = IMU() - imu.data = imuMsg.data + imu.compass = imuMsg.orientation + imu.accelerometer = imuMsg.linear_acceleration + imu.gyroscope = imuMsg.angular_velocity + now = rospy.get_rostime() imu.timeStamp = now.secs + (now.nsecs * 1e-9) @@ -18,11 +21,26 @@ class IMU(): def __init__(self): - self.data = 0 # X coord [meters] + self.compass = { + 'x': 0, + 'y': 0, + 'z': 0, + 'w': 0 + } + self.gyroscope = { + 'x': 0, + 'y': 0, + 'z': 0 + } + self.accelerometer = { + 'x': 0, + 'y': 0, + 'z': 0 + } self.timeStamp = 0 # Time stamp [s] def __str__(self): - s = "IMU: {\n x: " + str(self.x) + "\n timeStamp: " + str(self.timeStamp) + "\n}" + s = "IMU: {\n compass: " + str(self.compass) + "\n }\n accelerometer: " + str(self.accelerometer) + "\n }\n gyroscope: " + str(self.gyroscope) + "\n }\n timeStamp: " + str(self.timeStamp) + "\n}" return s @@ -71,7 +89,7 @@ def start(self): Starts (Subscribes) the client. ''' - self.sub = rospy.Subscriber(self.topic, Float32, self.__callback) + self.sub = rospy.Subscriber(self.topic, Imu, self.__callback) def getIMU(self): ''' diff --git a/behavior_metrics/robot/interfaces/pose3d.py b/behavior_metrics/robot/interfaces/pose3d.py index 9fc84a3c..dcd1b72a 100644 --- a/behavior_metrics/robot/interfaces/pose3d.py +++ b/behavior_metrics/robot/interfaces/pose3d.py @@ -109,7 +109,7 @@ def __init__(self): self.timeStamp = 0 # Time stamp [s] def __str__(self): - s = "Pose3D: {\n x: " + str(self.x) + "\n Y: " + str(self.y) + s = "Pose3D: {\n X: " + str(self.x) + "\n Y: " + str(self.y) s = s + "\n Z: " + str(self.z) + "\n H: " + str(self.h) s = s + "\n Yaw: " + str(self.yaw) + "\n Pitch: " + str(self.pitch) + "\n Roll: " + str(self.roll) s = s + "\n quaternion: " + str(self.q) + "\n timeStamp: " + str(self.timeStamp) + "\n}" diff --git a/behavior_metrics/robot/sensors.py b/behavior_metrics/robot/sensors.py index cd15f82e..1527a3f4 100644 --- a/behavior_metrics/robot/sensors.py +++ b/behavior_metrics/robot/sensors.py @@ -22,6 +22,7 @@ logger.error('CARLA is not supported') from robot.interfaces.speedometer import ListenerSpeedometer from robot.interfaces.imu import ListenerIMU +from robot.interfaces.gnss import ListenerGNSS __author__ = 'fqez' __contributors__ = [] @@ -79,6 +80,12 @@ def __init__(self, sensors_config): if imu_conf: self.imu = self.__create_sensor(imu_conf, 'imu') + # Load gnss + gnss_conf = sensors_config.get('GNSS', None) + self.gnss = None + if gnss_conf: + self.gnss = self.__create_sensor(gnss_conf, 'gnss') + def __create_sensor(self, sensor_config, sensor_type): """Fill the sensor dictionary with instances of the sensor_type and sensor_config""" sensor_dict = {} @@ -97,6 +104,8 @@ def __create_sensor(self, sensor_config, sensor_type): sensor_dict[name] = ListenerSpeedometer(topic) elif sensor_type == 'imu': sensor_dict[name] = ListenerIMU(topic) + elif sensor_type == 'gnss': + sensor_dict[name] = ListenerGNSS(topic) return sensor_dict @@ -117,6 +126,8 @@ def __get_sensor(self, sensor_name, sensor_type): sensor = self.speedometer[sensor_name] elif sensor_type == 'imu': 
                sensor = self.imu[sensor_name]
+            elif sensor_type == 'gnss':
+                sensor = self.gnss[sensor_name]
         except KeyError:
             return "[ERROR] No existing camera with {} name.".format(sensor_name)
 
@@ -176,6 +187,17 @@ def get_imu(self, imu_name):
             robot.interfaces.imu.ListenerIMU instance -- IMU listener instance
         """
         return self.__get_sensor(imu_name, 'imu')
+
+    def get_gnss(self, gnss_name):
+        """Retrieve a specific existing GNSS listener
+
+        Arguments:
+            gnss_name {str} -- Name of the GNSS sensor to be retrieved
+
+        Returns:
+            robot.interfaces.gnss.ListenerGNSS instance -- GNSS listener instance
+        """
+        return self.__get_sensor(gnss_name, 'gnss')
 
     def kill(self):
         """Destroy all the running sensors"""

From 42e33381f93129cbeb75b9d18eda490629cf468c Mon Sep 17 00:00:00 2001
From: sergiopaniego
Date: Mon, 29 Jan 2024 19:53:12 +0100
Subject: [PATCH 03/11] Regenerated TCP controller

---
 .../brains/CARLA/brain_carla_TCP_2.py | 254 ++++++++++++++++++
 1 file changed, 254 insertions(+)
 create mode 100644 behavior_metrics/brains/CARLA/brain_carla_TCP_2.py

diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py
new file mode 100644
index 00000000..e5f14aad
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py
@@ -0,0 +1,254 @@
+from brains.CARLA.TCP.model import TCP
+from brains.CARLA.TCP.config import GlobalConfig
+from collections import OrderedDict
+
+from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control, calculate_delta_yaw
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from brains.CARLA.utils.high_level_command import HighLevelCommandLoader
+from os import path
+
+import numpy as np
+
+import torch
+import time
+import math
+import carla
+
+from utils.logger import logger
+import importlib
+from torchvision import transforms as T
+from brains.CARLA.TCP.leaderboard.team_code.planner import RoutePlanner
+from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import downsample_route
+from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import interpolate_trajectory
+from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_indexer import RouteIndexer
+
+from collections import deque
+
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+class Brain:
+
+    def __init__(self, sensors, actuators, model=None, handler=None, config=None):
+        self.motors = actuators.get_motor('motors_0')
+        self.camera_rgb = sensors.get_camera('camera_0') # rgb front view camera
+        self.camera_seg = sensors.get_camera('camera_2') # segmentation camera
+        self.imu = sensors.get_imu('imu_0') # imu
+        self.gnss = sensors.get_gnss('gnss_0') # gnss
+        self.speedometer = sensors.get_speedometer('speedometer_0') # speedometer
+        self.handler = handler
+        self.inference_times = []
+        self.gpu_inference = config['GPU']
+        self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu')
+
+        client = carla.Client('localhost', 2000)
+        client.set_timeout(10.0)
+        world = client.get_world()
+        self.map = world.get_map()
+
+        if model:
+            if not path.exists(PRETRAINED_MODELS + model):
+                print("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+
+            else:
+                # Initialize TCP variables
+                self.alpha = 0.3
+                self.status = 0
+                self.steer_step = 0
+                self.last_steers = deque()
+                self.step = -1
+                self.initialized = False
+                self.config = GlobalConfig()
+                self.net = TCP(self.config).to(self.device)
+
+                ckpt = torch.load(PRETRAINED_MODELS + 
model,map_location=self.device) + ckpt = ckpt["state_dict"] + new_state_dict = OrderedDict() # TODO: Why an OrderedDict + for key, value in ckpt.items(): + new_key = key.replace("model.","") + new_state_dict[new_key] = value + self.net.load_state_dict(new_state_dict, strict = False) + self.net.cuda() + self.net.eval() + + self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) + + self.vehicle = None + while self.vehicle is None: + for vehicle in world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == 'ego_vehicle': + self.vehicle = vehicle + break + if self.vehicle is None: + print("Waiting for vehicle with role_name 'ego_vehicle'") + time.sleep(1) # sleep for 1 second before checking again + + # Added code + repetitions = 1 + routes = 'brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml' + scenarios = 'brains/CARLA/TCP/leaderboard/data/scenarios/town02_all_scenarios.json' + route_indexer = RouteIndexer(routes, scenarios, repetitions) + config = route_indexer.next() + config.trajectory[0].x = 55.3 + config.trajectory[0].y = -105.6 + + #config.trajectory[1].y = -30.0 + #config.trajectory[1].x = -105.6 + + + gps_route, route = interpolate_trajectory(world, config.trajectory) + # Minimum distance is 50m + for route_point in route: + print(route_point[0].location, route_point[0].rotation, route_point[1]) + + self.set_global_plan(gps_route, route) + # Added code + self._route_planner = RoutePlanner(4.0, 50.0) + self._route_planner.set_route(self._global_plan, True) + + for route_point in self._route_planner.route: + print(route_point) + + self.initialized = True + + + def _get_position(self): + gps = self.gnss.getGNSS() + gps = np.array([gps.longitude, gps.latitude]) + gps = (gps - self._route_planner.mean) * self._route_planner.scale + + return gps + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): + """ + Set the plan (route) for the agent + """ + ds_ids = downsample_route(global_plan_world_coord, 50) + self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] + self._global_plan = [global_plan_gps[x] for x in ds_ids] + # Custom update + for x in ds_ids: + global_plan_gps[x][0]['lat'], global_plan_gps[x][0]['lon'] = global_plan_gps[x][0]['lon'], global_plan_gps[x][0]['lat'] + + def tick(self): + rgb = self.camera_rgb.getImage().data + speed = self.speedometer.getSpeedometer().data + imu_data = self.imu.getIMU() + compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) + compass = compass[-1] + + + result = { + 'rgb': rgb, + #'gps': gps, + 'speed': speed, + 'compass': compass, + #'bev': bev + } + + pos = self._get_position() + next_wp, next_cmd = self._route_planner.run_step(pos) + + result['next_command'] = next_cmd.value + # Custom compass update + #compass = ((compass + 1) / 2) * 0.02 - 0.01 + theta = compass + np.pi/2 + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)] + ]) + local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) + + #local_command_point = R.T.dot(local_command_point) + #local_command_point[0], local_command_point[1] = local_command_point[1], -local_command_point[0]t + + result['target_point'] = tuple(local_command_point) + + + return result + + + @torch.no_grad() + def execute(self): + + tick_data = self.tick() + if self.step < self.config.seq_len: + #rgb = self._im_transform(tick_data['rgb']).unsqueeze(0) + + 
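+            # Warm-up: while self.step is below config.seq_len, send a neutral
+            # command so the sensor queues fill before the network is queried.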
self.motors.sendThrottle(0.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0.0) + + gt_velocity = torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32) + command = tick_data['next_command'] + if command < 0: + command = 4 + command -= 1 + assert command in [0, 1, 2, 3, 4, 5] + cmd_one_hot = [0] * 6 + cmd_one_hot[command] = 1 + cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) + speed = torch.FloatTensor([float(tick_data['speed'])]).view(1,1).to('cuda', dtype=torch.float32) + speed = speed / 12 + rgb = self._im_transform(tick_data['rgb']).unsqueeze(0).to('cuda', dtype=torch.float32) + + tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]), + torch.FloatTensor([tick_data['target_point'][1]])] + target_point = torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32) + state = torch.cat([speed, target_point, cmd_one_hot], 1) + + pred= self.net(rgb, state, target_point) + + steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, tick_data['next_command'], gt_velocity, target_point) + + steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, target_point) + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 + + self.pid_metadata = metadata_traj + + if self.status == 0: + self.alpha = 0.3 + self.pid_metadata['agent'] = 'traj' + steer = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) + throttle = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) + brake = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + else: + self.alpha = 0.3 + self.pid_metadata['agent'] = 'ctrl' + steer = np.clip(self.alpha*steer_traj + (1-self.alpha)*steer_ctrl, -1, 1) + throttle = np.clip(self.alpha*throttle_traj + (1-self.alpha)*throttle_ctrl, 0, 0.75) + brake = np.clip(self.alpha*brake_traj + (1-self.alpha)*brake_ctrl, 0, 1) + + self.pid_metadata['steer_ctrl'] = float(steer_ctrl) + self.pid_metadata['steer_traj'] = float(steer_traj) + self.pid_metadata['throttle_ctrl'] = float(throttle_ctrl) + self.pid_metadata['throttle_traj'] = float(throttle_traj) + self.pid_metadata['brake_ctrl'] = float(brake_ctrl) + self.pid_metadata['brake_traj'] = float(brake_traj) + + if brake > 0.5: + throttle = float(0) + + if len(self.last_steers) >= 20: + self.last_steers.popleft() + self.last_steers.append(abs(float(steer))) + + num = 0 + for s in self.last_steers: + if s > 0.10: + num += 1 + if num > 10: + self.status = 1 + self.steer_step += 1 + else: + self.status = 0 + + self.pid_metadata['status'] = self.status + + + #print(throttle, steer, brake) + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(brake) + From 70bb0f24781610f6d44e58d8e0eaf1d5c497f38b Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 31 Jan 2024 16:23:26 +0100 Subject: [PATCH 04/11] [WIP] TCP brain replicated --- .../brains/CARLA/brain_carla_TCP.py | 82 ++++++++++--------- .../brains/CARLA/brain_carla_TCP_2.py | 65 +++++++++++++-- 2 files changed, 103 insertions(+), 44 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP.py b/behavior_metrics/brains/CARLA/brain_carla_TCP.py index 45b7bd12..6b9481f0 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP.py @@ -35,6 +35,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.camera_seg = 
sensors.get_camera('camera_2') # segmentation camera self.imu = sensors.get_imu('imu_0') # imu self.gnss = sensors.get_gnss('gnss_0') # gnss + self.speedometer = sensors.get_speedometer('speedometer_0') # gnss self.handler = handler self.inference_times = [] self.gpu_inference = config['GPU'] @@ -125,7 +126,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): config.trajectory[0].x = 55.3 config.trajectory[0].y = -105.6 - config.trajectory[1].x = 2.0 + config.trajectory[1].x = -30.0 config.trajectory[1].y = -105.6 print(config.trajectory[0].x, config.trajectory[0].y) print() @@ -139,6 +140,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.set_global_plan(gps_route, self.route) self._init_route_planner() + self.steer_step = 0 self.last_steers = deque() self.status = 0 @@ -150,8 +152,8 @@ def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): ds_ids = downsample_route(global_plan_world_coord, 50) self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] self._global_plan = [global_plan_gps[x] for x in ds_ids] - print('-----GLOBAL PLAN -----') - print(self._global_plan) + #print('-----GLOBAL PLAN -----') + #print(self._global_plan) def _init_route_planner(self): @@ -160,8 +162,8 @@ def _init_route_planner(self): gps = np.array([self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat']]) gps = (gps - self._route_planner.mean) * self._route_planner.scale - print('-----GPS----') - print(gps) + #print('-----GPS----') + #print(gps) self.initialized = True @@ -193,9 +195,9 @@ def _get_position(self, tick_data): gps = self.gnss.getGNSS() gps = np.array([gps.longitude, gps.latitude]) gps = (gps - self._route_planner.mean) * self._route_planner.scale - print('-----GPS-----') - print(gps) - print('-----GPS-----') + #print('-----GPS-----') + #print(gps) + #print('-----GPS-----') return gps @torch.no_grad() @@ -224,9 +226,15 @@ def execute(self): self.update_frame('frame_0', rgb) self.update_frame('frame_1', seg_image) - velocity = self.vehicle.get_velocity() - speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) - speed = 3.6 * speed_m_s #m/s to km/h + + + print('----------getSpeedometer--------------') + print(self.speedometer.getSpeedometer().data) + + speed = self.speedometer.getSpeedometer().data + #velocity = self.vehicle.get_velocity() + #speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) + #speed = 3.6 * speed_m_s #m/s to km/h gt_velocity = torch.FloatTensor([speed]).to('cuda', dtype=torch.float32) speed = torch.tensor(speed).view(-1, 1).to('cuda', dtype=torch.float32) speed = speed / 12 @@ -234,8 +242,8 @@ def execute(self): imu_data = self.imu.getIMU() #compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z, imu_data.compass.w]) compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) - print('----------compass--------------') - print(compass) + #print('----------compass--------------') + #print(compass) compass = compass[-1] if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames @@ -286,21 +294,21 @@ def execute(self): ''' - print('----------compass--------------') + #print('----------compass--------------') ''' -0.999806824289457 ''' - print(compass) + #print(compass) #print('--------- POS ---------------') ''' [ 43.17920366 -105.57289901] ''' #print(pos) - print('-----------LOCAL COMMAND POINT ---------------') + 
#print('-----------LOCAL COMMAND POINT ---------------') ''' [-3.83868739e+01 1.80376380e-02] ''' - print(local_command_point) + #print(local_command_point) #print('-------NEXT WAYPOINT-----------') ''' [4.7923297947398655, -105.55486136806194] @@ -312,14 +320,14 @@ def execute(self): -------Target point----------- tensor([[-3.8387e+01, 1.8038e-02]], device='cuda:0') ''' - print('--------RESULT----------') + #print('--------RESULT----------') #print(result) - print('------------------') + #print('------------------') state = torch.cat([speed, self.target_point, cmd_one_hot], 1) - print('------STATE-----') - print(state) + #print('------STATE-----') + #print(state) rgb = self._im_transform(rgb).unsqueeze(0).to('cuda', dtype=torch.float32) @@ -327,17 +335,17 @@ def execute(self): pred = self.net(rgb, state, self.target_point) - print('-----PRED------') - print(pred.keys()) - print(pred['pred_wp']) - print('------COMMAND----') - print(command) + #print('-----PRED------') + #print(pred.keys()) + #print(pred['pred_wp']) + #print('------COMMAND----') + #print(command) steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, command, gt_velocity, self.target_point) - print('------ steer_ctrl, throttle_ctrl, brake_ctrl, metadata-------') - print(steer_ctrl, throttle_ctrl, brake_ctrl, metadata) + #print('------ steer_ctrl, throttle_ctrl, brake_ctrl, metadata-------') + #print(steer_ctrl, throttle_ctrl, brake_ctrl, metadata) steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, self.target_point) @@ -345,18 +353,18 @@ def execute(self): if throttle_traj > brake_traj: brake_traj = 0.0 - print('------steer_traj, throttle_traj, brake_traj, metadata_traj-----') - print(steer_traj, throttle_traj, brake_traj, metadata_traj) + #print('------steer_traj, throttle_traj, brake_traj, metadata_traj-----') + #print(steer_traj, throttle_traj, brake_traj, metadata_traj) if self.status == 0: - print('LOG 1***********************************************************************************************') + #print('LOG 1***********************************************************************************************') self.alpha = 0.3 steer_ctrl = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) throttle_ctrl = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) brake_ctrl = np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) else: - print('LOG 2***********************************************************************************************') + #print('LOG 2***********************************************************************************************') self.alpha = 0.3 #self.pid_metadata['agent'] = 'ctrl' steer_ctrl = np.clip(self.alpha*steer_traj + (1-self.alpha)*steer_ctrl, -1, 1) @@ -366,12 +374,12 @@ def execute(self): if brake_ctrl > 0.5: throttle_ctrl = float(0) - print('-------------steer_ctrl, throttle_ctrl, brake_ctrl----------') - print(steer_ctrl, throttle_ctrl, brake_ctrl) + #print('-------------steer_ctrl, throttle_ctrl, brake_ctrl----------') + #print(steer_ctrl, throttle_ctrl, brake_ctrl) - print() - print() - print() + #print() + #print() + #print() self.motors.sendThrottle(throttle_ctrl) self.motors.sendSteer(steer_ctrl) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py index e5f14aad..8031d06a 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py 
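[editor's note] The first hunk below replaces the scalar compass reading with a
conversion from the IMU orientation quaternion, unpacking (x, y, z, w) and
applying atan2 to the last two components. For reference, a standard yaw
extraction for a z-up frame is sketched here; quaternion_to_yaw is a
hypothetical helper, and the axis convention of the ROS bridge topic still
needs to be confirmed (see the compass TODO later in this file):

    import math

    def quaternion_to_yaw(x, y, z, w):
        # rotation about the z axis, in radians, in (-pi, pi]
        return math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

For a pure yaw rotation (x = y = 0) this reduces to 2 * atan2(z, w), which is
close to, but not the same as, the atan2(w, z) that the hunk computes; that
discrepancy may be what the "Figure out what's the transformation for the
compass" TODO refers to.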
@@ -130,14 +130,55 @@ def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): for x in ds_ids: global_plan_gps[x][0]['lat'], global_plan_gps[x][0]['lon'] = global_plan_gps[x][0]['lon'], global_plan_gps[x][0]['lat'] + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + def tick(self): rgb = self.camera_rgb.getImage().data + self.update_frame('frame_0', rgb) speed = self.speedometer.getSpeedometer().data + #velocity = self.vehicle.get_velocity() + #speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) + #speed = 3.6 * speed_m_s #m/s to km/h + imu_data = self.imu.getIMU() - compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) - compass = compass[-1] + + compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z, imu_data.compass.w]) + + def convert_vector_to_compass_orientation(orientation_vector): + _, _, orientation_x, orientation_y = orientation_vector + + compass_orientation = math.atan2(round(orientation_y, 2), round(orientation_x, 2)) + + if compass_orientation < 0: + compass_orientation += 2 * math.pi + + compass_orientation -= math.pi / 2.0 + + return compass_orientation + compass = convert_vector_to_compass_orientation(compass) + result = { 'rgb': rgb, #'gps': gps, @@ -150,17 +191,16 @@ def tick(self): next_wp, next_cmd = self._route_planner.run_step(pos) result['next_command'] = next_cmd.value - # Custom compass update - #compass = ((compass + 1) / 2) * 0.02 - 0.01 + theta = compass + np.pi/2 R = np.array([ [np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)] ]) + local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) - - #local_command_point = R.T.dot(local_command_point) - #local_command_point[0], local_command_point[1] = local_command_point[1], -local_command_point[0]t + local_command_point = R.T.dot(local_command_point) + local_command_point[0], local_command_point[1] = local_command_point[1], -local_command_point[0] result['target_point'] = tuple(local_command_point) @@ -190,6 +230,10 @@ def execute(self): cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) speed = torch.FloatTensor([float(tick_data['speed'])]).view(1,1).to('cuda', dtype=torch.float32) speed = speed / 12 + + print(tick_data['rgb'].shape) + print(type(tick_data['rgb'])) + rgb = self._im_transform(tick_data['rgb']).unsqueeze(0).to('cuda', dtype=torch.float32) tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]), @@ -252,3 +296,10 @@ def execute(self): self.motors.sendSteer(steer) self.motors.sendBrake(brake) + + ''' + TODO: Draw the waypoints on the map + TODO: Draw the trajectory on the images. + TODO: Figure out what's the transformation for the compass. 
+ + ''' \ No newline at end of file From 98e0e2b3fc911459f74fe8f32783b515964fbb32 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Mon, 18 Mar 2024 11:11:56 +0100 Subject: [PATCH 05/11] [WIP] Looking for the issue with TCP brain replication --- .../leaderboard/leaderboard_evaluator.py | 10 +++- .../leaderboard/scenarios/route_scenario.py | 49 +++++++++++++------ .../leaderboard/scenarios/scenario_manager.py | 5 +- .../leaderboard/utils/statistics_manager.py | 6 ++- .../TCP/leaderboard/scripts/run_evaluation.sh | 18 +++++-- .../TCP/leaderboard/team_code/planner.py | 6 +++ .../TCP/leaderboard/team_code/tcp_agent.py | 25 ++++++++++ behavior_metrics/brains/CARLA/TCP/model.py | 6 ++- .../brains/CARLA/brain_carla_TCP_2.py | 33 ++++++++++++- behavior_metrics/robot/interfaces/imu.py | 4 ++ behavior_metrics/robot/sensors.py | 11 +++++ 11 files changed, 145 insertions(+), 28 deletions(-) diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py index 280c8caf..423a9936 100644 --- a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/leaderboard_evaluator.py @@ -111,6 +111,8 @@ def __init__(self, args, statistics_manager): self.traffic_manager = self.client.get_trafficmanager(int(args.trafficManagerPort)) # self.traffic_manager = self.client.get_trafficmanager(8000) except Exception as e: + print('TRAFFIC MANAGER CREATION FAILED!!!') + print(int(args.trafficManagerPort)) print(e) dist = pkg_resources.get_distribution("carla") # if dist.version != 'leaderboard': @@ -263,7 +265,8 @@ def _load_and_wait_for_world(self, args, town, ego_vehicles=None): else: self.world.wait_for_tick() - if CarlaDataProvider.get_map().name != town: + if CarlaDataProvider.get_map().name != town and CarlaDataProvider.get_map().name.split('/')[2] != town: + print(CarlaDataProvider.get_map().name) raise Exception("The CARLA server uses the wrong map!" 
"This scenario requires to use map {}".format(town)) @@ -350,7 +353,10 @@ def _load_and_run_scenario(self, args, config): self._load_and_wait_for_world(args, config.town, config.ego_vehicles) self._prepare_ego_vehicles(config.ego_vehicles, False) scenario = RouteScenario(world=self.world, config=config, debug_mode=args.debug) - self.statistics_manager.set_scenario(scenario.scenario) + print(scenario) + print('-----------------------------------------------------------------') + self.statistics_manager.set_scenario(scenario) + #self.statistics_manager.set_scenario(scenario.scenario) # self.agent_instance._init() # self.agent_instance.sensor_interface = SensorInterface() diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py index 827727cd..41dfc363 100644 --- a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/route_scenario.py @@ -33,15 +33,15 @@ from srunner.scenarios.object_crash_intersection import VehicleTurningRoute from srunner.scenarios.other_leading_vehicle import OtherLeadingVehicle from srunner.scenarios.maneuver_opposite_direction import ManeuverOppositeDirection -from srunner.scenarios.junction_crossing_route import SignalJunctionCrossingRoute, NoSignalJunctionCrossingRoute +#from srunner.scenarios.junction_crossing_route import SignalJunctionCrossingRoute, NoSignalJunctionCrossingRoute from srunner.scenariomanager.scenarioatomics.atomic_criteria import (CollisionTest, InRouteTest, RouteCompletionTest, OutsideRouteLanesTest, RunningRedLightTest, - RunningStopTest, - ActorSpeedAboveThresholdTest) + RunningStopTest)#, + #ActorSpeedAboveThresholdTest) from leaderboard.utils.route_parser import RouteParser, TRIGGER_THRESHOLD, TRIGGER_ANGLE_THRESHOLD from leaderboard.utils.route_manipulation import interpolate_trajectory @@ -61,10 +61,10 @@ "Scenario4": VehicleTurningRoute, "Scenario5": OtherLeadingVehicle, "Scenario6": ManeuverOppositeDirection, - "Scenario7": SignalJunctionCrossingRoute, - "Scenario8": SignalJunctionCrossingRoute, - "Scenario9": SignalJunctionCrossingRoute, - "Scenario10": NoSignalJunctionCrossingRoute + #"Scenario7": SignalJunctionCrossingRoute, + #"Scenario8": SignalJunctionCrossingRoute, + #"Scenario9": SignalJunctionCrossingRoute, + #"Scenario10": NoSignalJunctionCrossingRoute } @@ -222,16 +222,17 @@ def _update_route(self, world, config, debug_mode): world_annotations = RouteParser.parse_annotations_file(config.scenario_file) # prepare route's trajectory (interpolate and add the GPS route) - # gps_route, route = interpolate_trajectory(world, config.trajectory) - gps_route, route, wp_route = interpolate_trajectory(world, config.trajectory) + gps_route, route = interpolate_trajectory(world, config.trajectory) + #gps_route, route, wp_route = interpolate_trajectory(world, config.trajectory) potential_scenarios_definitions, _ = RouteParser.scan_route_for_scenarios( config.town, route, world_annotations) self.route = route - CarlaDataProvider.set_ego_vehicle_route(convert_transform_to_location(self.route)) + #CarlaDataProvider.set_ego_vehicle_route(convert_transform_to_location(self.route)) - config.agent.set_global_plan(gps_route, self.route, wp_route) + #config.agent.set_global_plan(gps_route, self.route, wp_route) + config.agent.set_global_plan(gps_route, self.route) # Sample the scenarios to be used for this route instance. 
        self.sampled_scenarios_definitions = self._scenario_sampling(potential_scenarios_definitions)

@@ -520,8 +521,14 @@ def _create_behavior(self):
        scenario_behaviors = []
        blackboard_list = []

+        print('---------------------')
+        print(self.list_scenarios)
+        print('--------ROUTE!-------------')
+        print(self.route)
+        print('---------------------')
+
        for i, scenario in enumerate(self.list_scenarios):
-            if scenario.scenario.behavior is not None:
+            if hasattr(scenario, 'scenario') and scenario.scenario.behavior is not None:
                route_var_name = scenario.config.route_var_name
                if route_var_name is not None:
@@ -542,7 +549,7 @@ def _create_behavior(self):
            self.route,
            blackboard_list,
            scenario_trigger_distance,
-            repeat_scenarios=False
+            #repeat_scenarios=False
        )
        subbehavior.add_child(scenario_triggerer)  # make ScenarioTriggerer the first thing to be checked
@@ -554,8 +561,17 @@ def _create_test_criteria(self):
        """
        """
+        print()
+        print('_create_test_criteria')
+        print()
        criteria = []
-        route = convert_transform_to_location(self.route)
+        #route = convert_transform_to_location(self.route)
+        route = self.route
+        print()
+        print('convert_transform_to_location')
+        print('convert_transform_to_location')
+        print('convert_transform_to_location')
+        print()

        # we terminate the route if collision or red light infraction happens during data collection
@@ -576,19 +592,20 @@ def _create_test_criteria(self):

        stop_criterion = RunningStopTest(self.ego_vehicles[0])

+        '''
        blocked_criterion = ActorSpeedAboveThresholdTest(self.ego_vehicles[0],
                                                         speed_threshold=0.1,
                                                         below_threshold_max_time=180.0,
                                                         terminate_on_failure=True,
                                                         name="AgentBlockedTest")
-
+        '''
        criteria.append(completion_criterion)
        criteria.append(outsidelane_criterion)
        criteria.append(collision_criterion)
        criteria.append(red_light_criterion)
        criteria.append(stop_criterion)
        criteria.append(route_criterion)
-        criteria.append(blocked_criterion)
+        #criteria.append(blocked_criterion)

        return criteria

diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py
index 9993fa44..0295e8ae 100644
--- a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/scenarios/scenario_manager.py
@@ -104,7 +104,8 @@ def load_scenario(self, scenario, agent, rep_number):
        GameTime.restart()
        self._agent = AgentWrapper(agent)
        self.scenario_class = scenario
-        self.scenario = scenario.scenario
+        #self.scenario = scenario.scenario
+        self.scenario = scenario
        self.scenario_tree = self.scenario.scenario_tree
        self.ego_vehicles = scenario.ego_vehicles
        self.other_actors = scenario.other_actors
@@ -112,7 +113,7 @@ def load_scenario(self, scenario, agent, rep_number):

        # To print the scenario tree uncomment the next line
        # py_trees.display.render_dot_tree(self.scenario_tree)
-        CarlaDataProvider.set_ego(self.ego_vehicles[0])
+        #CarlaDataProvider.set_ego(self.ego_vehicles[0])

        self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
index 7ea26d58..6514f11d 100644
--- a/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/leaderboard/utils/statistics_manager.py
@@ -143,9 +143,11 @@ def compute_route_statistics(self, config, duration_time_system=-1, duration_tim
        failure = "Agent timed out"

        for node in self._master_scenario.get_criteria():
-            if node.list_traffic_events:
+            if node.events:
+            #if node.list_traffic_events:
                # analyze all traffic events
-                for event in node.list_traffic_events:
+                for event in node.events:
+                #for event in node.list_traffic_events:
                    if event.get_type() == TrafficEventType.COLLISION_STATIC:
                        score_penalty *= PENALTY_COLLISION_STATIC
                        route_record.infractions['collisions_layout'].append(event.get_message())
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh
index 1d8a7435..f026baf1 100644
--- a/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/scripts/run_evaluation.sh
@@ -1,12 +1,17 @@
 #!/bin/bash
 export CARLA_ROOT= PATH_TO_CARLA
+export CARLA_ROOT="/home/SergioPaniego/carla/Dist/CARLA_Shipping_0.9.15-dirty/LinuxNoEditor"
 export CARLA_SERVER=${CARLA_ROOT}/CarlaUE4.sh
+export PYTHONPATH="/home/SergioPaniego/carla-ros-bridge/catkin_ws/devel/lib/python3/dist-packages:/opt/ros/noetic/lib/python3/dist-packages"
 export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI
 export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla
-export PYTHONPATH=$PYTHONPATH:$CARLA_ROOT/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg
+export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla/agents
+export PYTHONPATH=$PYTHONPATH:${CARLA_ROOT}/PythonAPI/carla/agents/navigation
+export PYTHONPATH=$PYTHONPATH:$CARLA_ROOT/PythonAPI/carla/dist/carla-0.9.15-py3.8-linux-x86_64.egg
 export PYTHONPATH=$PYTHONPATH:leaderboard
 export PYTHONPATH=$PYTHONPATH:leaderboard/team_code
-export PYTHONPATH=$PYTHONPATH:scenario_runner
+export PYTHONPATH=$PYTHONPATH:leaderboard/scenario_runner
+export PYTHONPATH=$PYTHONPATH:/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/brains/CARLA
 export LEADERBOARD_ROOT=leaderboard
 export CHALLENGE_TRACK_CODENAME=SENSORS
@@ -20,11 +25,18 @@ export RESUME=True
 # TCP evaluation
 export ROUTES=leaderboard/data/evaluation_routes/routes_lav_valid.xml
 export TEAM_AGENT=team_code/tcp_agent.py
-export TEAM_CONFIG= PATH_TO_MODEL_CKPT
+export TEAM_CONFIG=/home/SergioPaniego/Documentos/BehaviorMetrics/behavior_metrics/models/CARLA/tcp_best_model.ckpt
 export CHECKPOINT_ENDPOINT=results_TCP.json
 export SCENARIOS=leaderboard/data/scenarios/all_towns_traffic_scenarios.json
 export SAVE_PATH=data/results_TCP/

+echo "CARLA_ROOT"
+echo $CARLA_ROOT
+echo "PATH_TO_CARLA"
+echo $PATH_TO_CARLA
+echo "PYTHONPATH"
+echo $PYTHONPATH
+echo "HELLO"

 python3 ${LEADERBOARD_ROOT}/leaderboard/leaderboard_evaluator.py \
 --scenarios=${SCENARIOS} \
diff --git a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py
index 6aff90a2..d38ed8fa 100644
--- a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py
+++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/planner.py
@@ -80,6 +80,9 @@ def run_step(self, gps):
        if len(self.route) == 1:
            return self.route[0]

+        #print()
+        #print()
+        #print('INSIDE', self.route)
        to_pop = 0
        farthest_in_range = -np.inf
@@ -110,4 +113,7 @@ def run_step(self, gps):
            self.debug.dot(gps, gps, (0, 0, 255))
            self.debug.show()

+        #print()
+        #print()
+        #print('INSIDE', self.route)
        return self.route[1]
diff --git 
a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py index 8c11cda6..f035acf5 100644 --- a/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py +++ b/behavior_metrics/brains/CARLA/TCP/leaderboard/team_code/tcp_agent.py @@ -138,6 +138,17 @@ def tick(self, input_data): speed = input_data['speed'][1]['speed'] compass = input_data['imu'][1][-1] + #print('---- IMU DATA ----') + #print(input_data['imu']) + #print('---- COMPASS ----') + #print(compass) + ''' + (1263, array([ 5.01221323e+00, -2.09726281e-02, 9.80859756e+00, -6.65326806e-05, + -1.39825954e-03, 1.25346202e-02, 3.65396426e-03])) + ---- COMPASS ---- + 0.0036539642605930567 + ''' + if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames compass = 0.0 @@ -190,14 +201,28 @@ def run_step(self, input_data, timestamp): cmd_one_hot = [0] * 6 cmd_one_hot[command] = 1 cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) + print('---SPEED---') + print(tick_data['speed']) speed = torch.FloatTensor([float(tick_data['speed'])]).view(1,1).to('cuda', dtype=torch.float32) speed = speed / 12 + #print('---SPEED---') + print(speed) + print(tick_data['rgb'].shape) + print(type(tick_data['rgb'])) + from PIL import Image + imagen_pil = Image.fromarray(tick_data['rgb']) + imagen_pil.save('imagen_de_tcp.png') + rgb = self._im_transform(tick_data['rgb']).unsqueeze(0).to('cuda', dtype=torch.float32) tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]), torch.FloatTensor([tick_data['target_point'][1]])] target_point = torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32) + state = torch.cat([speed, target_point, cmd_one_hot], 1) + print(state) + print(state.dtype) + pred= self.net(rgb, state, target_point) diff --git a/behavior_metrics/brains/CARLA/TCP/model.py b/behavior_metrics/brains/CARLA/TCP/model.py index 6881c851..2a58aa3e 100644 --- a/behavior_metrics/brains/CARLA/TCP/model.py +++ b/behavior_metrics/brains/CARLA/TCP/model.py @@ -2,7 +2,8 @@ import numpy as np import torch from torch import nn -from brains.CARLA.TCP.resnet import * +#from brains.CARLA.TCP.resnet import * +from TCP.resnet import * class PIDController(object): @@ -16,6 +17,9 @@ def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20): self._min = 0.0 def step(self, error): + print('llega!') + print(self._window) + print('llega!') self._window.append(error) self._max = max(self._max, abs(error)) self._min = -abs(self._max) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py index 8031d06a..67263bda 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_2.py @@ -160,6 +160,9 @@ def tick(self): #speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) #speed = 3.6 * speed_m_s #m/s to km/h + print('---SPEED 1---') + print(speed) + imu_data = self.imu.getIMU() compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z, imu_data.compass.w]) @@ -230,6 +233,9 @@ def execute(self): cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) speed = torch.FloatTensor([float(tick_data['speed'])]).view(1,1).to('cuda', dtype=torch.float32) speed = speed / 12 + print('---SPEED 2---') + print(speed) + print(tick_data['rgb'].shape) print(type(tick_data['rgb'])) @@ -291,15 +297,38 @@ def execute(self): 
        self.pid_metadata['status'] = self.status

-        #print(throttle, steer, brake)
+        print(throttle, steer, brake)
        self.motors.sendThrottle(throttle)
        self.motors.sendSteer(steer)
        self.motors.sendBrake(brake)
+        #self.motors.sendThrottle(1.0)
+        #self.motors.sendSteer(0.0)
+        #self.motors.sendBrake(0.0)
+

    '''
+    TODO: REVIEW PID CONTROLLERS SINCE THEY USE A WINDOW
    TODO: Draw the waypoints on the map
    TODO: Draw the trajectory on the images.
    TODO: Figure out what's the transformation for the compass.
-
+    TODO: Review metadata from model!!! metadata = {
+                'speed': float(speed.astype(np.float64)),
+                'steer': float(steer),
+                'throttle': float(throttle),
+                'brake': float(brake),
+                'wp_4': tuple(waypoints[3].astype(np.float64)),
+                'wp_3': tuple(waypoints[2].astype(np.float64)),
+                'wp_2': tuple(waypoints[1].astype(np.float64)),
+                'wp_1': tuple(waypoints[0].astype(np.float64)),
+                'aim': tuple(aim.astype(np.float64)),
+                'target': tuple(target.astype(np.float64)),
+                'desired_speed': float(desired_speed.astype(np.float64)),
+                'angle': float(angle.astype(np.float64)),
+                'angle_last': float(angle_last.astype(np.float64)),
+                'angle_target': float(angle_target.astype(np.float64)),
+                'angle_final': float(angle_final.astype(np.float64)),
+                'delta': float(delta.astype(np.float64)),
+            }
    '''
\ No newline at end of file
diff --git a/behavior_metrics/robot/interfaces/imu.py b/behavior_metrics/robot/interfaces/imu.py
index 5afe9a88..e41bc3f4 100644
--- a/behavior_metrics/robot/interfaces/imu.py
+++ b/behavior_metrics/robot/interfaces/imu.py
@@ -7,6 +7,10 @@ def imuMsg2IMU(imuMsg):

    imu = IMU()

+    #print('-----')
+    #print(imuMsg.orientation)
+    #print('-----')
+
    imu.compass = imuMsg.orientation
    imu.accelerometer = imuMsg.linear_acceleration
    imu.gyroscope = imuMsg.angular_velocity
diff --git a/behavior_metrics/robot/sensors.py b/behavior_metrics/robot/sensors.py
index 1527a3f4..c5106514 100644
--- a/behavior_metrics/robot/sensors.py
+++ b/behavior_metrics/robot/sensors.py
@@ -198,6 +198,17 @@ def get_gnss(self, gnss_name):
            robot.interfaces.gnss.GNSS instance -- GNSS instance
        """
        return self.__get_sensor(gnss_name, 'gnss')
+
+    def get_speedometer(self, speedometer_name):
+        """Retrieve a specific existing speedometer
+
+        Arguments:
+            speedometer_name {str} -- Name of the speedometer to be retrieved
+
+        Returns:
+            robot.interfaces.speedometer.Speedometer instance -- speedometer instance
+        """
+        return self.__get_sensor(speedometer_name, 'speedometer')

    def kill(self):
        """Destroy all the running sensors"""

From fe2692d28129bcc8711c499aa356f0f604f84d56 Mon Sep 17 00:00:00 2001
From: sergiopaniego
Date: Tue, 16 Apr 2024 15:35:53 +0200
Subject: [PATCH 06/11] [WIP]

---
 behavior_metrics/brains/CARLA/TCP/model.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/behavior_metrics/brains/CARLA/TCP/model.py b/behavior_metrics/brains/CARLA/TCP/model.py
index 2a58aa3e..5c909a1d 100644
--- a/behavior_metrics/brains/CARLA/TCP/model.py
+++ b/behavior_metrics/brains/CARLA/TCP/model.py
@@ -17,9 +17,10 @@ def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
        self._min = 0.0

    def step(self, error):
-        print('llega!')
+        print('here!')
        print(self._window)
-        print('llega!')
+        print(error)
+        print('here!')
        self._window.append(error)
        self._max = max(self._max, abs(error))
        self._min = -abs(self._max)
@@ -261,10 +262,15 @@ def control_pid(self, waypoints, velocity, target):
        waypoints = waypoints[0].data.cpu().numpy()
        target = target.squeeze().data.cpu().numpy()

+        print('waypoints', waypoints)
+
print('target', target) # flip y (forward is negative in our waypoints) waypoints[:,1] *= -1 target[1] *= -1 + print('waypoints', waypoints) + print('target', target) + # iterate over vectors between predicted waypoints num_pairs = len(waypoints) - 1 best_norm = 1e5 @@ -292,12 +298,24 @@ def control_pid(self, waypoints, velocity, target): # predicted point otherwise # (reduces noise in eg. straight roads, helps with sudden turn commands) use_target_to_aim = np.abs(angle_target) < np.abs(angle) + print('np.degrees(np.arctan2(target[1], target[0])) / 90', np.degrees(np.arctan2(target[1], target[0])) / 90) + print('np.abs(angle_target) < np.abs(angle)', np.abs(angle_target) < np.abs(angle)) + print('use_target_to_aim',use_target_to_aim) use_target_to_aim = use_target_to_aim or (np.abs(angle_target-angle_last) > self.config.angle_thresh and target[1] < self.config.dist_thresh) + print('use_target_to_aim',use_target_to_aim) + print('angle', angle) + print('angle_last', angle_last) + print('angle_target', angle_target) + print('target', target) + print('waypoints', waypoints) + print('aim_last', aim_last) + print('aim', aim) if use_target_to_aim: angle_final = angle_target else: angle_final = angle + print('angle_final', angle_final) steer = self.turn_controller.step(angle_final) steer = np.clip(steer, -1.0, 1.0) From 4dec5751421af1df5f3397c8c0d73dc82b4e3c28 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 3 May 2024 15:08:06 +0200 Subject: [PATCH 07/11] Added TCP brain working correctly in one route --- .../brains/CARLA/brain_carla_TCP_3.py | 283 ++++++++++++++++++ ...02_anticlockwise_imitation_learning.launch | 5 +- 2 files changed, 287 insertions(+), 1 deletion(-) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_TCP_3.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py new file mode 100644 index 00000000..9e38fba3 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py @@ -0,0 +1,283 @@ +from brains.CARLA.TCP.model import TCP +from brains.CARLA.TCP.config import GlobalConfig +from collections import OrderedDict + +from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control, calculate_delta_yaw +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from brains.CARLA.utils.high_level_command import HighLevelCommandLoader +from os import path + +import numpy as np + +import torch +import time +import math +import carla + +from utils.logger import logger +import importlib +from torchvision import transforms as T +from brains.CARLA.TCP.leaderboard.team_code.planner import RoutePlanner +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import downsample_route +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_manipulation import interpolate_trajectory +from brains.CARLA.TCP.leaderboard.leaderboard.utils.route_indexer import RouteIndexer + +from collections import deque + + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +class Brain: + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + self.motors = actuators.get_motor('motors_0') + self.camera_rgb = sensors.get_camera('camera_0') # rgb front view camera + self.camera_seg = sensors.get_camera('camera_2') # segmentation camera + self.imu = sensors.get_imu('imu_0') # imu + self.gnss = sensors.get_gnss('gnss_0') # gnss + self.speedometer = sensors.get_speedometer('speedometer_0') # gnss + self.handler = handler + self.inference_times = [] + 
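+        # [editor's note] self.device honours the GPU flag read below, but the
+        # rest of this brain moves tensors with the literal 'cuda' string
+        # (e.g. .to('cuda', dtype=torch.float32)) and calls self.net.cuda(),
+        # so GPU: False would fail on a CPU-only machine. A device-agnostic
+        # sketch (hypothetical change, not part of this patch):
+        #
+        #     self.net.to(self.device)
+        #     speed = torch.tensor(speed).view(-1, 1).to(self.device, dtype=torch.float32)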
self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + + client = carla.Client('localhost', 2000) + client.set_timeout(100.0) + world = client.get_world() + self.map = world.get_map() + + world.set_weather(carla.WeatherParameters.ClearNoon) + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + else: + self.config = GlobalConfig() + self.net = TCP(self.config).to(self.device) + + ckpt = torch.load(PRETRAINED_MODELS + model,map_location=self.device) + ckpt = ckpt["state_dict"] + new_state_dict = OrderedDict() + for key, value in ckpt.items(): + new_key = key.replace("model.","") + new_state_dict[new_key] = value + self.net.load_state_dict(new_state_dict, strict = False) + + #self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + self.net.cuda() + self.net.eval() + + self.vehicle = None + while self.vehicle is None: + for vehicle in world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == 'ego_vehicle': + self.vehicle = vehicle + break + if self.vehicle is None: + print("Waiting for vehicle with role_name 'ego_vehicle'") + time.sleep(1) # sleep for 1 second before checking again + + self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) + + + repetitions = 1 + routes = 'brains/CARLA/TCP/leaderboard/data/TCP_training_routes/routes_town02.xml' + scenarios = 'brains/CARLA/TCP/leaderboard/data/scenarios/town02_all_scenarios.json' + route_indexer = RouteIndexer(routes, scenarios, repetitions) + # setup + config = route_indexer.next() + ''' + We currently hard-code the initial and target points + ''' + #config.trajectory[0].x = 55.3 + #config.trajectory[0].y = -105.6 + + #config.trajectory[1].x = -30.0 + #config.trajectory[1].y = -105.6 + + config.trajectory[0].x = -3.3 + config.trajectory[0].y = 179.5 + + config.trajectory[1].x = -3.3 + config.trajectory[1].y = 120.6 + + # prepare route's trajectory (interpolate and add the GPS route) + gps_route, route = interpolate_trajectory(world, config.trajectory) + + self.route = route + self.set_global_plan(gps_route, self.route) + self._init_route_planner() + + self.steer_step = 0 + self.last_steers = deque() + self.status = 0 + + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): + """ + Set the plan (route) for the agent + """ + ds_ids = downsample_route(global_plan_world_coord, 50) + self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] + self._global_plan = [global_plan_gps[x] for x in ds_ids] + + + def _init_route_planner(self): + self._route_planner = RoutePlanner(4.0, 50.0) # min_distance, max_distance + self._route_planner.set_route(self._global_plan, True) + + gps = np.array([self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat']]) + gps = (gps - self._route_planner.mean) * self._route_planner.scale + + self.initialized = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + + def _get_position(self, tick_data): + gps = self.gnss.getGNSS() + gps = np.array([gps.longitude, gps.latitude]) + gps = (gps - self._route_planner.mean) * self._route_planner.scale + return gps + + @torch.no_grad() + def execute(self): + """Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + # VOID = -1 + # LEFT = 1 + # RIGHT = 2 + # STRAIGHT = 3 + # LANEFOLLOW = 4 + # CHANGELANELEFT = 5 + # CHANGELANERIGHT = 6 + command = 4 + if command < 0: + command = 4 + command -= 1 + assert command in [0, 1, 2, 3, 4, 5] + cmd_one_hot = [0] * 6 + cmd_one_hot[command] = 1 + cmd_one_hot = torch.tensor(cmd_one_hot).view(1, 6).to('cuda', dtype=torch.float32) + + rgb = self.camera_rgb.getImage().data + seg_image = self.camera_seg.getImage().data + + self.update_frame('frame_0', rgb) + self.update_frame('frame_1', seg_image) + + + speed = self.speedometer.getSpeedometer().data + gt_velocity = torch.FloatTensor([speed]).to('cuda', dtype=torch.float32) + speed = torch.tensor(speed).view(-1, 1).to('cuda', dtype=torch.float32) + speed = speed / 12 + + imu_data = self.imu.getIMU() + compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) + compass = compass[-1] + + if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames + compass = 0.0 + + result = { + 'rgb': rgb, + #'gps': gps, + 'speed': speed, + 'compass': compass, + #'bev': bev + } + + pos = self._get_position(result) + #result['gps'] = pos + next_wp, next_cmd = self._route_planner.run_step(pos) + next_wp = [next_wp[1], next_wp[0]] + result['next_command'] = next_cmd.value + + theta = compass + np.pi/2 + R = np.array([ + [np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)] + ]) + + local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) + + + + result['target_point'] = tuple(local_command_point) + result['target_point'] = [torch.FloatTensor([result['target_point'][0]]), + torch.FloatTensor([result['target_point'][1]])] + result['target_point'][1] *= -1 + + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + + state = torch.cat([speed, self.target_point, cmd_one_hot], 1) + + rgb = self._im_transform(rgb).unsqueeze(0).to('cuda', dtype=torch.float32) + pred = self.net(rgb, state, self.target_point) + + + steer_ctrl, throttle_ctrl, brake_ctrl, metadata = self.net.process_action(pred, command, gt_velocity, self.target_point) + + steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, self.target_point) + + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 + + + + if self.status == 0: + self.alpha = 0.3 + steer_ctrl = np.clip(self.alpha*steer_ctrl + (1-self.alpha)*steer_traj, -1, 1) + throttle_ctrl = np.clip(self.alpha*throttle_ctrl + (1-self.alpha)*throttle_traj, 0, 0.75) + brake_ctrl = 
np.clip(self.alpha*brake_ctrl + (1-self.alpha)*brake_traj, 0, 1) + else: + self.alpha = 0.3 + steer_ctrl = np.clip(self.alpha*steer_traj + (1-self.alpha)*steer_ctrl, -1, 1) + throttle_ctrl = np.clip(self.alpha*throttle_traj + (1-self.alpha)*throttle_ctrl, 0, 0.75) + brake_ctrl = np.clip(self.alpha*brake_traj + (1-self.alpha)*brake_ctrl, 0, 1) + + if brake_ctrl > 0.5: + throttle_ctrl = float(0) + + + self.motors.sendThrottle(throttle_ctrl) + self.motors.sendSteer(steer_ctrl) + self.motors.sendBrake(brake_ctrl) + + if len(self.last_steers) >= 20: + self.last_steers.popleft() + self.last_steers.append(abs(float(steer_ctrl))) + + num = 0 + for s in self.last_steers: + if s > 0.10: + num += 1 + if num > 10: + self.status = 1 + self.steer_step += 1 + + else: + self.status = 0 + diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch index a6fe01e5..54125b7a 100644 --- a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch +++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch @@ -39,7 +39,10 @@ - + + + + From ededf4de90e8bae84fd67f66f69f81745588a48e Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 3 May 2024 18:03:11 +0200 Subject: [PATCH 08/11] Updated TCP brain woking in 3 directions --- .../brains/CARLA/brain_carla_TCP_3.py | 44 ++++++++++++++++--- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py index 9e38fba3..353dbd94 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py @@ -88,7 +88,19 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): # setup config = route_indexer.next() ''' - We currently hard-code the initial and target points + [WIP] WORKS WITH: + result['target_point'][1] *= -1 + self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) + ''' + config.trajectory[0].x = 55.3 + config.trajectory[0].y = -109.0 + + config.trajectory[1].x = 80.0 + config.trajectory[1].y = -109.0 + + ''' + WORKS WITH: + self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) ''' #config.trajectory[0].x = 55.3 #config.trajectory[0].y = -105.6 @@ -96,11 +108,29 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): #config.trajectory[1].x = -30.0 #config.trajectory[1].y = -105.6 - config.trajectory[0].x = -3.3 - config.trajectory[0].y = 179.5 + # WORKS with result['target_point'][1] *= -1 + ''' + WORKS WITH: + result['target_point'][1] *= -1 + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + ''' + #config.trajectory[0].x = -3.3 + #config.trajectory[0].y = 179.5 + + #config.trajectory[1].x = -3.3 + #config.trajectory[1].y = 120.6 + + # WORKS without result['target_point'][1] *= -1 + ''' + WORKS WITH: + #result['target_point'][1] *= -1 + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + ''' + #config.trajectory[0].x = -7.43 + #config.trajectory[0].y = 125.5 - config.trajectory[1].x = -3.3 - config.trajectory[1].y = 120.6 + #config.trajectory[1].x = -7.43 + #config.trajectory[1].y = 170.6 # 
prepare route's trajectory (interpolate and add the GPS route) gps_route, route = interpolate_trajectory(world, config.trajectory) @@ -228,9 +258,9 @@ def execute(self): result['target_point'] = tuple(local_command_point) result['target_point'] = [torch.FloatTensor([result['target_point'][0]]), torch.FloatTensor([result['target_point'][1]])] - result['target_point'][1] *= -1 - + #result['target_point'][1] *= -1 self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + #self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) state = torch.cat([speed, self.target_point, cmd_one_hot], 1) From 26090390fd4ef317b6adf02c633abf92dfef4f1a Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 3 May 2024 18:38:20 +0200 Subject: [PATCH 09/11] TCP working in all directions --- behavior_metrics/brains/CARLA/brain_carla_TCP_3.py | 10 +++++----- .../town_02_anticlockwise_imitation_learning.launch | 9 ++++++++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py index 353dbd94..e851d07d 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP_3.py @@ -88,15 +88,15 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): # setup config = route_indexer.next() ''' - [WIP] WORKS WITH: - result['target_point'][1] *= -1 + WORKS WITH: + result['target_point'][0] *= -1 self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) ''' - config.trajectory[0].x = 55.3 - config.trajectory[0].y = -109.0 + config.trajectory[0].x = 30.3 + config.trajectory[0].y = 109.5 config.trajectory[1].x = 80.0 - config.trajectory[1].y = -109.0 + config.trajectory[1].y = 109.5 ''' WORKS WITH: diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch index 54125b7a..7813cf52 100644 --- a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch +++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_imitation_learning.launch @@ -39,8 +39,15 @@ + - + + + + + + + From f8848507e8ce1e47bbdf959e07b916fb15673c0b Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Mon, 20 May 2024 11:42:07 +0200 Subject: [PATCH 10/11] Updated TCP brain --- .../brains/CARLA/brain_carla_TCP.py | 115 +++++++++++++++--- 1 file changed, 97 insertions(+), 18 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_TCP.py b/behavior_metrics/brains/CARLA/brain_carla_TCP.py index 6b9481f0..6e1f6b70 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_TCP.py +++ b/behavior_metrics/brains/CARLA/brain_carla_TCP.py @@ -46,6 +46,8 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): world = client.get_world() self.map = world.get_map() + world.set_weather(carla.WeatherParameters.ClearNoon) + print('-----------------------') print('-----------------------') print(PRETRAINED_MODELS + model) @@ -120,14 +122,55 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): We currently hard-code the initial and target points ''' print('----- TRAJECTORY ------') - print(config) - print(config.trajectory) + print('config', config) + 
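+        # [editor's note] The "WORKS WITH" blocks below pair each hard-coded
+        # route with a different axis swap or sign flip of the target point,
+        # which suggests the world-to-ego rotation is being compensated per
+        # route. A heading-independent sketch, reusing the rotation matrix the
+        # tick() code already builds (assuming compass holds the ego yaw in
+        # radians and pos/next_wp share one world frame):
+        #
+        #     theta = compass + np.pi / 2
+        #     R = np.array([[np.cos(theta), -np.sin(theta)],
+        #                   [np.sin(theta),  np.cos(theta)]])
+        #     local_command_point = R.T.dot(np.array([next_wp[0] - pos[0],
+        #                                             next_wp[1] - pos[1]]))
+        #
+        # With the offset rotated into the ego frame once, the per-route swaps
+        # should not be needed.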
print('trajectory', config.trajectory) print(config.trajectory[0].x, config.trajectory[0].y) - config.trajectory[0].x = 55.3 - config.trajectory[0].y = -105.6 - config.trajectory[1].x = -30.0 - config.trajectory[1].y = -105.6 + ''' + WORKS WITH: + result['target_point'][0] *= -1 + self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) + ''' + config.trajectory[0].x = 30.3 + config.trajectory[0].y = 109.5 + + config.trajectory[1].x = 80.0 + config.trajectory[1].y = 109.5 + + ''' + WORKS WITH: + self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) + ''' + #config.trajectory[0].x = 55.3 + #config.trajectory[0].y = -105.6 + + #config.trajectory[1].x = -30.0 + #config.trajectory[1].y = -105.6 + + # WORKS with result['target_point'][1] *= -1 + ''' + WORKS WITH: + result['target_point'][1] *= -1 + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + ''' + #config.trajectory[0].x = -3.3 + #config.trajectory[0].y = 179.5 + + #config.trajectory[1].x = -3.3 + #config.trajectory[1].y = 120.6 + + # WORKS without result['target_point'][1] *= -1 + ''' + WORKS WITH: + #result['target_point'][1] *= -1 + self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + ''' + #config.trajectory[0].x = -7.43 + #config.trajectory[0].y = 125.5 + + #config.trajectory[1].x = -7.43 + #config.trajectory[1].y = 170.6 + print(config.trajectory[0].x, config.trajectory[0].y) print() print(config.trajectory[1].x, config.trajectory[1].y) @@ -136,6 +179,11 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): # prepare route's trajectory (interpolate and add the GPS route) gps_route, route = interpolate_trajectory(world, config.trajectory) + print('---gps_route---') + print(gps_route) + print('---route---') + print(route) + self.route = route self.set_global_plan(gps_route, self.route) self._init_route_planner() @@ -145,6 +193,24 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.status = 0 + waypoint = world.get_map().get_waypoint(vehicle.get_location(),project_to_road=True, lane_type=(carla.LaneType.Driving | carla.LaneType.Shoulder | carla.LaneType.Sidewalk)) + + print('---------------') + print(vehicle.get_location()) + print(waypoint) + print("road_id: " + str(waypoint.road_id)) + print("section_id: " + str(waypoint.section_id)) + print("lane_id: " + str(waypoint.lane_id)) + print("Current lane type: " + str(waypoint.lane_type)) + # Check current lane change allowed + print("Current Lane change: " + str(waypoint.lane_change)) + # Left and Right lane markings + print("L lane marking type: " + str(waypoint.left_lane_marking.type)) + print("L lane marking change: " + str(waypoint.left_lane_marking.lane_change)) + print("R lane marking type: " + str(waypoint.right_lane_marking.type)) + print("R lane marking change: " + str(waypoint.right_lane_marking.lane_change)) + + def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): """ Set the plan (route) for the agent @@ -152,18 +218,18 @@ def set_global_plan(self, global_plan_gps, global_plan_world_coord, wp=None): ds_ids = downsample_route(global_plan_world_coord, 50) self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1]) for x in ds_ids] self._global_plan = [global_plan_gps[x] for x in ds_ids] - #print('-----GLOBAL PLAN -----') - 
#print(self._global_plan) + print('-----GLOBAL PLAN -----') + print(self._global_plan) def _init_route_planner(self): - self._route_planner = RoutePlanner(4.0, 50.0) + self._route_planner = RoutePlanner(4.0, 50.0) # min_distance, max_distance self._route_planner.set_route(self._global_plan, True) gps = np.array([self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat']]) gps = (gps - self._route_planner.mean) * self._route_planner.scale - #print('-----GPS----') - #print(gps) + print('-----GPS----') + print(gps) self.initialized = True @@ -226,6 +292,10 @@ def execute(self): self.update_frame('frame_0', rgb) self.update_frame('frame_1', seg_image) + from PIL import Image + imagen_pil = Image.fromarray(rgb) + imagen_pil.save('imagen_de_tcp.png') + print('----------getSpeedometer--------------') @@ -242,8 +312,8 @@ def execute(self): imu_data = self.imu.getIMU() #compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z, imu_data.compass.w]) compass = np.array([imu_data.compass.x, imu_data.compass.y, imu_data.compass.z]) - #print('----------compass--------------') - #print(compass) + print('----------compass--------------') + print(compass) compass = compass[-1] if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames @@ -270,7 +340,11 @@ def execute(self): ]) local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]]) + print('--local_command_point--', local_command_point) + print('next_wp', next_wp) + print('pos', pos) #local_command_point = R.T.dot(local_command_point) + #print('--local_command_point--', local_command_point) #local_command_point = R.T.dot(local_command_point) #local_command_point = R.T.dot(local_command_point) @@ -284,8 +358,10 @@ def execute(self): result['target_point'] = [torch.FloatTensor([result['target_point'][0]]), torch.FloatTensor([result['target_point'][1]])] - self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + result['target_point'][0] *= -1 + #self.target_point = torch.stack(result['target_point'], dim=1).to('cuda', dtype=torch.float32) + self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) ''' @@ -309,11 +385,11 @@ def execute(self): [-3.83868739e+01 1.80376380e-02] ''' #print(local_command_point) - #print('-------NEXT WAYPOINT-----------') + print('-------NEXT WAYPOINT-----------') ''' [4.7923297947398655, -105.55486136806194] ''' - #print(next_wp) + print(next_wp) print('-------Target point-----------') print(self.target_point) ''' @@ -335,9 +411,9 @@ def execute(self): pred = self.net(rgb, state, self.target_point) - #print('-----PRED------') + print('-----PRED------') #print(pred.keys()) - #print(pred['pred_wp']) + print(pred['pred_wp']) #print('------COMMAND----') #print(command) @@ -347,6 +423,9 @@ def execute(self): #print('------ steer_ctrl, throttle_ctrl, brake_ctrl, metadata-------') #print(steer_ctrl, throttle_ctrl, brake_ctrl, metadata) + #result['target_point'][0] *= -1 + #self.target_point = torch.stack([result['target_point'][1], result['target_point'][0]], dim=1).to('cuda', dtype=torch.float32) + steer_traj, throttle_traj, brake_traj, metadata_traj = self.net.control_pid(pred['pred_wp'], gt_velocity, self.target_point) if brake_traj < 0.05: brake_traj = 0.0 From 50de75842bf17e3655b8e00fd79adb9dcbbd9fd8 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Thu, 12 Sep 2024 10:57:42 +0200 Subject: [PATCH 11/11] Upload files --- 
 ...o_segmentation_based_imitation_learning.py | 195 +++++++++++++++
 ...a_segmentation_based_imitation_learning.py |   2 +
 ...ep_learning_previous_v_generate_dataset.py | 234 ++++++++++++++++++
 ..._carla_bird_eye_deep_lerning_atlas_uc3m.py | 203 +++++++++++++++
 .../carla_new_ada_bridge.launch               | 100 ++++++++
 .../carla_new_ada_bridge_updated.launch       |  85 +++++++
 ...la_new_ada_bridge_updated_autopilot.launch |  83 +++++++
 ...rla_new_ada_bridge_updated_parque01.launch |  90 +++++++
 .../CARLA_launch_files/image_transform.py     |  60 +++++
 .../town_02_anticlockwise_meiqi.launch        |  78 ++++++
 .../configs/CARLA/default_carla.yml           |   6 +-
 .../configs/CARLA/default_carla_autopilot.yml |   4 +-
 .../CARLA/default_carla_tensorflow.yml        |   4 +-
 .../default_carla_tensorflow_updated.yml      |  80 ++++++
 .../robot/interfaces/speedometer.py           |   2 +-
 behavior_metrics/utils/environment.py         |   6 +-
 16 files changed, 1224 insertions(+), 8 deletions(-)
 create mode 100644 behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py
 create mode 100644 behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py
 create mode 100644 behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py
 create mode 100644 behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch
 create mode 100644 behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml

diff --git a/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py b/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py
new file mode 100644
index 00000000..c30e4dbc
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py
@@ -0,0 +1,195 @@
+from brains.CARLA.utils.pilotnet_onehot import PilotNetOneHot
+from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control, calculate_delta_yaw
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from brains.CARLA.utils.high_level_command import HighLevelCommandLoader
+from os import path
+
+import numpy as np
+
+import torch
+import time
+import math
+import carla
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+class Brain:
+
+    def __init__(self, sensors, actuators, model=None, handler=None, config=None):
+        self.motors = actuators.get_motor('motors_0')
+        self.camera_rgb = sensors.get_camera('camera_0')  # rgb front view camera
+        self.camera_seg = sensors.get_camera('camera_2')  # segmentation camera
+        self.handler = handler
+        self.inference_times = []
+        self.gpu_inference = config['GPU']
+        self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu')
+        self.red_light_counter = 0
+        self.running_light = False
+
+        client = carla.Client('localhost', 2000)
+        client.set_timeout(100.0)
+        world = client.get_world()
+        self.map = world.get_map()
+
+        weather = carla.WeatherParameters.ClearNoon
+        world.set_weather(weather)
+
+        self.vehicle = None
+        while self.vehicle is None:
+            for vehicle in world.get_actors().filter('vehicle.*'):
+                if vehicle.attributes.get('role_name') == 'ego_vehicle':
+                    self.vehicle = vehicle
+                    break
+            if self.vehicle is None:
+                print("Waiting for vehicle with role_name 'ego_vehicle'")
+                time.sleep(1)  # sleep for 1 second before checking again
+
+        if model:
+            if not path.exists(PRETRAINED_MODELS + model):
+                print("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+
+            if config['UseOptimized']:
+                self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device)
+            else:
+                #self.net = PilotNetOneHot((288, 200, 6), 3, 4, 4).to(self.device)
+                #self.net = PilotNetOneHot((288, 200, 3), 3, 4, 4).to(self.device)
+                self.net = PilotNetOneHot((288, 200, 3), 2, 4, 4).to(self.device)  # combined control
+                self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model, map_location=self.device))
+                self.net.eval()
+
+        if 'Route' in config:
+            route = config['Route']
+            print('route: ', route)
+        else:
+            route = None
+
+        self.hlc_loader = HighLevelCommandLoader(self.vehicle, self.map, route=route)
+        self.prev_hlc = 0
+        self.prev_yaw = None
+        self.delta_yaw = 0
+
+        self.target_point = None
+        self.termination_code = 0  # 0: not terminated; 1: arrived at target; 2: wrong turn
+
+    def update_frame(self, frame_id, data):
+        """Update the information to be shown in one of the GUI's frames.
+
+        Arguments:
+            frame_id {str} -- Id of the frame that will represent the data
+            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+        """
+        if data.shape[0] != data.shape[1]:
+            if data.shape[0] > data.shape[1]:
+                difference = data.shape[0] - data.shape[1]
+                extra_left, extra_right = int(difference/2), int(difference/2)
+                extra_top, extra_bottom = 0, 0
+            else:
+                difference = data.shape[1] - data.shape[0]
+                extra_left, extra_right = 0, 0
+                extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+            data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+        self.handler.update_frame(frame_id, data)
+
+    def execute(self):
+        """Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)"""
+
+        rgb_image = self.camera_rgb.getImage().data
+        seg_image = self.camera_seg.getImage().data
+
+        self.update_frame('frame_0', rgb_image)
+        self.update_frame('frame_1', seg_image)
+
+        start_time = time.time()
+        try:
+            # calculate speed
+            velocity = self.vehicle.get_velocity()
+            speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2)
+            speed = 3.6 * speed_m_s  # m/s to km/h
+
+            # read next high-level command or choose a random direction
+            hlc = self.hlc_loader.get_next_hlc()
+            if hlc != 0:
+                if self.prev_hlc == 0:
+                    self.prev_yaw = self.vehicle.get_transform().rotation.yaw
+                else:
+                    cur_yaw = self.vehicle.get_transform().rotation.yaw
+                    self.delta_yaw += calculate_delta_yaw(self.prev_yaw, cur_yaw)
+                    self.prev_yaw = cur_yaw
+
+            # detect whether the vehicle made the correct turn
+            turning_infraction = False
+            if self.prev_hlc != 0 and hlc == 0:
+                print(f'turned {self.delta_yaw} degrees')
+                if 45 < np.abs(self.delta_yaw) < 180:
+                    if self.delta_yaw < 0 and self.prev_hlc != 1:
+                        turning_infraction = True
+                    elif self.delta_yaw > 0 and self.prev_hlc != 2:
+                        turning_infraction = True
+                elif self.prev_hlc != 3:
+                    turning_infraction = True
+                if turning_infraction:
+                    print('Wrong Turn!!!')
+                    self.termination_code = 2
+                self.delta_yaw = 0
+
+            self.prev_hlc = hlc
+
+            # get traffic light status
+            light_status = -1
+            vehicle_location = self.vehicle.get_transform().location
+            if self.vehicle.is_at_traffic_light():
+                traffic_light = self.vehicle.get_traffic_light()
+                light_status = traffic_light.get_state()
+                traffic_light_location = traffic_light.get_transform().location
+                distance_to_traffic_light = np.sqrt((vehicle_location.x - traffic_light_location.x)**2 + (vehicle_location.y - traffic_light_location.y)**2)
+                if light_status == carla.libcarla.TrafficLightState.Red and distance_to_traffic_light < 6 and speed_m_s > 5:
+                    if not self.running_light:
+                        self.running_light = True
+                        self.red_light_counter += 1
+                else:
+                    self.running_light = False
+
+            hlc = 0
+            print(f'high-level command: {hlc}')
+            print(f'light: {light_status}')
+            frame_data = {
+                'hlc': hlc,
+                'measurements': speed,
+                'rgb': np.copy(rgb_image),
+                'segmentation': np.copy(seg_image),
+                'light': np.array([traffic_light_to_int(light_status)])
+            }
+
+            throttle, steer, brake = model_control(self.net,
+                                                   frame_data,
+                                                   ignore_traffic_light=False,
+                                                   device=self.device,
+                                                   combined_control=True)
+                                                   #combined_control=False)
+
+            self.inference_times.append(time.time() - start_time)
+
+            print(throttle, steer, brake)
+
+            self.motors.sendThrottle(throttle)
+            #self.motors.sendThrottle(0.25)
+            self.motors.sendSteer(steer)
+            self.motors.sendBrake(brake)
+            #self.motors.sendBrake(0.0)
+
+            # calculate distance to target point
+            # print(f'vehicle location: ({vehicle_location.x}, {-vehicle_location.y})')
+            # print(f'target point: ({self.target_point[0]}, {self.target_point[1]})')
+            if self.target_point is not None:
+                distance_to_target = np.sqrt((self.target_point[0] - vehicle_location.x)**2 + (self.target_point[1] - (-vehicle_location.y))**2)
+                print(f'Euclidean distance to target: {distance_to_target}')
+                if distance_to_target < 1.5:
+                    self.termination_code = 1
+
+        except Exception as err:
+            print(err)
\ No newline at end of file
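The turn-infraction check above accumulates yaw over the whole maneuver, so the per-frame delta has to handle the wrap of CARLA's yaw at plus or minus 180 degrees. The imported calculate_delta_yaw is not shown in this patch; a minimal wrap-aware version, assuming yaw in degrees (illustrative only, not necessarily the project's implementation):

    def calculate_delta_yaw(prev_yaw, cur_yaw):
        # shortest signed angular difference, handling the +/-180 degree wrap
        delta = cur_yaw - prev_yaw
        if delta > 180.0:
            delta -= 360.0
        elif delta < -180.0:
            delta += 360.0
        return delta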
diff --git a/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py b/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
index d45c06d5..dcd60bee 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
@@ -52,6 +52,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None):
                 self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device)
             else:
                 self.net = PilotNetOneHot((288, 200, 6), 3, 4, 4).to(self.device)
+                #self.net = PilotNetOneHot((288, 200, 3), 3, 4, 4).to(self.device)
                 self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model, map_location=self.device))
                 self.net.eval()
 
diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py
new file mode 100644
index 00000000..e03a568f
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import csv
+import cv2
+import math
+import numpy as np
+import threading
+import time
+import carla
+from os import path
+from albumentations import (
+    Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare
+)
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from utils.logger import logger
+from traceback import print_exc
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+from tensorflow.python.framework.errors_impl import NotFoundError
+from tensorflow.python.framework.errors_impl import UnimplementedError
+import tensorflow as tf
+import pandas as pd
+
+import os
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+#gpus = tf.config.experimental.list_physical_devices('GPU')
+#for gpu in gpus:
+#    tf.config.experimental.set_memory_growth(gpu, True)
+
+
+class Brain:
+
+    def __init__(self, sensors, actuators, handler, model, config=None):
+        self.camera_0 = sensors.get_camera('camera_0')
+        self.camera_1 = sensors.get_camera('camera_1')
+        self.camera_2 = sensors.get_camera('camera_2')
+        self.camera_3 = sensors.get_camera('camera_3')
+
+        self.cameras_first_images = []
+
+        self.pose = sensors.get_pose3d('pose3d_0')
+
+        self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0')
+
+        self.motors = actuators.get_motor('motors_0')
+        self.handler = handler
+        self.config = config
+        self.inference_times = []
+        self.gpu_inference = True if tf.test.gpu_device_name() else False
+
+        self.threshold_image = np.zeros((640, 360, 3), np.uint8)
+        self.color_image = np.zeros((640, 360, 3), np.uint8)
+
+        client = carla.Client('localhost', 2000)
+        client.set_timeout(10.0)  # seconds
+        world = client.get_world()
+
+        time.sleep(5)
+        self.vehicle = world.get_actors().filter('vehicle.*')[0]
+
+        if model:
+            if not path.exists(PRETRAINED_MODELS + model):
+                logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+            logger.info("** Load TF model **")
+            self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model)
+            logger.info("** Loaded TF model **")
+        else:
+            logger.info("** Brain not loaded **")
+            logger.info("- Models path: " + PRETRAINED_MODELS)
+            logger.info("- Model: " + str(model))
+
+        self.previous_speed = 0
+        self.previous_bird_eye_view_image = 0
+        self.bird_eye_view_images = 0
+        self.bird_eye_view_unique_images = 0
+
+        self.folder_name = '../../carla_dataset_ATLAS/18_03_anticlockwise_town_02/'
+
+        # open the file in the write mode
+        self.f = open(self.folder_name + 'dataset.csv', 'a')
+        # create the csv writer
+        self.writer = csv.writer(self.f)
+
+        try:
+            df = pd.read_csv(self.folder_name + 'dataset.csv')
+            self.batch_number = int(df.iloc[-1]['batch']) + 1
+        except Exception as ex:
+            # dataset.csv missing or empty: start a fresh dataset and write the header
+            self.batch_number = 0
+            header = ['batch', 'image_id', 'timestamp', 'throttle', 'steer', 'brake', 'location_x', 'location_y', 'previous_velocity', 'current_velocity']
+            self.writer.writerow(header)
+
+        print(self.batch_number)
+        self.frame_number = 0
+
+    def update_frame(self, frame_id, data):
+        """Update the information to be shown in one of the GUI's frames.
+
+        Arguments:
+            frame_id {str} -- Id of the frame that will represent the data
+            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+        """
+        if data.shape[0] != data.shape[1]:
+            if data.shape[0] > data.shape[1]:
+                difference = data.shape[0] - data.shape[1]
+                extra_left, extra_right = int(difference/2), int(difference/2)
+                extra_top, extra_bottom = 0, 0
+            else:
+                difference = data.shape[1] - data.shape[0]
+                extra_left, extra_right = 0, 0
+                extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+            data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+        self.handler.update_frame(frame_id, data)
+
+    def update_pose(self, pose_data):
+        self.handler.update_pose3d(pose_data)
+
+    def execute(self):
+        image = self.camera_0.getImage().data
+        image_1 = self.camera_1.getImage().data
+        image_2 = self.camera_2.getImage().data
+        image_3 = self.camera_3.getImage().data
+
+        base_img_copy = image.copy()
+
+        bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
+        bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
+
+        if self.cameras_first_images == []:
+            self.cameras_first_images.append(image)
+            self.cameras_first_images.append(image_1)
+            self.cameras_first_images.append(image_2)
+            self.cameras_first_images.append(image_3)
+            self.cameras_first_images.append(bird_eye_view_1)
+
+        self.cameras_last_images = [
+            image,
+            image_1,
+            image_2,
+            image_3,
+            bird_eye_view_1
+        ]
+
+        self.update_frame('frame_1', image_1)
+        self.update_frame('frame_2', image_2)
+        self.update_frame('frame_3', image_3)
+
+        self.update_frame('frame_0', bird_eye_view_1)
+
+        self.update_pose(self.pose.getPose3d())
+
+        image_shape = (66, 200)
+        img_base = cv2.resize(bird_eye_view_1, image_shape)
+
+        AUGMENTATIONS_TEST = Compose([
+            Normalize()
+        ])
+        image = AUGMENTATIONS_TEST(image=img_base)
+        img = image["image"]
+
+        self.bird_eye_view_images += 1
+        if not (self.previous_bird_eye_view_image == img).all():
+            self.bird_eye_view_unique_images += 1
+            self.previous_bird_eye_view_image = img
+
+        velocity_dim = np.full((200, 66), self.previous_speed/30)
+        new_img_vel = np.dstack((img, velocity_dim))
+        img = new_img_vel
+
+        img = np.expand_dims(img, axis=0)
+        start_time = time.time()
+        try:
+            prediction = self.net.predict(img, verbose=0)
+            self.inference_times.append(time.time() - start_time)
+            throttle = prediction[0][0]
+            steer = prediction[0][1] * (1 - (-1)) + (-1)
+            break_command = prediction[0][2]
+            speed = self.vehicle.get_velocity()
+            vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2)
+
+            '''
+            Save dataset
+            '''
+            from PIL import Image
+            pil_image = Image.fromarray(base_img_copy)
+            pil_image.save(self.folder_name + str(self.batch_number) + '_' + str(self.frame_number) + '.png')
+            #cv2.imwrite('../../carla_dataset_ATLAS/18_03_anticlockwise_town_02/' + str(self.batch_number) + '_' + str(self.frame_number) + '.png', img)
+            vehicle_location = self.vehicle.get_location()
+            row = [self.batch_number, str(self.batch_number) + '_' + str(self.frame_number) + '.png', time.time(), throttle, steer, break_command, vehicle_location.x, vehicle_location.y, self.previous_speed, vehicle_speed]
+
+            print(str(self.batch_number) + '_' + str(self.frame_number) + '.png')
+            print(row)
+
+            self.writer.writerow(row)
+            self.frame_number += 1
+            self.previous_speed = vehicle_speed
+
+            if vehicle_speed < 5:
+                self.motors.sendThrottle(1.0)
+                self.motors.sendSteer(0.0)
+                self.motors.sendBrake(0)
+            else:
+                self.motors.sendThrottle(throttle)
+                self.motors.sendSteer(steer)
+                self.motors.sendBrake(break_command)
+
+        except NotFoundError as ex:
+            logger.info('Error inside brain: NotFoundError!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
+        except UnimplementedError as ex:
+            logger.info('Error inside brain: UnimplementedError!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
+        except Exception as ex:
+            logger.info('Error inside brain: Exception!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
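In both TensorFlow brains the network predicts values normalized to [0, 1], and only the steering channel is mapped back to a signed range; prediction[0][1] * (1 - (-1)) + (-1) is ordinary min-max denormalization with steering bounds of -1 and 1. A small sketch of the general form (the helper name is illustrative, not part of the patch):

    def denormalize(value, out_min=-1.0, out_max=1.0):
        # map a [0, 1] network output back to [out_min, out_max]
        return value * (out_max - out_min) + out_min

    steer = denormalize(0.75)  # -> 0.5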
diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py
new file mode 100644
index 00000000..7cacca82
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import csv
+import cv2
+import math
+import numpy as np
+import threading
+import time
+import carla
+from os import path
+from albumentations import (
+    Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout
+)
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from utils.logger import logger
+from traceback import print_exc
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+from tensorflow.python.framework.errors_impl import NotFoundError
+from tensorflow.python.framework.errors_impl import UnimplementedError
+import tensorflow as tf
+
+
+#import os
+#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+    tf.config.experimental.set_memory_growth(gpu, True)
+
+class Brain:
+
+    def __init__(self, sensors, actuators, handler, model, config=None):
+        self.camera_0 = sensors.get_camera('camera_0')
+        self.camera_1 = sensors.get_camera('camera_1')
+        self.camera_2 = sensors.get_camera('camera_2')
+        self.camera_3 = sensors.get_camera('camera_3')
+
+        self.cameras_first_images = []
+
+        self.pose = sensors.get_pose3d('pose3d_0')
+
+        self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0')
+
+        self.motors = actuators.get_motor('motors_0')
+        self.handler = handler
+        self.config = config
+        self.inference_times = []
+        self.gpu_inference = True if tf.test.gpu_device_name() else False
+
+        self.threshold_image = np.zeros((640, 360, 3), np.uint8)
+        self.color_image = np.zeros((640, 360, 3), np.uint8)
+
+        client = carla.Client('localhost', 2000)
+        client.set_timeout(30.0)  # seconds
+        world = client.get_world()
+
+        time.sleep(5)
+        for vehicle in world.get_actors().filter('vehicle.*'):
+            if vehicle.attributes.get('role_name') == 'ego_vehicle':
+                self.vehicle = vehicle
+                break
+
+        if model:
+            if not path.exists(PRETRAINED_MODELS + model):
+                logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+            logger.info("** Load TF model **")
+            self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False)
+            logger.info("** Loaded TF model **")
+        else:
+            logger.info("** Brain not loaded **")
+            logger.info("- Models path: " + PRETRAINED_MODELS)
+            logger.info("- Model: " + str(model))
+
+        self.previous_bird_eye_view_image = 0
+        self.bird_eye_view_images = 0
+        self.bird_eye_view_unique_images = 0
+
+    def update_frame(self, frame_id, data):
+        """Update the information to be shown in one of the GUI's frames.
+
+        Arguments:
+            frame_id {str} -- Id of the frame that will represent the data
+            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+        """
+        if data.shape[0] != data.shape[1]:
+            if data.shape[0] > data.shape[1]:
+                difference = data.shape[0] - data.shape[1]
+                extra_left, extra_right = int(difference/2), int(difference/2)
+                extra_top, extra_bottom = 0, 0
+            else:
+                difference = data.shape[1] - data.shape[0]
+                extra_left, extra_right = 0, 0
+                extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+            data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+        self.handler.update_frame(frame_id, data)
+
+    def update_pose(self, pose_data):
+        self.handler.update_pose3d(pose_data)
+
+    def execute(self):
+        image = self.camera_0.getImage().data
+        image_1 = self.camera_1.getImage().data
+        image_2 = self.camera_2.getImage().data
+        image_3 = self.camera_3.getImage().data
+
+        bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
+        bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
+
+        if self.cameras_first_images == []:
+            self.cameras_first_images.append(image)
+            self.cameras_first_images.append(image_1)
+            self.cameras_first_images.append(image_2)
+            self.cameras_first_images.append(image_3)
+            self.cameras_first_images.append(bird_eye_view_1)
+
+        self.cameras_last_images = [
+            image,
+            image_1,
+            image_2,
+            image_3,
+            bird_eye_view_1
+        ]
+
+        self.update_frame('frame_0', image)
+        self.update_frame('frame_1', image_1)
+        self.update_frame('frame_2', image_2)
+        self.update_frame('frame_3', image_3)
+
+        '''
+        nombre_archivo = "imagen_guardada.png"
+        imagen_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        cv2.imwrite(nombre_archivo, imagen_rgb)
+        nombre_archivo = "image_1_guardada.png"
+        imagen_rgb = cv2.cvtColor(image_1, cv2.COLOR_BGR2RGB)
+        cv2.imwrite(nombre_archivo, imagen_rgb)
+        nombre_archivo = "image_2_guardada.png"
+        imagen_rgb = cv2.cvtColor(image_2, cv2.COLOR_BGR2RGB)
+        cv2.imwrite(nombre_archivo, imagen_rgb)
+        nombre_archivo = "image_3_guardada.png"
+        imagen_rgb = cv2.cvtColor(image_3, cv2.COLOR_BGR2RGB)
+        cv2.imwrite(nombre_archivo, imagen_rgb)
+        '''
+        #self.update_frame('frame_0', bird_eye_view_1)
+
+        self.update_pose(self.pose.getPose3d())
+
+        image_shape = (66, 200)
+        #image_shape = (50, 150)
+        img_base = cv2.resize(bird_eye_view_1, image_shape)
+
+        AUGMENTATIONS_TEST = Compose([
+            Normalize()
+        ])
+        image = AUGMENTATIONS_TEST(image=img_base)
+        img = image["image"]
+
+        self.bird_eye_view_images += 1
+        if not (self.previous_bird_eye_view_image == img).all():
+            self.bird_eye_view_unique_images += 1
+            self.previous_bird_eye_view_image = img
+
+        img = np.expand_dims(img, axis=0)
+        start_time = time.time()
+        try:
+            prediction = self.net.predict(img, verbose=0)
+            self.inference_times.append(time.time() - start_time)
+            throttle = prediction[0][0]
+            steer = prediction[0][1] * (1 - (-1)) + (-1)
+            break_command = prediction[0][2]
+
+            speed = self.vehicle.get_velocity()
+            vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2)
+
+            if vehicle_speed < 5:
+                self.motors.sendThrottle(1.0)
+                self.motors.sendSteer(0.0)
+                self.motors.sendBrake(0)
+            else:
+                self.motors.sendThrottle(throttle)
+                self.motors.sendSteer(steer)
+                self.motors.sendBrake(break_command)
+        except NotFoundError as ex:
+            logger.info('Error inside brain: NotFoundError!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
+        except UnimplementedError as ex:
+            logger.info('Error inside brain: UnimplementedError!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
+        except Exception as ex:
+            logger.info('Error inside brain: Exception!')
+            logger.warning(type(ex).__name__)
+            print_exc()
+            raise Exception(ex)
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch
new file mode 100644
index 00000000..5cf5b766
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch
@@ -0,0 +1,100 @@
+<!-- 100 lines of launch XML; markup not recoverable from this copy of the patch -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
new file mode 100644
index 00000000..67c8b5f0
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
@@ -0,0 +1,85 @@
+<!-- 85 lines of launch XML; markup not recoverable from this copy of the patch -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
new file mode 100644
index 00000000..15c389d2
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
@@ -0,0 +1,83 @@
+<!-- 83 lines of launch XML; markup not recoverable from this copy of the patch -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch
new file mode 100644
index 00000000..1125d783
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch
@@ -0,0 +1,90 @@
+<!-- 90 lines of launch XML; markup not recoverable from this copy of the patch -->
\ No newline at end of file
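Only the diff headers of the four launch files above survive; their XML bodies are gone. For orientation, a BehaviorMetrics CARLA launch file of this kind is typically a thin wrapper around the upstream carla_ros_bridge plus an ego-vehicle spawner. A minimal illustrative skeleton (the include paths and argument names follow the carla_ros_bridge ROS 1 package; the actual contents of these files are not recoverable from this patch):

    <launch>
      <include file="$(find carla_ros_bridge)/launch/carla_ros_bridge.launch">
        <arg name="town" value="Town02"/>
      </include>
      <include file="$(find carla_spawn_objects)/launch/carla_spawn_objects.launch"/>
    </launch>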
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py b/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py
new file mode 100644
index 00000000..293f1348
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# license removed for brevity
+
+import rospy
+import numpy as np
+import roslib
+import tf
+from nav_msgs.msg import Odometry
+from std_msgs.msg import Float64
+from sensor_msgs.msg import PointCloud2
+from geometry_msgs.msg import PoseWithCovarianceStamped
+
+from sensor_msgs.msg import Imu
+
+from sensor_msgs.msg import Image
+from sensor_msgs.msg import CameraInfo
+
+import sys
+import cv2
+from std_msgs.msg import String
+from cv_bridge import CvBridge, CvBridgeError
+
+pub = []
+
+def image_update(msg):
+    # re-encode the incoming image and re-stamp it with the ATLAS camera frame
+    bridge = CvBridge()
+    new_img_cv = bridge.imgmsg_to_cv2(msg, "bgr8")
+    new_img = bridge.cv2_to_imgmsg(new_img_cv, "bgr8")
+    new_img.header = msg.header
+    new_img.header.frame_id = "pylon_camera"
+    pub.publish(new_img)
+
+def image_info_update(msg):
+    new_img_info = msg
+    new_img_info.header.frame_id = "pylon_camera"
+    new_img_info.distortion_model = "plumb_bob"
+
+    # copy the camera info of atlas here
+    new_img_info.D = [-0.288902, 0.085422, 0.0013, -6.3e-05, 0.0]
+    new_img_info.K = [664.9933754342509, 0.0, 1024.0, 0.0, 664.9933754342509, 768.0, 0.0, 0.0, 1.0]
+    new_img_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
+    new_img_info.P = [664.9933754342509, 0.0, 1024.0, 0.0, 0.0, 664.9933754342509, 768.0, 0.0, 0.0, 0.0, 1.0, 0.0]
+
+    pub_2.publish(new_img_info)
+
+if __name__ == '__main__':
+    rospy.init_node('camera_rename')
+    # pub = rospy.Publisher('/ada/Lidar32/point_cloud', PointCloud2, queue_size=1)
+    pub = rospy.Publisher('/ada/rgb_front/image_color', Image, queue_size=1)
+    pub_2 = rospy.Publisher('/ada/rgb_front/camera_info', CameraInfo, queue_size=1)
+
+    #img_sub = rospy.Subscriber('/carla/ego_vehicle/camera/rgb/rgb_front/image_color', Image, image_update)
+    img_sub = rospy.Subscriber('/carla/ego_vehicle/rgb_front/image', Image, image_update)
+    #img_inf_sub = rospy.Subscriber('/carla/ego_vehicle/camera/rgb/rgb_front/camera_info', CameraInfo, image_info_update)
+    img_inf_sub = rospy.Subscriber('/carla/ego_vehicle/rgb_front/camera_info', CameraInfo, image_info_update)
+
+    rospy.spin()
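image_transform.py republishes the CARLA front camera as if it came from the ATLAS vehicle's Pylon camera, including its calibration. The K matrix above encodes the pinhole intrinsics; unpacking it with plain NumPy, for illustration:

    import numpy as np

    K = np.array([664.9933754342509, 0.0, 1024.0,
                  0.0, 664.9933754342509, 768.0,
                  0.0, 0.0, 1.0]).reshape(3, 3)
    fx, fy = K[0, 0], K[1, 1]  # focal lengths in pixels
    cx, cy = K[0, 2], K[1, 2]  # principal point

With the principal point at (1024, 768), this calibration assumes a 2048x1536 image, so the republished CARLA camera should be configured to that resolution for the copied intrinsics to be meaningful.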
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch
new file mode 100644
index 00000000..cc7405d8
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch
@@ -0,0 +1,78 @@
+<!-- 78 lines of launch XML; markup not recoverable from this copy of the patch -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/default_carla.yml b/behavior_metrics/configs/CARLA/default_carla.yml
index ddc3427c..c4466576 100644
--- a/behavior_metrics/configs/CARLA/default_carla.yml
+++ b/behavior_metrics/configs/CARLA/default_carla.yml
@@ -33,9 +33,9 @@ Behaviors:
                     Topic: '/carla/ego_vehicle/vehicle_control_cmd'
                     MaxV: 3
                     MaxW: 0.3
-        BrainPath: 'brains/CARLA/brain_carla_slow_and_turn.py'
+        BrainPath: 'brains/CARLA/brain_carla_autopilot.py'
         PilotTimeCycle: 50 # Turn up to reduce number of control decisions
-        AsyncMode: True # Set to False to control simulator time
+        AsyncMode: False # Set to False to control simulator time
         Parameters:
             Model: ''
             ImageCropped: True
@@ -47,7 +47,7 @@ Behaviors:
            ImageTranform: ''
        Type: 'CARLA'
    Simulation:
-        World: configs/CARLA/CARLA_launch_files/town_02_anticlockwise.launch
+        World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
        RandomSpawnPoint: False
    Dataset:
        In: '/tmp/my_bag.bag'
diff --git a/behavior_metrics/configs/CARLA/default_carla_autopilot.yml b/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
index 2eeeb996..374e8036 100644
--- a/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
+++ b/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
@@ -36,6 +36,7 @@ Behaviors:
         BrainPath: 'brains/CARLA/brain_carla_autopilot.py'
         PilotTimeCycle: 300
+        AsyncMode: False # Set to False to control simulator time
         Parameters:
             ImageCropped: True
             ImageSize: [ 100,50 ]
@@ -47,7 +48,8 @@ Behaviors:
        Type: 'CARLA'
        RandomSpawnPoint: False
    Simulation:
-        World: configs/CARLA/CARLA_launch_files/town_01_anticlockwise.launch
+        World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
+        RandomSpawnPoint: False
    Dataset:
        In: '/tmp/my_bag.bag'
        Out: ''
diff --git a/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml b/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
index 46fae066..bcd84b52 100644
--- a/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
+++ b/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
@@ -33,11 +33,11 @@ Behaviors:
                     Topic: '/carla/ego_vehicle/vehicle_control_cmd'
                     MaxV: 3
                     MaxW: 0.3
-        BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py'
+        BrainPath: 'brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py'
         PilotTimeCycle: 50 # Turn up to reduce number of control decisions
         AsyncMode: True # Set to False to control simulator time
         Parameters:
-            Model: 'pilotnet.h5'
+            Model: 'subjective_vision_pilotnet/pilotnet_starstar_traffic_6.h5'
             ImageCropped: True
             ImageSize: [ 200,66 ]
             ImageNormalized: True
diff --git a/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml b/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml
new file mode 100644
index 00000000..07944ed0
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml
@@ -0,0 +1,80 @@
+Behaviors:
+    Robot:
+        Sensors:
+            Cameras:
+                Camera_0:
+                    Name: 'camera_0'
+                    Topic: '/carla/ego_vehicle/rgb_front/image'
+                Camera_1:
+                    Name: 'camera_1'
+                    Topic: '/carla/ego_vehicle/rgb_view/image'
+                Camera_2:
+                    Name: 'camera_2'
+                    Topic: '/ada/rgb_front/image_color'
+                Camera_3:
+                    Name: 'camera_3'
+                    Topic: '/ada/rgb_front/image_rect_color'
+                #Camera_2:
+                #    Name: 'camera_2'
+                #    Topic: '/carla/ego_vehicle/semantic_segmentation_front/image'
+                #Camera_3:
+                #    Name: 'camera_3'
+                #    Topic: '/carla/ego_vehicle/dvs_front/image'
+            Pose3D:
+                Pose3D_0:
+                    Name: 'pose3d_0'
+                    Topic: '/carla/ego_vehicle/odometry'
+            BirdEyeView:
+                BirdEyeView_0:
+                    Name: 'bird_eye_view_0'
+                    Topic: ''
+            Speedometer:
+                Speedometer_0:
+                    Name: 'speedometer_0'
+                    Topic: '/carla/ego_vehicle/speedometer'
+        Actuators:
+            CARLA_Motors:
+                Motors_0:
+                    Name: 'motors_0'
+                    Topic: '/carla/ego_vehicle/vehicle_control_cmd'
+                    MaxV: 3
+                    MaxW: 0.3
+        BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py'
+        PilotTimeCycle: 50 # Turn up to reduce number of control decisions
+        AsyncMode: True # Set to False to control simulator time
+        Parameters:
+            Model: 'pilotnet_no_visual_kinematic.h5'
+            ImageCropped: True
+            ImageSize: [ 200,66 ]
+            ImageNormalized: True
+            PredictionsNormalized: True
+            GPU: True
+            UseOptimized: False
+            ImageTranform: ''
+        Type: 'CARLA'
+    Simulation:
+        World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
+        RandomSpawnPoint: False
+    Dataset:
+        In: '/tmp/my_bag.bag'
+        Out: ''
+    Stats:
+        Out: './'
+        PerfectLap: './perfect_bags/lap-simple-circuit.bag'
+    Layout:
+        Frame_0:
+            Name: frame_0
+            Geometry: [1, 1, 1, 1]
+            Data: rgbimage
+        Frame_1:
+            Name: frame_1
+            Geometry: [0, 1, 1, 1]
+            Data: rgbimage
+        Frame_2:
+            Name: frame_2
+            Geometry: [0, 2, 1, 1]
+            Data: rgbimage
+        Frame_3:
+            Name: frame_3
+            Geometry: [1, 2, 1, 1]
+            Data: rgbimage
diff --git a/behavior_metrics/robot/interfaces/speedometer.py b/behavior_metrics/robot/interfaces/speedometer.py
index 4a75a079..f0723659 100644
--- a/behavior_metrics/robot/interfaces/speedometer.py
+++ b/behavior_metrics/robot/interfaces/speedometer.py
@@ -22,7 +22,7 @@ def __init__(self):
         self.timeStamp = 0 # Time stamp [s]
 
     def __str__(self):
-        s = "Speedometer: {\n x: " + str(self.x) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
+        s = "Speedometer: {\n data: " + str(self.data) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
         return s
 
diff --git a/behavior_metrics/utils/environment.py b/behavior_metrics/utils/environment.py
index 8225c697..e400b708 100644
--- a/behavior_metrics/utils/environment.py
+++ b/behavior_metrics/utils/environment.py
@@ -73,9 +73,13 @@ def launch_env(launch_file, random_spawn_point=False, carla_simulator=False, con
     tree = ET.parse(ROOT_PATH + '/' + launch_file)
     root = tree.getroot()
     quality = root.find(".//*[@name=\"quality\"]")
+
     if quality == None:
-        subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+        print('>>>>>>>> HIGH QUALITY RENDERING!')
+        subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+        #subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
     elif quality.attrib['default'] == 'Low':
+        print('>>>>>>>> LOW QUALITY RENDERING!')
         subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
         #subprocess.Popen(["/home/jderobot/Documents/Projects/carla_simulator_0_9_13/CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
     logger.info("SimulatorEnv: launching simulator server.")
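The environment.py change keys off an optional quality argument in the chosen launch file: if no element with that name exists, CarlaUE4.sh starts at its default (high) quality, and if the element's default attribute is 'Low', the server is started with -quality-level=Low. Because root.find(".//*[@name=\"quality\"]") matches any element with that name attribute, a launch file opts in with a single line anywhere in its body (illustrative; any of the launch files added above could carry it):

    <arg name="quality" default="Low"/>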