add naivesinglelane, fix evaluator, add goal_distance evaluator
Xiangzhong Liu committed Dec 4, 2023
1 parent 79cff9a commit fd9eb6a
Showing 5 changed files with 203 additions and 13 deletions.
3 changes: 2 additions & 1 deletion bark_ml/environments/BUILD
@@ -14,7 +14,8 @@ py_library(
deps = ["//bark_ml/observers:observers",
"//bark_ml/evaluators:evaluators",
"//bark_ml/behaviors:behaviors",
"@bark_project//bark/runtime/scenario/scenario_generation:scenario_generation"],
"@bark_project//bark/runtime/scenario/scenario_generation:scenario_generation",
"@bark_project//bark/runtime/commons:commons"],
visibility = ["//visibility:public"],
)

128 changes: 122 additions & 6 deletions bark_ml/environments/blueprints/single_lane/single_lane.py
@@ -12,12 +12,13 @@
from bark.runtime.viewer.video_renderer import VideoRenderer
from bark.runtime.scenario.scenario_generation.config_with_ease import \
LaneCorridorConfig, ConfigWithEase
from bark.runtime.commons.xodr_parser import XodrParser
from bark.core.world.opendrive import *
- from bark.core.geometry import Line2d, GetPointAtS, GetTangentAngleAtS
+ from bark.core.geometry import Line2d, GetPointAtS, GetTangentAngleAtS, Polygon2d, Point2d
from bark.core.models.behavior import BehaviorDynamicModel, BehaviorMacroActionsFromParamServer
from bark.core.world.map import MapInterface
from bark.core.world.opendrive import XodrDrivingDirection
- from bark.core.world.goal_definition import GoalDefinitionStateLimitsFrenet
+ from bark.core.world.goal_definition import GoalDefinitionStateLimitsFrenet, GoalDefinitionPolygon
from bark.core.models.dynamic import SingleTrackSteeringRateModel, SingleTrackModel
from bark.core.models.observer import ObserverModelParametric

@@ -153,7 +154,103 @@ def reset(self):
def velocity(self):
return 0.0

class NaiveGoalSingleLaneLaneCorridorConfig(LaneCorridorConfig):
"""
Configures a single lane with a simple polygon goal at the end of the lane corridor.
"""
def __init__(self,
params=None,
**kwargs):
super(NaiveGoalSingleLaneLaneCorridorConfig, self).__init__(params,
**kwargs)

def goal(self, world):
lane_corr = self._road_corridor.lane_corridors[0]
goal_polygon = Polygon2d([0, 0, 0], [Point2d(-1, -1), Point2d(-1, 1), Point2d(1, 1), Point2d(1, -1)])
goal_polygon = goal_polygon.Translate(Point2d(lane_corr.center_line.ToArray()[-1, 0], lane_corr.center_line.ToArray()[-1, 1]))
return GoalDefinitionPolygon(goal_polygon)
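
For intuition, the goal above is a 2 m x 2 m square translated to the last point of the lane corridor's center line. A minimal numpy sketch of the same construction (BARK-free; the center-line points are illustrative):

import numpy as np

# Center line as an (N, 2) array of x/y points; the goal square is centered
# on its final point, mirroring Polygon2d(...).Translate(...) above.
center_line = np.array([[0., 0.], [100., 0.], [200., 0.]])
square = np.array([[-1., -1.], [-1., 1.], [1., 1.], [1., -1.]])
goal_corners = square + center_line[-1]  # corners of the translated goal region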


class NaiveSingleLaneBlueprint(Blueprint):
"""The NaiveSingleLaneBlueprint blueprint sets up a single lane scenario with initial
conditions.
One lane SingleLane, with the ego vehicle following leading vehicle.
"""

def __init__(self,
params=None,
num_scenarios=250,
random_seed=0,
ml_behavior=None,
viewer=True,
dt=0.2,
xodr_path=None,
goalConfigs=None,
mode="medium"):
ds_min = 35. # [m]
ds_max = 50.
if mode == "dense":
ds_min = 20. # [m]
ds_max = 35.
if mode == "medium":
ds_min = 30. # [m]
ds_max = 40.
params["World"]["remove_agents_out_of_map"] = False
lane_configs = []
s_min = 20.
s_max = 80.
local_params = params.clone()
local_params["BehaviorIDMClassic"]["DesiredVelocity"] = np.random.uniform(12, 17)
lane_conf = NaiveGoalSingleLaneLaneCorridorConfig(params=local_params,
road_ids=[16],
lane_corridor_id=0,
min_vel=10,
max_vel=17,
ds_min=ds_min,
ds_max=ds_max,
s_min=s_min,
s_max=s_max,
controlled_ids=True)
lane_configs.append(lane_conf)

# Map Definition
if xodr_path is None:
xodr_path = os.path.join(
os.path.dirname(__file__),
"../../../environments/blueprints/single_lane/single_lane.xodr")
print(f"xodr map file path is: {xodr_path}.")
map_interface = MapInterface()
xodr_parser = XodrParser(xodr_path)
map_interface.SetOpenDriveMap(xodr_parser.map)

scenario_generation = \
ConfigWithEase(
num_scenarios=num_scenarios,
random_seed=random_seed,
params=params,
map_interface=map_interface,
lane_corridor_configs=lane_configs)

if viewer:
viewer = MPViewer(params=params,
x_range=[-150, 150],
y_range=[-150, 150],
follow_agent_id=True)
if params["Experiment"]["ExportVideos"]:
viewer = VideoRenderer(renderer=viewer, world_step_time=dt)

evaluator = GeneralEvaluator(params)
observer = NearestAgentsObserver(params)

super().__init__(
scenario_generation=scenario_generation,
viewer=viewer,
dt=dt,
evaluator=evaluator,
observer=observer,
ml_behavior=ml_behavior)


class SingleLaneBlueprint(Blueprint):
"""The SingleLane blueprint sets up a merging scenario with initial
conditions.
@@ -255,14 +352,13 @@ def __init__(self,
observer_model=observer_model)
if viewer:
viewer = MPViewer(params=params,
- x_range=[-100, 100],
- y_range=[-100, 100],
+ x_range=[-150, 150],
+ y_range=[-150, 150],
follow_agent_id=True)
if params["Experiment"]["ExportVideos"]:
- viewer = VideoRenderer(renderer=viewer,world_step_time=dt)
+ viewer = VideoRenderer(renderer=viewer, world_step_time=dt)
evaluator = GeneralEvaluator(params)
observer = NearestAgentsObserver(params)
- ml_behavior = ml_behavior

super().__init__(
scenario_generation=scenario_generation,
@@ -316,4 +412,24 @@ def __init__(self,
dt=dt,
laneCorrConfigs=laneCorrConfigs,
csv_path=csv_path,
goalConfigs=goalConfigs)

class ContinuousNaiveSingleLaneBlueprint(NaiveSingleLaneBlueprint):
def __init__(self,
params=None,
num_scenarios=25,
random_seed=0,
viewer=True,
dt=0.2,
xodr_path=None,
goalConfigs={}):
ml_behavior = BehaviorContinuousML(params)
NaiveSingleLaneBlueprint.__init__(self,
params=params,
num_scenarios=num_scenarios,
random_seed=random_seed,
ml_behavior=ml_behavior,
viewer=viewer,
dt=dt,
xodr_path=xodr_path,
goalConfigs=goalConfigs)
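
A hedged usage sketch for the new blueprint, assuming bark-ml's gym-style SingleAgentRuntime; the parameter values are illustrative:

from bark.runtime.commons.parameters import ParameterServer
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime

params = ParameterServer()
bp = ContinuousNaiveSingleLaneBlueprint(params=params, num_scenarios=5, viewer=False)
env = SingleAgentRuntime(blueprint=bp, render=False)
env.reset()
# step with a random continuous action (assumes the runtime exposes a gym-like API)
obs, reward, done, info = env.step(env.action_space.sample())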
4 changes: 2 additions & 2 deletions bark_ml/environments/blueprints/single_lane/single_lane.xodr
@@ -2,10 +2,10 @@
<OpenDRIVE>
<header revMajor="1" revMinor="4" name="" version="1.00" date="Thu Jun 6 10:01:32 2019" north="0.0000000000000000e+00" south="0.0000000000000000e+00" east="0.0000000000000000e+00" west="0.0000000000000000e+00">
</header>
<road name="" length="100" id="16" junction="-1">
<road name="" length="200" id="16" junction="-1">
<type s="0.0000000000000000e+00" type="town"/>
<planView>
<geometry s="0.0000000000000000e+00" x="0" y="0" hdg="0.0" length="100">
<geometry s="0.0000000000000000e+00" x="0" y="0" hdg="0.0" length="200">
<line/>
</geometry>
</planView>
33 changes: 29 additions & 4 deletions bark_ml/evaluators/evaluator_configs.py
@@ -102,6 +102,19 @@ def __init__(self, params):
"pot_vel_functor": PotentialVelocityFunctor(self._params)
})

class RewardShapingEvaluatorMaxSteps(GeneralEvaluator):
def __init__(self, params):
self._params = params["ML"]["RewardShapingEvaluatorMaxSteps"]
super().__init__(
params=self._params,
bark_ml_eval_fns={
"collision_functor" : CollisionFunctor(self._params),
"drivable_area_functor" : DrivableAreaFunctor(self._params),
"pot_center_functor": PotentialCenterlineFunctor(self._params),
"max_step_count_as_goal_functor": MaxStepCountAsGoalFunctor(self._params),
"pot_vel_functor": PotentialVelocityFunctor(self._params)
})
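
A hedged configuration sketch for the new evaluator; the key paths follow the nesting visible above and the concrete values are illustrative:

from bark.runtime.commons.parameters import ParameterServer

params = ParameterServer()
# functor params live under params["ML"]["RewardShapingEvaluatorMaxSteps"]
functor_params = params["ML"]["RewardShapingEvaluatorMaxSteps"]["MaxStepCountAsGoalFunctor"]
functor_params["MaxStepCount"] = 220        # episode length that counts as success
functor_params["StepCountAsGoalReward"] = 1.
evaluator = RewardShapingEvaluatorMaxSteps(params)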

class SmoothnessSingleLaneEvaluator(GeneralEvaluator):
def __init__(self, params):
self._params = params["ML"]["SmoothnessSingleLaneEvaluator"]
@@ -134,7 +147,10 @@ def __init__(self, params):
"PotentialGoalCenterlineFunctor": "pot_goal_center_functor",
"StateActionLoggingFunctor": "state_action_logging_functor",
"CollisionDrivableAreaFunctor" : "collision_drivable_area_functor",
"PotentialGoalReachedVelocityFunctor": "pot_goal_vel_functor"
"PotentialGoalReachedVelocityFunctor": "pot_goal_vel_functor",
"MaxStepCountAsGoalFunctor": "max_step_count_as_goal_functor",
"PotentialGoalPolyFunctor": "pot_goal_poly_functor",
"TrafficRuleLTLFunctor": "traffic_rule_ltl_functor"
}
self._params = params
functor_configs = self._params["ML"]["EvaluatorConfigurator"]["EvaluatorConfigs"]["FunctorConfigs"]
@@ -153,21 +169,30 @@ def __init__(self, params):
"step_count" : lambda: EvaluatorStepCount(),
"drivable_area" : lambda: EvaluatorDrivableArea()
}

rules_configs = self._params["ML"]["EvaluatorConfigurator"]["RulesConfigs"]

for rule_config in rules_configs["Rules"]:
# parse label function for each rule
labels_list = []
for label_conf in rule_config["RuleConfig"]["labels"]:
label_params_dict = label_conf["params"].ConvertToDict()
labels_list.append(eval("{}(**label_params_dict)".format(label_conf["type"])))
labels_list.append(eval("{}(*(label_params_dict.values()))".format(label_conf["type"])))
print("labels_list:",labels_list)

# instantiate a rule evaluator for each rule
# TODO: check whether EvaluatorLTL can access private functions in Python
ltl_formula_ = rule_config["RuleConfig"]["params"]["formula"]
bark_evals[rule_config["RuleName"]] = eval("lambda: {}(agent_id=None, ltl_formula=ltl_formula_,label_functions=labels_list)".format(rule_config["RuleConfig"]["type"]))
print("ltl_formula_:",ltl_formula_)
tmp_ltl_settings = {}
tmp_ltl_settings["agent_id"] = 1
tmp_ltl_settings["ltl_formula"] = ltl_formula_
tmp_ltl_settings["label_functions"] = labels_list

tmp_ltl_eval = eval("{}(**tmp_ltl_settings)".format(rule_config["RuleConfig"]["type"]))
bark_evals[rule_config["RuleName"]] = lambda: tmp_ltl_eval
# add rule functors to bark_ml_eval_fns
functor_n_ = rule_config["RuleName"] + "_functor"
functor_n_ = rule_config["RuleName"] + "_ltl_functor"
eval_fns[functor_n_] = eval("{}(rule_config)".format("TrafficRuleLTLFunctor"))

super().__init__(params=self._params, bark_eval_fns=bark_evals, bark_ml_eval_fns=eval_fns)
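
The label construction above switched from keyword to positional expansion of the parsed parameter dict. A minimal sketch of the difference (SomeLabelFunction and its keys are hypothetical):

label_params_dict = {"label_str": "sd_rear", "to_rear": True}  # illustrative
# old: SomeLabelFunction(**label_params_dict) -- the config keys must match the
#      constructor's parameter names exactly
# new: SomeLabelFunction(*label_params_dict.values()) -- values are passed
#      positionally, relying on the dict preserving the config's declaration order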
48 changes: 48 additions & 0 deletions bark_ml/evaluators/general_evaluator.py
@@ -91,6 +91,19 @@ def __call__(self, observed_world, action, eval_results):
"MaxStepCount", "", 220]:
return True, self.WeightedReward(self._params["StepCountReward", "", 0.]), {}
return False, 0, {}

class MaxStepCountAsGoalFunctor(Functor):
def __init__(self, params):
self._params = params["MaxStepCountAsGoalFunctor"]
super().__init__(params=self._params)

def __call__(self, observed_world, action, eval_results):
if eval_results["step_count"] > self._params[
"MaxStepCount", "", 220]:
eval_results["goal_reached"] = True
return True, self.WeightedReward(self._params["StepCountAsGoalReward", "", 1.]), {}
return False, 0, {}
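
A minimal sketch of the new functor's termination semantics, assuming the ParameterServer defaults shown above:

from bark.runtime.commons.parameters import ParameterServer

params = ParameterServer()
functor = MaxStepCountAsGoalFunctor(params)  # MaxStepCount defaults to 220
results = {"step_count": 221}
done, reward, info = functor(None, None, results)
# done is True and results["goal_reached"] is set, so reaching the step
# limit is counted as a success rather than a timeout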


class MinMaxVelFunctor(Functor):
@@ -200,6 +213,39 @@ def __call__(self, observed_world, action, eval_results):
self._params["DistExponent", "", 0.2])
return False, self.WeightedReward(self._params["Gamma", "", 0.99]*cur_pot - prev_pot), {}
return False, 0, {}

class PotentialGoalPolyFunctor(PotentialBasedFunctor):
def __init__(self, params):
self._params = params["PotentialGoalPolyFunctor"]
super().__init__(params=self._params)


@staticmethod
def DistancePotential(d, d_max, b):
return 1. - (d/d_max)**b

def DistanceToPolygon(self, observed_world, state):
ego_agent = observed_world.ego_agent
goal_poly = ego_agent.goal_definition.goal_shape
dist = Distance(goal_poly,
Point2d(state[int(StateDefinition.X_POSITION)],
state[int(StateDefinition.Y_POSITION)]))
return dist

def __call__(self, observed_world, action, eval_results):
hist = observed_world.ego_agent.history
if len(hist) >= 2:
prev_state, cur_state = self.GetPrevAndCurState(observed_world)
prev_dist = self.DistanceToPolygon(observed_world, prev_state)
cur_dist = self.DistanceToPolygon(observed_world, cur_state)
prev_pot = self.DistancePotential(
prev_dist, self._params["MaxDist", "", 100.],
self._params["DistExponent", "", 0.2])
cur_pot = self.DistancePotential(
cur_dist, self._params["MaxDist", "", 100.],
self._params["DistExponent", "", 0.2])
return False, self.WeightedReward(self._params["Gamma", "", 0.99]*cur_pot - prev_pot), {}
return False, 0, {}
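
For intuition, the shaping term Gamma*Phi(s') - Phi(s) rewards steps that move the ego toward the goal polygon. A worked numeric sketch with the defaults above:

d_max, b, gamma = 100., 0.2, 0.99        # MaxDist, DistExponent, Gamma defaults
phi = lambda d: 1. - (d / d_max)**b      # DistancePotential above
prev_pot, cur_pot = phi(50.), phi(45.)   # ego moved 5 m closer to the goal
shaping = gamma * cur_pot - prev_pot     # ~= 0.017 > 0: approaching is rewarded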

class PotentialVelocityFunctor(PotentialBasedFunctor):
def __init__(self, params):
@@ -355,12 +401,14 @@ def __init__(self, params):

def __call__(self, observed_world, action, eval_results):
self.traffic_rule_violation_post = eval_results[self._params["RuleName"]]
print("Result from evaluatorLTL:", self.traffic_rule_violation_post)
max_vio_num = self._params["ViolationTolerance", "", 15]
if self.traffic_rule_violation_post < self.traffic_rule_violation_pre:
self.traffic_rule_violation_pre = self.traffic_rule_violation_post
current_traffic_rule_violations = self.traffic_rule_violation_post - self.traffic_rule_violation_pre
self.traffic_rule_violations = self.traffic_rule_violations + current_traffic_rule_violations
self.traffic_rule_violation_pre = self.traffic_rule_violation_post
print("current traffic rule violations:", self.traffic_rule_violations)
if self.traffic_rule_violations > max_vio_num:
return True, 0, {}
return False, self.WeightedReward(current_traffic_rule_violations/max_vio_num), {}
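
A numeric sketch of the bookkeeping above (counts illustrative): the LTL evaluator reports cumulative violations, so the functor differences consecutive readings and accumulates them.

# step t:  pre = 1, post = 3  ->  current = 2, running total += 2
# later:   running total exceeds ViolationTolerance (default 15)
#          ->  the functor returns (True, 0, {}) and the episode terminates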
