Commit

Merge branch 'ltl_configue' of github.com:bark-simulator/bark-ml into ltl_configue
acarcelik committed Jan 16, 2024
2 parents 78d39e9 + ca477c1 commit e2ff59c
Showing 5 changed files with 31 additions and 20 deletions.
5 changes: 4 additions & 1 deletion .github/workflows/main.yml
@@ -14,8 +14,9 @@ jobs:
         run: virtualenv -p python3 ./bark_ml/python_wrapper/venv --system-site-packages
       - name: Entering the virtual python environment..
         run: . ./bark_ml/python_wrapper/venv/bin/activate
-      - name: Installing and upgrading pip packages..
+      - name: Installing and upgrading pip..
         run: pip install --upgrade pip
+      - name: Installing and upgrading pip packages..
         run: pip install --upgrade --trusted-host pypi.org -r ./utils/docker/installers/requirements.txt
       - name: Installing Tensorflow packages..
         run: pip install tensorflow-addons==0.11.2 graph-nets==1.1.0 ray==1.0.0
@@ -25,6 +26,8 @@ jobs:
         run: pip install torch==1.9.0+cpu torchvision -f https://download.pytorch.org/whl/torch_stable.html
       - name: Add torch environment variable
         run: export LD_LIBRARY_PATH=./bark_ml/python_wrapper/venv/lib/python3.7/site-packages/torch/lib/
+      - name: reinstall protobuf3.20.*.
+        run: pip install protobuf==3.20.*
       - name: Running tests
         run: bazel test //bark_ml:unit_tests
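
The added protobuf step pins the 3.20 series, presumably to keep the pinned TensorFlow/graph-nets/ray versions importable against a pre-4.x protobuf. A quick sanity check of the resulting environment could look like this (a hypothetical snippet, not part of the commit):

    # Run inside the venv after the install steps; any 3.20.x patch release passes.
    import google.protobuf
    assert google.protobuf.__version__.startswith("3.20."), google.protobuf.__version__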

27 changes: 15 additions & 12 deletions bark_ml/environments/blueprints/highway/highway.py
@@ -8,14 +8,16 @@
 
 import os
 import numpy as np
-from bark.runtime.viewer.buffered_mp_viewer import BufferedMPViewer
+# from bark.runtime.viewer.buffered_mp_viewer import BufferedMPViewer
+from bark.runtime.viewer.matplotlib_viewer import MPViewer
+from bark.runtime.viewer.video_renderer import VideoRenderer
 from bark.runtime.scenario.scenario_generation.config_with_ease import \
     LaneCorridorConfig, ConfigWithEase
 from bark.core.world.goal_definition import GoalDefinitionPolygon
 from bark.core.geometry import Polygon2d, Point2d
 
 from bark_ml.environments.blueprints.blueprint import Blueprint
-from bark_ml.evaluators.evaluator_configs import RewardShapingEvaluator
+from bark_ml.evaluators.general_evaluator import GeneralEvaluator
 from bark_ml.behaviors.cont_behavior import BehaviorContinuousML
 from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML
 from bark_ml.core.observers import NearestObserver
@@ -64,7 +66,7 @@ def __init__(self,
 
     ego_lane_id = 2
     lane_configs = []
-    for i in range(0, 4):
+    for i in range(1, 4):
       is_controlled = True if (ego_lane_id == i) else None
       s_min = 0
       s_max = 250
@@ -92,18 +94,19 @@ def __init__(self,
         random_seed=random_seed,
         params=params,
         lane_corridor_configs=lane_configs)
 
+    dt = 0.2
     if viewer:
-      viewer = BufferedMPViewer(
-        params=params,
-        x_range=[-55, 55],
-        y_range=[-55, 55],
-        follow_agent_id=True)
-      dt = 0.2
+      # viewer = MPViewer(params=params,
+      #                   use_world_bounds=True)
+      viewer = MPViewer(params=params,
+                        x_range=[-150, 150],
+                        y_range=[-150, 150],
+                        follow_agent_id=True)
+      if params["Experiment"]["ExportVideos"]:
+        viewer = VideoRenderer(renderer=viewer, world_step_time=dt)
 
-    params["ML"]["RewardShapingEvaluator"]["PotentialVelocityFunctor"][
-      "DesiredVel", "Desired velocity for the ego agent.", 20]
-    evaluator = RewardShapingEvaluator(params)
+    evaluator = GeneralEvaluator(params)
     observer = NearestObserver(params)
     ml_behavior = ml_behavior
 
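With these changes the highway blueprint renders through a plain MPViewer over a fixed +/-150 m window, wraps it in a VideoRenderer only when the Experiment/ExportVideos flag is set, and scores episodes with the GeneralEvaluator instead of the RewardShapingEvaluator. For context, a blueprint configured this way is typically driven through bark-ml's SingleAgentRuntime, roughly as in the project README (a sketch, assuming the ContinuousHighwayBlueprint export and a 2D continuous action of steering rate and acceleration):

    import numpy as np
    from bark.runtime.commons.parameters import ParameterServer
    from bark_ml.environments.blueprints import ContinuousHighwayBlueprint
    from bark_ml.environments.single_agent_runtime import SingleAgentRuntime

    params = ParameterServer()
    bp = ContinuousHighwayBlueprint(params)
    env = SingleAgentRuntime(blueprint=bp, render=True)
    env.reset()
    for _ in range(5):
      # Zero steering rate and zero acceleration, just to drive the loop.
      observed_state, reward, done, info = env.step(np.array([0., 0.]))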
12 changes: 6 additions & 6 deletions bark_ml/environments/blueprints/merging/merging.py
@@ -7,7 +7,8 @@
 # https://opensource.org/licenses/MIT
 
 import os
-from bark.runtime.viewer.buffered_mp_viewer import BufferedMPViewer
+# from bark.runtime.viewer.buffered_mp_viewer import BufferedMPViewer
+from bark.runtime.viewer.matplotlib_viewer import MPViewer
 from bark.runtime.scenario.scenario_generation.config_with_ease import \
     LaneCorridorConfig, ConfigWithEase
 from bark.core.world.opendrive import XodrDrivingDirection
@@ -96,11 +97,10 @@ def __init__(self,
         params=params,
         lane_corridor_configs=[left_lane, right_lane])
     if viewer:
-      viewer = BufferedMPViewer(
-        params=params,
-        x_range=[-25, 25],
-        y_range=[-25, 25],
-        follow_agent_id=True)
+      viewer = MPViewer(params=params,
+                        x_range=[-150, 150],
+                        y_range=[-150, 150],
+                        follow_agent_id=True)
     dt = 0.2
     params["ML"]["RewardShapingEvaluator"]["PotentialVelocityFunctor"][
       "DesiredVel", "Desired velocity for the ego agent.", 20]
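
The untouched trailing lines show BARK's ParameterServer idiom: indexing with a ["Name", "description", default] triple declares the parameter and yields its stored or default value, which is how DesiredVel reaches the velocity functor. A simplified stand-in for just that lookup behavior (my sketch, not BARK's implementation):

    class Params(dict):
      def __getitem__(self, key):
        if isinstance(key, tuple):                 # ["Name", "description", default]
          name, _description, default = key
          return self.setdefault(name, default)    # stored value wins over the default
        return super().setdefault(key, Params())   # nested namespaces auto-create

    params = Params()
    desired_vel = params["ML"]["RewardShapingEvaluator"]["PotentialVelocityFunctor"][
      "DesiredVel", "Desired velocity for the ego agent.", 20]
    assert desired_vel == 20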
5 changes: 4 additions & 1 deletion bark_ml/evaluators/evaluator_configs.py
@@ -6,7 +6,7 @@
 from bark.core.world.evaluation import \
     EvaluatorGoalReached, EvaluatorCollisionEgoAgent, \
     EvaluatorStepCount, EvaluatorDrivableArea
-
+from bark.core.geometry import Point2d
 from bark_ml.evaluators.general_evaluator import *
 
 class GoalReached(GeneralEvaluator):
@@ -177,6 +177,9 @@ def __init__(self, params):
     labels_list = []
     for label_conf in rule_config["RuleConfig"]["labels"]:
       label_params_dict = label_conf["params"].ConvertToDict()
+      if label_conf["type"] == "EgoBeyondPointLabelFunction" or label_conf["type"] == "AgentBeyondPointLabelFunction":
+        merge_point = label_params_dict["point"]
+        label_params_dict["point"] = Point2d(merge_point[0],merge_point[1])
       labels_list.append(eval("{}(*(label_params_dict.values()))".format(label_conf["type"])))
       print("labels_list:",labels_list)

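The new branch converts the raw [x, y] pair from the rule config into a geometry Point2d before the label function is constructed via eval from the dict's values. A self-contained illustration of the pattern (stand-in classes and made-up values; the real Point2d and label functions live in bark.core):

    class Point2d:                                # stand-in for bark.core.geometry.Point2d
      def __init__(self, x, y):
        self.x, self.y = x, y

    class EgoBeyondPointLabelFunction:            # stand-in for the real label function
      def __init__(self, label_str, point):
        self.label_str, self.point = label_str, point

    label_conf = {"type": "EgoBeyondPointLabelFunction",
                  "params": {"label_str": "beyond_merge", "point": [104.0, -167.5]}}
    label_params_dict = dict(label_conf["params"])
    merge_point = label_params_dict["point"]
    label_params_dict["point"] = Point2d(merge_point[0], merge_point[1])
    # As in the diff: positional construction relies on the dict's insertion order.
    label = eval("{}(*(label_params_dict.values()))".format(label_conf["type"]))
    assert isinstance(label.point, Point2d)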
2 changes: 2 additions & 0 deletions bark_ml/evaluators/general_evaluator.py
@@ -238,12 +238,14 @@ def __call__(self, observed_world, action, eval_results):
       prev_state, cur_state = self.GetPrevAndCurState(observed_world)
       prev_dist = self.DistanceToPolygon(observed_world, prev_state)
       cur_dist = self.DistanceToPolygon(observed_world, cur_state)
+      # print("!!!!!!!!!!!!!current distance to goal is ", cur_dist)
       prev_pot = self.DistancePotential(
         prev_dist, self._params["MaxDist", "", 100.],
         self._params["DistExponent", "", 0.2])
       cur_pot = self.DistancePotential(
         cur_dist, self._params["MaxDist", "", 100.],
         self._params["DistExponent", "", 0.2])
+      print("!!!!!!!!!!!!!current potential is ", 0.99*cur_pot - prev_pot)
       return False, self.WeightedReward(self._params["Gamma", "", 0.99]*cur_pot - prev_pot), {}
     return False, 0, {}

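The shaped value returned above is a potential-based shaping term, F = gamma * Phi(s') - Phi(s), with the potential Phi derived from the distance to the goal polygon; shaping of this form is known to leave the optimal policy unchanged (Ng et al., 1999). A minimal sketch of just the arithmetic on the changed lines (DistancePotential's body is not shown in this diff, so its functional form below is an assumption):

    def distance_potential(dist, max_dist=100., dist_exponent=0.2):
      # Assumed placeholder: approaches 1 as the agent nears the goal.
      # Defaults mirror the MaxDist/DistExponent params above.
      return 1. - (min(dist, max_dist) / max_dist) ** dist_exponent

    def shaping_term(prev_dist, cur_dist, gamma=0.99):
      # Mirrors the changed return line: Gamma * cur_pot - prev_pot.
      return gamma * distance_potential(cur_dist) - distance_potential(prev_dist)

    print(shaping_term(prev_dist=50., cur_dist=45.))  # positive when moving toward the goal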
