Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Overhead experiments #14

Open
wants to merge 13 commits into
base: dev
Choose a base branch
from
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -139,4 +139,5 @@ tmp_cache

# results
testrun*/
results/
results/
experiment_scripts/config-*
3 changes: 2 additions & 1 deletion config/rasc_debug.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ optimal_schedule_metric: "min_avg_rtn_latency"
rescheduling_window: 30
routine_priority_policy: "earliest" # from shortest, longest, earliest, latest
record_results: true
routine_arrival_filename: "arrival_morning.csv"
routine_arrival_filename: "arrival_debug.csv"
overhead_measurement: true
# rescheduling_estimation: true
# rescheduling_accuracy: "reschedule_all"
# mthresh: 1
Expand Down
40 changes: 40 additions & 0 deletions experiment_scripts/overhead_experiments.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Run overhead experiments: sweep every (action-length estimation, rescheduling
# trigger) pair over a temporary copy of ./config, then run a baseline with
# RASC disabled. Overhead measurement is forced on for every run.
#
# Usage: overhead_experiments.sh [morning|afternoon|evening|all]
#   (dataset defaults to "morning")
set -euo pipefail

estimations=(mean p50 p70 p80 p90 p95 p99)
triggers=(reactive anticipatory proactive)
dataset="${1:-morning}"
case "$dataset" in
    morning|afternoon|evening|all)
        # valid dataset; nothing to do
        ;;
    *)
        echo 'dataset is not one of morning|afternoon|evening|all' >&2
        exit 1
        ;;
esac

# Remove leftover config copies from a previous (possibly aborted) run.
rm -rf experiment_scripts/config-*

echo "Dataset: $dataset"
for estimation in "${estimations[@]}"
do
    for trigger in "${triggers[@]}"
    do
        echo "Running overhead experiment ($estimation, $trigger)"
        cfg="./experiment_scripts/config-$estimation-$trigger"
        cp -r ./config "$cfg"
        # NOTE(review): these sed patterns assume unquoted YAML values;
        # rasc_debug.yaml quotes the filename ("arrival_morning.csv") —
        # confirm rasc.yaml uses unquoted values or the substitution is a no-op.
        sed -i "s/action_length_estimation: mean/action_length_estimation: $estimation/g" "$cfg/rasc.yaml"
        sed -i "s/rescheduling_trigger: proactive/rescheduling_trigger: $trigger/g" "$cfg/rasc.yaml"
        # Bug fix: the per-experiment runs previously ignored the dataset
        # argument; apply the same arrival-file substitution the baseline uses.
        sed -i "s/routine_arrival_filename: arrival_morning.csv/routine_arrival_filename: arrival_$dataset.csv/g" "$cfg/rasc.yaml"
        sed -i "s/overhead_measurement: false/overhead_measurement: true/g" "$cfg/rasc.yaml"
        hass -c "$cfg"
        rm -rf "$cfg"
    done
done

# Baseline: RASC disabled entirely, overhead measurement still enabled.
echo "Running overhead experiment baseline"
cfg="./experiment_scripts/config-baseline"
cp -r ./config "$cfg"
sed -i "s/enabled: true/enabled: false/g" "$cfg/rasc.yaml"
sed -i "s/routine_arrival_filename: arrival_morning.csv/routine_arrival_filename: arrival_$dataset.csv/g" "$cfg/rasc.yaml"
sed -i "s/overhead_measurement: false/overhead_measurement: true/g" "$cfg/rasc.yaml"
hass -c "$cfg"
rm -rf "$cfg"
32 changes: 17 additions & 15 deletions homeassistant/components/automation/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import asyncio
from collections.abc import Callable, Mapping
from dataclasses import dataclass
import json
import logging
import os
from typing import Any, Optional, Protocol, cast
Expand Down Expand Up @@ -33,6 +34,7 @@
CONF_ZONE,
DOMAIN_RASCALSCHEDULER,
EVENT_HOMEASSISTANT_STARTED,
OVERHEAD_MEASUREMENT,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
Expand Down Expand Up @@ -262,17 +264,17 @@ def trigger_automations_later(
return

automations = list(component.entities)
arrival_time = 10.0
arrival_time = 0.0
routine_arrivals = dict[str, list[float]]()
routine_aliases = dict[str, str]()
with open(routine_arrival_pathname, encoding="utf-8") as f:
for line in f:
interarrival_time, routine_id, alias = line.strip().split(",")
interarrival_time, routine_id, routine_alias = line.strip().split(",")
arrival_time = arrival_time + float(interarrival_time)
if routine_id not in routine_arrivals:
routine_arrivals[routine_id] = []
routine_arrivals[routine_id].append(arrival_time)
routine_aliases[routine_id] = alias
routine_aliases[routine_id] = routine_alias

async def trigger_automation_later(
automation: BaseAutomationEntity, arrival_time: float
Expand All @@ -285,7 +287,7 @@ async def trigger_automation_later(
)
await asyncio.sleep(arrival_time)
await automation.async_trigger({"trigger": {"platform": None}})
if not config["rasc"].get("enabled"):
if "rasc" not in hass.data:
# if rasc is not enabled, assume all routines take 30 seconds
await asyncio.sleep(30)
hass.bus.async_fire(
Expand All @@ -308,15 +310,15 @@ async def trigger_automation_later(
def handle_routine_ended(event: Event) -> None:
routine_id = event.data["routine_id"].split("-")[0]
remained_routines[routine_id] -= 1
# print(
# json.dumps(
# {
# routine_aliases[routine_id]: remains
# for routine_id, remains in remained_routines.items()
# },
# indent=2,
# )
# )
print(
json.dumps(
{
routine_aliases[routine_id]: remains
for routine_id, remains in remained_routines.items()
},
indent=2,
)
)
if all(
remained_routine == 0 for remained_routine in remained_routines.values()
):
Expand Down Expand Up @@ -410,7 +412,7 @@ async def reload_service_handler(service_call: ServiceCall) -> None:

websocket_api.async_register_command(hass, websocket_config)

if config["rasc"]["overhead_measurement"]:
if config["rasc"].get(OVERHEAD_MEASUREMENT):

def run_experiments(_: Event) -> None:
routine_arrival_filename: str = config["rasc"][
Expand Down Expand Up @@ -725,7 +727,7 @@ async def async_trigger(
reason = f' by {run_variables["trigger"]["description"]}'
if "alias" in run_variables["trigger"]:
alias = f' trigger \'{run_variables["trigger"]["alias"]}\''
self._logger.debug("Automation%s triggered%s", alias, reason)
self._logger.debug("Automation %s triggered%s", alias, reason)

# Create a new context referring to the old context.
parent_id = None if context is None else context.id
Expand Down
102 changes: 99 additions & 3 deletions homeassistant/components/rasc/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,12 @@
import os
import shutil

import numpy as np
import voluptuous as vol

from homeassistant.components.climate import SERVICE_SET_TEMPERATURE
from homeassistant.const import (
ACTION_LENGTH_ESTIMATION,
ANTICIPATORY,
CONF_OPTIMAL_SCHEDULE_METRIC,
CONF_RECORD_RESULTS,
Expand All @@ -18,10 +21,12 @@
CONF_ROUTINE_ARRIVAL_FILENAME,
CONF_ROUTINE_PRIORITY_POLICY,
CONF_SCHEDULING_POLICY,
DO_COMPARISON,
DOMAIN_RASCALRESCHEDULER,
DOMAIN_RASCALSCHEDULER,
EARLIEST,
EARLY_START,
EVENT_HOMEASSISTANT_STARTED,
FCFS,
FCFS_POST,
GLOBAL_FIRST,
Expand All @@ -35,6 +40,7 @@
LONGEST,
MAX_AVG_PARALLELISM,
MAX_P05_PARALLELISM,
MEAN_ESTIMATION,
MIN_AVG_IDLE_TIME,
MIN_AVG_RTN_LATENCY,
MIN_AVG_RTN_WAIT_TIME,
Expand All @@ -46,6 +52,13 @@
NONE,
OPTIMALW,
OPTIMALWO,
OVERHEAD_MEASUREMENT,
P50_ESTIMATION,
P70_ESTIMATION,
P80_ESTIMATION,
P90_ESTIMATION,
P95_ESTIMATION,
P99_ESTIMATION,
PROACTIVE,
REACTIVE,
RESCHEDULE_ALL,
Expand All @@ -68,9 +81,20 @@
CONF_RESULTS_DIR,
DOMAIN,
LOGGER,
RASC_ACTION,
RASC_ENTITY_ID,
RASC_EXPERIMENT_SETTING,
RASC_FIXED_HISTORY,
RASC_INTERRUPTION_MOMENT,
RASC_INTERRUPTION_TIME,
RASC_SLO,
RASC_THERMOSTAT,
RASC_THERMOSTAT_START,
RASC_THERMOSTAT_TARGET,
RASC_USE_UNIFORM,
RASC_WORST_Q,
SUPPORTED_PLATFORMS,
CONF_ENABLED,
)
from .helpers import OverheadMeasurement
from .rescheduler import RascalRescheduler
Expand Down Expand Up @@ -107,13 +131,16 @@
]
supported_routine_priority_policies = [SHORTEST, LONGEST, EARLIEST, LATEST]
supported_rescheduling_accuracies = [RESCHEDULE_ALL, RESCHEDULE_SOME]
# Supported per-action length estimation strategies: historical mean or a
# latency percentile. (Previous version duplicated the p50–p90 entries.)
supported_action_length_estimations = [
    MEAN_ESTIMATION,
    P50_ESTIMATION,
    P70_ESTIMATION,
    P80_ESTIMATION,
    P90_ESTIMATION,
    P95_ESTIMATION,
    P99_ESTIMATION,
]

CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_ENABLED, default=True): bool,
vol.Optional("overhead_measurement", default=False): bool,
vol.Optional(OVERHEAD_MEASUREMENT, default=False): bool,
vol.Optional(ACTION_LENGTH_ESTIMATION, default="mean"): vol.In(supported_action_length_estimations),
vol.Optional(DO_COMPARISON, default=True): bool,
vol.Optional(CONF_SCHEDULING_POLICY, default=TIMELINE): vol.In(
supported_scheduling_policies
),
Expand All @@ -140,6 +167,12 @@
),
vol.Optional("mthresh", default=1.0): cv.positive_float, # seconds
vol.Optional("mithresh", default=2.0): cv.positive_float, # seconds
# vol.Optional(RESCHEDULING_ESTIMATION, default=True): cv.boolean,
# vol.Optional(RESCHEDULING_ACCURACY, default=RESCHEDULE_ALL): vol.In(
# supported_rescheduling_accuracies
# ),
# vol.Optional("mthresh", default=1.0): cv.positive_float, # seconds
# vol.Optional("mithresh", default=2.0): cv.positive_float, # seconds
**{
vol.Optional(platform.value): vol.Schema(
{
Expand All @@ -149,7 +182,22 @@
)
for platform in SUPPORTED_PLATFORMS
},
},
vol.Optional(RASC_USE_UNIFORM): cv.boolean,
vol.Optional(RASC_FIXED_HISTORY): cv.boolean,
vol.Optional(RASC_EXPERIMENT_SETTING): vol.Schema(
{
vol.Required(RASC_ENTITY_ID): cv.string,
vol.Required(RASC_ACTION): cv.string,
vol.Optional(RASC_INTERRUPTION_MOMENT): cv.positive_float,
vol.Optional(RASC_THERMOSTAT): vol.Schema(
{
vol.Required(RASC_THERMOSTAT_START): cv.string,
vol.Required(RASC_THERMOSTAT_TARGET): cv.string,
}
),
}
),
}
)
},
extra=vol.ALLOW_EXTRA,
Expand All @@ -158,6 +206,49 @@
LOGGER.level = logging.DEBUG


def run_experiments(hass: HomeAssistant, rasc: RASCAbstraction):
    """Return a startup callback that runs the interruption experiments.

    The returned coroutine sweeps the interruption point from 0% to 100%
    (in 5% steps) of the action's historical average completion time.  For
    each level it issues a ``set_temperature`` call carrying interruption
    parameters, waits for the ack/start/complete coroutines, then issues a
    second call to restore the starting temperature.
    """

    async def wrapper(_):
        settings = rasc.config[RASC_EXPERIMENT_SETTING]
        # History key: "<entity_id>,<action>" (plus thermostat start/target
        # temperatures for set_temperature actions).
        key = f"{settings[RASC_ENTITY_ID]},{settings[RASC_ACTION]}"
        if settings[RASC_ACTION] == SERVICE_SET_TEMPERATURE:
            if RASC_THERMOSTAT not in settings:
                raise ValueError("Thermostat setting not found")
            key += (
                f",{settings[RASC_THERMOSTAT][RASC_THERMOSTAT_START]}"
                f",{settings[RASC_THERMOSTAT][RASC_THERMOSTAT_TARGET]}"
            )
        # RASC_INTERRUPTION_MOMENT is vol.Optional (no default) in
        # CONFIG_SCHEMA, so indexing it directly raised a bare KeyError when
        # absent; fail with an explicit error, mirroring the thermostat check.
        if RASC_INTERRUPTION_MOMENT not in settings:
            raise ValueError("Interruption moment setting not found")
        interruption_moment = settings[RASC_INTERRUPTION_MOMENT]
        for i, level in enumerate(range(0, 105, 5)):
            LOGGER.debug("RUN: %d, level=%d", i + 1, level)
            # Interrupt after `level` percent of the average completion time.
            # NOTE(review): assumes get_history(key) is non-empty —
            # np.mean([]) yields nan and would propagate silently.
            avg_complete_time = np.mean(rasc.get_history(key))
            interruption_time = avg_complete_time * level * 0.01
            # NOTE(review): entity id and temperatures are hardcoded here
            # instead of taken from the experiment settings — confirm this is
            # intentional for the rpi testbed.
            a_coro, s_coro, c_coro = hass.services.rasc_call(
                "climate",
                "set_temperature",
                {"temperature": 69, "entity_id": "climate.rpi_device_thermostat"},
                {
                    RASC_INTERRUPTION_TIME: interruption_time,
                    RASC_INTERRUPTION_MOMENT: interruption_moment,
                },
            )
            await a_coro
            await s_coro
            await c_coro
            LOGGER.debug("complete!68->69")
            # Restore the starting temperature (no interruption parameters).
            a_coro, s_coro, c_coro = hass.services.rasc_call(
                "climate",
                "set_temperature",
                {"temperature": 68, "entity_id": "climate.rpi_device_thermostat"},
            )
            await a_coro
            await s_coro
            await c_coro
            LOGGER.debug("complete!69->68")

    return wrapper


def _create_result_dir() -> str:
"""Create the result directory."""
if not os.path.exists(CONF_RESULTS_DIR):
Expand All @@ -183,7 +274,7 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:

# cpu/memory measurement

om = OverheadMeasurement(hass.loop, config[DOMAIN])
om = OverheadMeasurement(hass, config[DOMAIN])

hass.bus.async_listen_once("rasc_measurement_start", lambda _: om.start())
hass.bus.async_listen_once("rasc_measurement_stop", lambda _: om.stop())
Expand All @@ -209,4 +300,9 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:

await component.async_load()

if RASC_EXPERIMENT_SETTING in config[DOMAIN]:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, run_experiments(hass, component)
)

return True
Loading