diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 55c4809f..3a2c2a9e 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -48,8 +48,8 @@ jobs:
python -m pip install --upgrade pip
pip install -e .[dev]
- - name: Generate databases
- run: python cities/utils/csv_to_db_pipeline.py
+ # - name: Generate databases
+ # run: python cities/utils/csv_to_db_pipeline.py
- name: Test
diff --git a/.gitignore b/.gitignore
index 0cbf1626..df8277e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,12 @@ tests/.coverage
.vscode/launch.json
data/sql/counties_database.db
data/sql/msa_database.db
+docs/experimental_notebooks/zoning/interactions_preds.dill
+docs/experimental_notebooks/zoning/population_preds.dill
+docs/experimental_notebooks/zoning/waic_dict_7.pkl
+docs/experimental_notebooks/zoning/waic_dict_13.pkl
+docs/experimental_notebooks/zoning/waic_dict_14.pkl
+
.Rproj.user
**/*.RData
@@ -42,6 +48,8 @@ data/minneapolis/sourced/demographic/**
data/minneapolis/preds/**
data/minneapolis/sourced/parcel_to_census_tract_mappings/**
data/minneapolis/sourced/parcel_to_parking_info_mappings/**
+
+data/minneapolis/.pgpass
cities/deployment/tracts_minneapolis/tracts_model_guide.pkl
cities/deployment/tracts_minneapolis/tracts_model_params.pth
build/cities/deployment/tracts_minneapolis/tracts_model_guide.pkl
diff --git a/Makefile b/Makefile
index 62670a78..3fff98ae 100755
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,20 @@
format: FORCE
./scripts/clean.sh
+
+path ?= .
+
+format_path: FORCE
+ ./scripts/clean_path.sh $(path)
+
lint: FORCE
./scripts/lint.sh
test: FORCE
+ ./scripts/test.sh
+
+test_all: FORCE
+ ./scripts/clean.sh
./scripts/lint.sh
./scripts/test.sh
./scripts/test_notebooks.sh
diff --git a/build/.env b/build/.env
new file mode 100644
index 00000000..c1e54d7a
--- /dev/null
+++ b/build/.env
@@ -0,0 +1,10 @@
+GOOGLE_CLOUD_PROJECT=cities-429602
+GOOGLE_CLOUD_BUCKET=minneapolis-basis
+
+ENV=dev
+INSTANCE_CONNECTION_NAME=cities-429602:us-central1:cities-devel
+DB_SEARCH_PATH=dev,public
+HOST=34.123.100.76
+SCHEMA=minneapolis
+DATABASE=cities
+DB_USERNAME=postgres
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 00000000..cb1144de
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,10 @@
+FROM python:3
+
+WORKDIR /usr/src/app
+
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+CMD [ "python", "main.py" ]
diff --git a/build/api/Dockerfile b/build/api/Dockerfile
new file mode 100644
index 00000000..cb1144de
--- /dev/null
+++ b/build/api/Dockerfile
@@ -0,0 +1,10 @@
+FROM python:3
+
+WORKDIR /usr/src/app
+
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+CMD [ "python", "main.py" ]
diff --git a/build/api/main.py b/build/api/main.py
new file mode 100644
index 00000000..fbfcea0b
--- /dev/null
+++ b/build/api/main.py
@@ -0,0 +1,235 @@
+import os
+
+from typing import Annotated
+
+from dotenv import load_dotenv
+from fastapi import FastAPI, Depends, Query
+from fastapi.middleware.gzip import GZipMiddleware
+import uvicorn
+
+import psycopg2
+from psycopg2.pool import ThreadedConnectionPool
+
+load_dotenv()
+
+ENV = os.getenv("ENV")
+USERNAME = os.getenv("DB_USERNAME")
+PASSWORD = os.getenv("PASSWORD")
+HOST = os.getenv("HOST")
+DATABASE = os.getenv("DATABASE")
+DB_SEARCH_PATH = os.getenv("DB_SEARCH_PATH")
+INSTANCE_CONNECTION_NAME = os.getenv("INSTANCE_CONNECTION_NAME")
+
+app = FastAPI()
+
+if ENV == "dev":
+ from fastapi.middleware.cors import CORSMiddleware
+
+ origins = [
+ "http://localhost",
+ "http://localhost:5000",
+ ]
+ app.add_middleware(CORSMiddleware, allow_origins=origins, allow_credentials=True)
+
+app.add_middleware(GZipMiddleware, minimum_size=1000, compresslevel=5)
+
+
+if ENV == "dev":
+ host = HOST
+else:
+ host = f"/cloudsql/{INSTANCE_CONNECTION_NAME}"
+
+pool = ThreadedConnectionPool(
+ 1,
+ 10,
+ user=USERNAME,
+ password=PASSWORD,
+ host=HOST,
+ database=DATABASE,
+ options=f"-csearch_path={DB_SEARCH_PATH}",
+)
+
+
+def get_db() -> psycopg2.extensions.connection:
+ db = pool.getconn()
+ try:
+ yield db
+ finally:
+ pool.putconn(db)
+
+
+predictor = None
+
+
+def get_predictor(db: psycopg2.extensions.connection = Depends(get_db)):
+ from cities.deployment.tracts_minneapolis.predict import TractsModelPredictor
+
+ global predictor
+ if predictor is None:
+ predictor = TractsModelPredictor(db)
+ return predictor
+
+
+Limit = Annotated[float, Query(ge=0, le=1)]
+Radius = Annotated[float, Query(ge=0)]
+Year = Annotated[int, Query(ge=2000, le=2030)]
+
+
+@app.middleware("http")
+async def add_cache_control_header(request, call_next):
+ response = await call_next(request)
+ response.headers["Cache-Control"] = "public, max-age=300"
+ return response
+
+
+if ENV == "dev":
+
+ @app.middleware("http")
+ async def add_acess_control_header(request, call_next):
+ response = await call_next(request)
+ response.headers["Access-Control-Allow-Origin"] = "*"
+ return response
+
+
+@app.get("/demographics")
+async def read_demographics(
+ category: Annotated[str, Query(max_length=100)], db=Depends(get_db)
+):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select tract_id, "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021", "2022"
+ from api__demographics where description = %s
+ """,
+ (category,),
+ )
+ return [[desc[0] for desc in cur.description]] + cur.fetchall()
+
+
+@app.get("/census-tracts")
+async def read_census_tracts(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute("select * from api__census_tracts where year_ = %s", (year,))
+ row = cur.fetchone()
+
+ return row[1] if row is not None else None
+
+
+@app.get("/high-frequency-transit-lines")
+async def read_high_frequency_transit_lines(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select line_geom_json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (year,),
+ )
+ row = cur.fetchone()
+
+ return row[0] if row is not None else None
+
+
+@app.get("/high-frequency-transit-stops")
+async def read_high_frequency_transit_stops(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select stop_geom_json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (year,),
+ )
+ row = cur.fetchone()
+
+ return row[0] if row is not None else None
+
+
+@app.get("/yellow-zone")
+async def read_yellow_zone(
+ year: Year, line_radius: Radius, stop_radius: Radius, db=Depends(get_db)
+):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select
+ st_asgeojson(st_transform(st_union(st_buffer(line_geom, %s, 'quad_segs=4'), st_buffer(stop_geom, %s, 'quad_segs=4')), 4269))::json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (line_radius, stop_radius, year),
+ )
+ row = cur.fetchone()
+
+ if row is None:
+ return None
+
+ return {
+ "type": "FeatureCollection",
+ "features": [
+ {"type": "Feature", "properties": {"id": "0"}, "geometry": row[0]}
+ ],
+ }
+
+
+@app.get("/blue-zone")
+async def read_blue_zone(year: Year, radius: Radius, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select st_asgeojson(st_transform(st_buffer(line_geom, %s, 'quad_segs=4'), 4269))::json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (radius, year),
+ )
+ row = cur.fetchone()
+
+ if row is None:
+ return None
+
+ return {
+ "type": "FeatureCollection",
+ "features": [
+ {"type": "Feature", "properties": {"id": "0"}, "geometry": row[0]}
+ ],
+ }
+
+
+@app.get("/predict")
+async def read_predict(
+ blue_zone_radius: Radius,
+ yellow_zone_line_radius: Radius,
+ yellow_zone_stop_radius: Radius,
+ blue_zone_limit: Limit,
+ yellow_zone_limit: Limit,
+ year: Year,
+ db=Depends(get_db),
+ predictor=Depends(get_predictor),
+):
+ result = predictor.predict_cumulative(
+ db,
+ intervention=(
+ {
+ "radius_blue": blue_zone_radius,
+ "limit_blue": blue_zone_limit,
+ "radius_yellow_line": yellow_zone_line_radius,
+ "radius_yellow_stop": yellow_zone_stop_radius,
+ "limit_yellow": yellow_zone_limit,
+ "reform_year": year,
+ }
+ ),
+ )
+ return {
+ "census_tracts": [str(t) for t in result["census_tracts"]],
+ "housing_units_factual": [t.item() for t in result["housing_units_factual"]],
+ "housing_units_counterfactual": [
+ t.tolist() for t in result["housing_units_counterfactual"]
+ ],
+ }
+
+
+if __name__ == "__main__":
+ uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 8000)))
diff --git a/build/api/postgrest.conf b/build/api/postgrest.conf
new file mode 100644
index 00000000..ddb71965
--- /dev/null
+++ b/build/api/postgrest.conf
@@ -0,0 +1,107 @@
+## Admin server used for checks. It's disabled by default unless a port is specified.
+# admin-server-port = 3001
+
+## The database role to use when no client authentication is provided
+db-anon-role = "web_anon"
+
+## Notification channel for reloading the schema cache
+db-channel = "pgrst"
+
+## Enable or disable the notification channel
+db-channel-enabled = true
+
+## Enable in-database configuration
+db-config = true
+
+## Function for in-database configuration
+## db-pre-config = "postgrest.pre_config"
+
+## Extra schemas to add to the search_path of every request
+db-extra-search-path = "public"
+
+## Limit rows in response
+# db-max-rows = 1000
+
+## Allow getting the EXPLAIN plan through the `Accept: application/vnd.pgrst.plan` header
+# db-plan-enabled = false
+
+## Number of open connections in the pool
+db-pool = 10
+
+## Time in seconds to wait to acquire a slot from the connection pool
+# db-pool-acquisition-timeout = 10
+
+## Time in seconds after which to recycle pool connections
+# db-pool-max-lifetime = 1800
+
+## Time in seconds after which to recycle unused pool connections
+# db-pool-max-idletime = 30
+
+## Allow automatic database connection retrying
+# db-pool-automatic-recovery = true
+
+## Stored proc to exec immediately after auth
+# db-pre-request = "stored_proc_name"
+
+## Enable or disable prepared statements. disabling is only necessary when behind a connection pooler.
+## When disabled, statements will be parametrized but won't be prepared.
+db-prepared-statements = true
+
+## The name of which database schema to expose to REST clients
+db-schemas = "api"
+
+## How to terminate database transactions
+## Possible values are:
+## commit (default)
+## Transaction is always committed, this can not be overridden
+## commit-allow-override
+## Transaction is committed, but can be overridden with Prefer tx=rollback header
+## rollback
+## Transaction is always rolled back, this can not be overridden
+## rollback-allow-override
+## Transaction is rolled back, but can be overridden with Prefer tx=commit header
+db-tx-end = "commit"
+
+## The standard connection URI format, documented at
+## https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+db-uri = "postgresql://postgres@34.123.100.76:5432/cities"
+
+# jwt-aud = "your_audience_claim"
+
+## Jspath to the role claim key
+jwt-role-claim-key = ".role"
+
+## Choose a secret, JSON Web Key (or set) to enable JWT auth
+## (use "@filename" to load from separate file)
+# jwt-secret = "secret_with_at_least_32_characters"
+jwt-secret-is-base64 = false
+
+## Enables and set JWT Cache max lifetime, disables caching with 0
+# jwt-cache-max-lifetime = 0
+
+## Logging level, the admitted values are: crit, error, warn, info and debug.
+log-level = "error"
+
+## Determine if the OpenAPI output should follow or ignore role privileges or be disabled entirely.
+## Admitted values: follow-privileges, ignore-privileges, disabled
+openapi-mode = "follow-privileges"
+
+## Base url for the OpenAPI output
+openapi-server-proxy-uri = ""
+
+## Configurable CORS origins
+# server-cors-allowed-origins = ""
+
+server-host = "!4"
+server-port = 3001
+
+## Allow getting the request-response timing information through the `Server-Timing` header
+server-timing-enabled = true
+
+## Unix socket location
+## if specified it takes precedence over server-port
+# server-unix-socket = "/tmp/pgrst.sock"
+
+## Unix socket file mode
+## When none is provided, 660 is applied by default
+# server-unix-socket-mode = "660"
diff --git a/build/api/requirements.txt b/build/api/requirements.txt
new file mode 100644
index 00000000..95cd7505
--- /dev/null
+++ b/build/api/requirements.txt
@@ -0,0 +1,183 @@
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+# pip-compile --extra=api --output-file=api/requirements.txt
+#
+annotated-types==0.7.0
+ # via pydantic
+anyio==4.4.0
+ # via
+ # httpx
+ # starlette
+ # watchfiles
+certifi==2024.8.30
+ # via
+ # httpcore
+ # httpx
+chirho @ git+https://github.com/BasisResearch/chirho.git
+ # via cities (setup.py)
+click==8.1.7
+ # via
+ # typer
+ # uvicorn
+contourpy==1.3.0
+ # via matplotlib
+cycler==0.12.1
+ # via matplotlib
+dill==0.3.8
+ # via cities (setup.py)
+dnspython==2.6.1
+ # via email-validator
+email-validator==2.2.0
+ # via fastapi
+fastapi[standard]==0.114.0
+ # via cities (setup.py)
+fastapi-cli[standard]==0.0.5
+ # via fastapi
+filelock==3.16.0
+ # via torch
+fonttools==4.53.1
+ # via matplotlib
+fsspec==2024.9.0
+ # via torch
+h11==0.14.0
+ # via
+ # httpcore
+ # uvicorn
+httpcore==1.0.5
+ # via httpx
+httptools==0.6.1
+ # via uvicorn
+httpx==0.27.2
+ # via fastapi
+idna==3.8
+ # via
+ # anyio
+ # email-validator
+ # httpx
+jinja2==3.1.4
+ # via
+ # fastapi
+ # torch
+joblib==1.4.2
+ # via scikit-learn
+kiwisolver==1.4.7
+ # via matplotlib
+markdown-it-py==3.0.0
+ # via rich
+markupsafe==2.1.5
+ # via jinja2
+matplotlib==3.9.2
+ # via cities (setup.py)
+mdurl==0.1.2
+ # via markdown-it-py
+mpmath==1.3.0
+ # via sympy
+networkx==3.3
+ # via torch
+numpy==2.1.1
+ # via
+ # cities (setup.py)
+ # contourpy
+ # matplotlib
+ # opt-einsum
+ # pandas
+ # pyro-ppl
+ # scikit-learn
+ # scipy
+opt-einsum==3.3.0
+ # via pyro-ppl
+packaging==24.1
+ # via
+ # matplotlib
+ # plotly
+pandas==2.2.2
+ # via cities (setup.py)
+pillow==10.4.0
+ # via matplotlib
+plotly==5.24.0
+ # via cities (setup.py)
+psycopg2==2.9.9
+ # via cities (setup.py)
+pydantic==2.9.1
+ # via fastapi
+pydantic-core==2.23.3
+ # via pydantic
+pygments==2.18.0
+ # via rich
+pyparsing==3.1.4
+ # via matplotlib
+pyro-api==0.1.2
+ # via pyro-ppl
+pyro-ppl==1.8.6
+ # via
+ # chirho
+ # cities (setup.py)
+python-dateutil==2.9.0.post0
+ # via
+ # matplotlib
+ # pandas
+python-dotenv==1.0.1
+ # via uvicorn
+python-multipart==0.0.9
+ # via fastapi
+pytz==2024.1
+ # via pandas
+pyyaml==6.0.2
+ # via uvicorn
+rich==13.8.0
+ # via typer
+scikit-learn==1.5.1
+ # via cities (setup.py)
+scipy==1.14.1
+ # via scikit-learn
+shellingham==1.5.4
+ # via typer
+six==1.16.0
+ # via python-dateutil
+sniffio==1.3.1
+ # via
+ # anyio
+ # httpx
+sqlalchemy==2.0.34
+ # via cities (setup.py)
+starlette==0.38.5
+ # via fastapi
+sympy==1.13.2
+ # via torch
+tenacity==9.0.0
+ # via plotly
+threadpoolctl==3.5.0
+ # via scikit-learn
+torch==2.4.1
+ # via
+ # cities (setup.py)
+ # pyro-ppl
+tqdm==4.66.5
+ # via pyro-ppl
+typer==0.12.5
+ # via fastapi-cli
+typing-extensions==4.12.2
+ # via
+ # fastapi
+ # pydantic
+ # pydantic-core
+ # sqlalchemy
+ # torch
+ # typer
+tzdata==2024.1
+ # via pandas
+uvicorn[standard]==0.30.6
+ # via
+ # fastapi
+ # fastapi-cli
+uvloop==0.20.0
+ # via uvicorn
+watchfiles==0.24.0
+ # via uvicorn
+websockets==13.0.1
+ # via uvicorn
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/build/api/schema.sql b/build/api/schema.sql
new file mode 100644
index 00000000..2285c2b7
--- /dev/null
+++ b/build/api/schema.sql
@@ -0,0 +1,67 @@
+begin;
+drop schema if exists api cascade;
+
+create schema api;
+
+create view api.demographics as (
+ select * from api__demographics
+);
+
+create view api.census_tracts as (
+ select * from api__census_tracts
+);
+
+create function api.high_frequency_transit_lines() returns setof dev.api__high_frequency_transit_lines as $$
+ select * from dev.api__high_frequency_transit_lines
+$$ language sql;
+
+create function api.high_frequency_transit_lines(
+ blue_zone_radius double precision,
+ yellow_zone_line_radius double precision,
+ yellow_zone_stop_radius double precision
+) returns table (
+ valid daterange,
+ geom geometry(LineString, 4269),
+ blue_zone_geom geometry(LineString, 4269),
+ yellow_zone_geom geometry(Geometry, 4269)
+) as $$
+ with
+ lines as (select * from dev.stg_high_frequency_transit_lines_union),
+ stops as (select * from dev.high_frequency_transit_stops),
+ lines_and_stops as (
+ select
+ lines.valid * stops.valid as valid,
+ lines.geom as line_geom,
+ stops.geom as stop_geom
+ from lines inner join stops on lines.valid && stops.valid
+ )
+ select
+ valid,
+ st_transform(line_geom, 4269) as geom,
+ st_transform(st_buffer(line_geom, blue_zone_radius), 4269) as blue_zone_geom,
+ st_transform(st_union(st_buffer(line_geom, yellow_zone_line_radius), st_buffer(stop_geom, yellow_zone_stop_radius)), 4269) as yellow_zone_geom
+ from lines_and_stops
+$$ language sql;
+
+do $$
+begin
+create role web_anon nologin;
+exception when duplicate_object then raise notice '%, skipping', sqlerrm using errcode = sqlstate;
+end
+$$;
+
+grant all on schema public to web_anon;
+grant all on schema dev to web_anon;
+grant select on table public.spatial_ref_sys TO web_anon;
+grant usage on schema api to web_anon;
+grant all on all tables in schema api to web_anon;
+grant all on all functions in schema api to web_anon;
+grant all on schema api to web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA dev TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA dev TO web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA api TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA api TO web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA public TO web_anon;
+grant web_anon to postgres;
+commit;
diff --git a/build/cities/__init__.py b/build/cities/__init__.py
new file mode 100644
index 00000000..f993e182
--- /dev/null
+++ b/build/cities/__init__.py
@@ -0,0 +1,6 @@
+"""**cities**
+
+Project short description.
+"""
+
+__version__ = "0.0.1"
diff --git a/build/cities/deployment/tracts_minneapolis/.gitignore b/build/cities/deployment/tracts_minneapolis/.gitignore
new file mode 100644
index 00000000..5304474d
--- /dev/null
+++ b/build/cities/deployment/tracts_minneapolis/.gitignore
@@ -0,0 +1,2 @@
+*.pth
+*.pkl
\ No newline at end of file
diff --git a/build/cities/deployment/tracts_minneapolis/__init__.py b/build/cities/deployment/tracts_minneapolis/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/build/cities/deployment/tracts_minneapolis/generate_torch_loader.py b/build/cities/deployment/tracts_minneapolis/generate_torch_loader.py
new file mode 100644
index 00000000..c07d8107
--- /dev/null
+++ b/build/cities/deployment/tracts_minneapolis/generate_torch_loader.py
@@ -0,0 +1,87 @@
+import os
+import time
+
+import sqlalchemy
+import torch
+from dotenv import load_dotenv
+
+from cities.utils.data_grabber import find_repo_root
+from cities.utils.data_loader import ZoningDataset, select_from_sql
+
+load_dotenv()
+
+local_user = os.getenv("USER")
+if local_user == "rafal":
+ load_dotenv(os.path.expanduser("~/.env_pw"))
+# local torch loader is needed for subsampling in evaluation, comparison to the previous dataset and useful for ED
+DB_USERNAME = os.getenv("DB_USERNAME")
+HOST = os.getenv("HOST")
+DATABASE = os.getenv("DATABASE")
+PASSWORD = os.getenv("PASSWORD")
+
+
+#####################
+# data load and prep
+#####################
+
+kwargs = {
+ "categorical": ["year", "census_tract"],
+ "continuous": {
+ "housing_units",
+ "housing_units_original",
+ "total_value",
+ "total_value_original",
+ "median_value",
+ "mean_limit_original",
+ "median_distance",
+ "income",
+ "segregation_original",
+ "white_original",
+ "parcel_sqm",
+ },
+ "outcome": "housing_units",
+}
+
+load_start = time.time()
+with sqlalchemy.create_engine(
+ f"postgresql://{DB_USERNAME}:{PASSWORD}@{HOST}/{DATABASE}"
+).connect() as conn:
+ subset = select_from_sql(
+ "select * from dev.tracts_model__census_tracts order by census_tract, year",
+ conn,
+ kwargs,
+ )
+load_end = time.time()
+print(f"Data loaded in {load_end - load_start} seconds")
+
+
+columns_to_standardize = [
+ "housing_units_original",
+ "total_value_original",
+]
+
+new_standardization_dict = {}
+
+for column in columns_to_standardize:
+ new_standardization_dict[column] = {
+ "mean": subset["continuous"][column].mean(),
+ "std": subset["continuous"][column].std(),
+ }
+
+
+assert "parcel_sqm" in subset["continuous"].keys()
+
+root = find_repo_root()
+
+pg_census_tracts_dataset = ZoningDataset(
+ subset["categorical"],
+ subset["continuous"],
+ standardization_dictionary=new_standardization_dict,
+)
+assert "parcel_sqm" in subset["continuous"].keys()
+
+pg_census_tracts_data_path = os.path.join(
+ root, "data/minneapolis/processed/pg_census_tracts_dataset.pt"
+)
+
+torch.save(pg_census_tracts_dataset, pg_census_tracts_data_path)
diff --git a/build/cities/deployment/tracts_minneapolis/predict.py b/build/cities/deployment/tracts_minneapolis/predict.py
new file mode 100644
index 00000000..8ae4ac43
--- /dev/null
+++ b/build/cities/deployment/tracts_minneapolis/predict.py
@@ -0,0 +1,343 @@
+import copy
+import os
+
+import dill
+import pandas as pd
+import pyro
+import torch
+from chirho.counterfactual.handlers import MultiWorldCounterfactual
+from chirho.indexed.ops import IndexSet, gather
+from chirho.interventional.handlers import do
+from dotenv import load_dotenv
+from pyro.infer import Predictive
+
+# from cities.modeling.zoning_models.zoning_tracts_sqm_model import (
+# TractsModelSqm as TractsModel,
+# )
+
+from cities.modeling.zoning_models.zoning_tracts_continuous_interactions_model import (
+ TractsModelContinuousInteractions as TractsModel,
+)
+from cities.utils.data_grabber import find_repo_root
+from cities.utils.data_loader import select_from_data, select_from_sql
+
+load_dotenv()
+
+local_user = os.getenv("USER")
+if local_user == "rafal":
+ load_dotenv(os.path.expanduser("~/.env_pw"))
+
+
+class TractsModelPredictor:
+ kwargs = {
+ "categorical": ["year", "year_original", "census_tract",],
+ "continuous": {
+ "housing_units",
+ "housing_units_original",
+ "total_value",
+ "median_value",
+ "mean_limit_original",
+ "median_distance",
+ "income",
+ "segregation_original",
+ "white_original",
+ "parcel_sqm",
+ 'downtown_overlap',
+ 'university_overlap',
+ },
+ "outcome": "housing_units",
+ }
+
+ kwargs_subset = {
+ "categorical": ["year", "year_original", "census_tract"],
+ "continuous": {
+ "housing_units",
+ "total_value",
+ "median_value",
+ "mean_limit_original",
+ "median_distance",
+ "income",
+ "segregation_original",
+ "white_original",
+ "parcel_sqm",
+ 'downtown_overlap',
+ 'university_overlap',
+ },
+ "outcome": "housing_units",
+ }
+
+
+
+ parcel_intervention_sql = """
+ select
+ census_tract,
+ year_,
+ case
+ when downtown_yn then 0
+ when not downtown_yn
+ and year_ >= %(reform_year)s
+ and distance_to_transit <= %(radius_blue)s
+ then %(limit_blue)s
+ when not downtown_yn
+ and year_ >= %(reform_year)s
+ and distance_to_transit > %(radius_blue)s
+ and (distance_to_transit_line <= %(radius_yellow_line)s
+ or distance_to_transit_stop <= %(radius_yellow_stop)s)
+ then %(limit_yellow)s
+ when not downtown_yn
+ and year_ >= %(reform_year)s
+ and distance_to_transit_line > %(radius_yellow_line)s
+ and distance_to_transit_stop > %(radius_yellow_stop)s
+ then 1
+ else limit_con
+ end as intervention
+ from tracts_model__parcels
+ """
+
+ tracts_intervention_sql = f"""
+ with parcel_interventions as ({parcel_intervention_sql})
+ select
+ census_tract,
+ year_,
+ avg(intervention) as intervention
+ from parcel_interventions
+ group by census_tract, year_
+ order by census_tract, year_
+ """
+
+ def __init__(self, conn):
+ self.conn = conn
+
+ root = find_repo_root()
+ deploy_path = os.path.join(root, "cities/deployment/tracts_minneapolis")
+
+ guide_path = os.path.join(deploy_path, "tracts_model_guide.pkl")
+ self.param_path = os.path.join(deploy_path, "tracts_model_params.pth")
+
+ need_to_train_flag = False
+ if not os.path.isfile(guide_path):
+ need_to_train_flag = True
+ print(f"Warning: '{guide_path}' does not exist.")
+ if not os.path.isfile(self.param_path):
+ need_to_train_flag = True
+ print(f"Warning: '{self.param_path}' does not exist.")
+
+ if need_to_train_flag:
+ print("Please run 'train_model.py' to generate the required files.")
+
+ with open(guide_path, "rb") as file:
+ guide = dill.load(file)
+
+ self.data = select_from_sql(
+ "select * from tracts_model__census_tracts order by census_tract, year",
+ conn,
+ TractsModelPredictor.kwargs,
+ )
+
+
+ # set to zero whenever the university overlap is above 1
+ # TODO this should be handled at the data processing stage
+ self.data['continuous']['mean_limit_original'] = torch.where(self.data['continuous']['university_overlap'] > 1,
+ torch.zeros_like(self.data['continuous']['mean_limit_original']),
+ self.data['continuous']['mean_limit_original'])
+
+
+ self.subset = select_from_data(self.data, TractsModelPredictor.kwargs_subset)
+
+
+ self.years = self.data["categorical"]["year_original"]
+ self.year_ids = self.data['categorical']["year"]
+ self.tracts = self.data["categorical"]["census_tract"]
+
+
+ categorical_levels = {
+ "year": torch.unique(self.subset["categorical"]["year"]),
+ "year_original": torch.unique(self.subset["categorical"]["year_original"]),
+ "census_tract": torch.unique(self.subset["categorical"]["census_tract"]),
+ }
+
+ self.housing_units_std = self.data["continuous"]["housing_units_original"].std()
+ self.housing_units_mean = self.data["continuous"][
+ "housing_units_original"
+ ].mean()
+
+ #interaction_pairs
+ ins = [
+ ("university_overlap", "limit"),
+ ("downtown_overlap", "limit"),
+ ("distance", "downtown_overlap"),
+ ("distance", "university_overlap"),
+ ("distance", "limit"),
+ ("median_value", "segregation"),
+ ("distance", "segregation"),
+ ("limit", "sqm"),
+ ("segregation", "sqm"),
+ ("distance", "white"),
+ ("income", "limit"),
+ ("downtown_overlap", "median_value"),
+ ("downtown_overlap", "segregation"),
+ ("median_value", "white"),
+ ("distance", "income"),
+ ]
+
+
+ model = TractsModel(**self.subset, categorical_levels=categorical_levels,
+ housing_units_continuous_interaction_pairs=ins)
+
+ self.predictive = Predictive(model=model, guide=guide, num_samples=100)
+
+ # these are at the tracts level
+ def _tracts_intervention(
+ self,
+ conn,
+ radius_blue,
+ limit_blue,
+ radius_yellow_line,
+ radius_yellow_stop,
+ limit_yellow,
+ reform_year,
+ ):
+ params = {
+ "reform_year": reform_year,
+ "radius_blue": radius_blue,
+ "limit_blue": limit_blue,
+ "radius_yellow_line": radius_yellow_line,
+ "radius_yellow_stop": radius_yellow_stop,
+ "limit_yellow": limit_yellow,
+ }
+ df = pd.read_sql(
+ TractsModelPredictor.tracts_intervention_sql, conn, params=params
+ )
+ return torch.tensor(df["intervention"].values, dtype=torch.float32)
+
+ def predict_cumulative(self, conn, intervention):
+ """Predict the total number of housing units built from 2011-2020 under intervention.
+
+ Returns a dictionary with keys:
+ - 'census_tracts': the tracts considered
+ - 'housing_units_factual': total housing units built according to real housing data
+ - 'housing_units_counterfactual': samples from prediction of total housing units built
+ """
+ pyro.clear_param_store()
+ pyro.get_param_store().load(self.param_path)
+
+ subset_for_preds = copy.deepcopy(self.subset)
+ subset_for_preds["continuous"]["housing_units"] = None
+
+ limit_intervention = self._tracts_intervention(conn, **intervention)
+
+ limit_intervention = torch.where(self.data['continuous']['university_overlap'] > 2,
+ torch.zeros_like(limit_intervention),
+ limit_intervention)
+
+ limit_intervention = torch.where(self.data['continuous']['downtown_overlap'] > 1,
+ torch.zeros_like(limit_intervention),
+ limit_intervention)
+
+ with MultiWorldCounterfactual() as mwc:
+ with do(actions={"limit": limit_intervention}):
+ result_all = self.predictive(**subset_for_preds)["housing_units"]
+ with mwc:
+ result_f = gather(
+ result_all, IndexSet(**{"limit": {0}}), event_dims=0
+ ).squeeze()
+ result_cf = gather(
+ result_all, IndexSet(**{"limit": {1}}), event_dims=0
+ ).squeeze()
+
+ obs_housing_units = self.data["continuous"]["housing_units_original"]
+ f_housing_units = (result_f * self.housing_units_std + self.housing_units_mean)#.clamp(min = 0)
+ cf_housing_units = (result_cf * self.housing_units_std + self.housing_units_mean)#.clamp(min = 0)
+
+
+ # calculate cumulative housing units (factual)
+ obs_cumsums = {}
+ f_cumsums = {}
+ cf_cumsums = {}
+ for key in self.tracts.unique():
+ obs_units = []
+ f_units = []
+ cf_units = []
+ for year in self.years.unique():
+ obs_units.append(obs_housing_units[(self.tracts == key) & (self.years == year)])
+ f_units.append(f_housing_units[:,(self.tracts == key) & (self.years == year)])
+ cf_units.append(cf_housing_units[:,(self.tracts == key) & (self.years == year)])
+
+ obs_cumsum = torch.cumsum(torch.stack(obs_units), dim = 0).flatten()
+ f_cumsum = torch.cumsum(torch.stack(f_units), dim = 0).squeeze()
+ cf_cumsum = torch.cumsum(torch.stack(cf_units), dim = 0).squeeze()
+
+ obs_cumsums[key] = obs_cumsum
+ f_cumsums[key] = f_cumsum
+ cf_cumsums[key] = cf_cumsum
+
+
+ # presumably outdated
+
+ tracts = self.data["categorical"]["census_tract"]
+
+ # calculate cumulative housing units (factual)
+ f_totals = {}
+ for i in range(tracts.shape[0]):
+ key = tracts[i].item()
+ if key not in f_totals:
+ f_totals[key] = 0
+ f_totals[key] += obs_housing_units[i]
+
+ # calculate cumulative housing units (counterfactual)
+ cf_totals = {}
+ for i in range(tracts.shape[0]):
+ year = self.years[i].item()
+ key = tracts[i].item()
+ if key not in cf_totals:
+ cf_totals[key] = 0
+ if year < intervention["reform_year"]:
+ cf_totals[key] += obs_housing_units[i]
+ else:
+ cf_totals[key] = cf_totals[key] + cf_housing_units[:, i]
+ cf_totals = {k: torch.clamp(v, 0) for k, v in cf_totals.items()}
+
+ census_tracts = list(cf_totals.keys())
+ f_housing_units = [f_totals[k] for k in census_tracts]
+ cf_housing_units = [cf_totals[k] for k in census_tracts]
+
+
+
+ return {"obs_cumsums": obs_cumsums, "f_cumsums": f_cumsums, "cf_cumsums": cf_cumsums,
+ "limit_intervention": limit_intervention,
+ # presumably outdated
+ "census_tracts": census_tracts,
+ "housing_units_factual": f_housing_units,
+ "housing_units_counterfactual": cf_housing_units,}
+
+
+ # return {
+ # "census_tracts": census_tracts,
+ # "housing_units_factual": f_housing_units,
+ # "housing_units_counterfactual": cf_housing_units,
+ # "limit_intervention": limit_intervention,
+ # }
+
+
+if __name__ == "__main__":
+ import time
+
+ from cities.utils.data_loader import db_connection
+
+ with db_connection() as conn:
+ predictor = TractsModelPredictor(conn)
+ start = time.time()
+
+ result = predictor.predict_cumulative(
+ conn,
+ intervention={
+ "radius_blue": 106.7,
+ "limit_blue": 0,
+ "radius_yellow_line": 402.3,
+ "radius_yellow_stop": 804.7,
+ "limit_yellow": 0.5,
+ "reform_year": 2015,
+ },
+ )
+ end = time.time()
+ print(f"Counterfactual in {end - start} seconds")
diff --git a/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_dag_plot_high_density.png b/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_dag_plot_high_density.png
new file mode 100644
index 00000000..e6e5f6cc
Binary files /dev/null and b/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_dag_plot_high_density.png differ
diff --git a/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_model_overview.ipynb b/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_model_overview.ipynb
new file mode 100644
index 00000000..18b68ce0
--- /dev/null
+++ b/build/cities/deployment/tracts_minneapolis/tracts_model_overview/tracts_model_overview.ipynb
@@ -0,0 +1,86 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "vscode": {
+ "languageId": "plaintext"
+ }
+ },
+ "source": [
+ "## What is this project about?\n",
+ "\n",
+    "We use state-of-the-art Bayesian causal modeling tools ([ChiRho](https://github.com/BasisResearch/chirho)) to investigate the role of parking zoning reform in Minneapolis on the development of new housing units, at a relatively fine-grained level of census tracts. Minneapolis is an example of a city which somewhat successfully navigates the housing crisis, and a parking zoning reform has been claimed to be connected to this outcome (see for example [here](https://reason.com/2024/02/27/fear-loathing-and-zoning-reform-in-minnesota/) and [here](https://www.strongtowns.org/journal/2023/9/15/ending-minimum-parking-requirements-was-a-policy-win-for-the-twin-cities)).\n",
+ "\n",
+ "\n",
+ "%TODO Someone should perhaps check if there are better links to include here\n",
+ "\n",
+    "Whether this is so, to what extent and with what uncertainty has been unclear. Yes, the number of housing units in the city increased faster after the reform. But it is not obvious whether this isn't a mere correlation arising from other variables being causally responsible, or random variation. We decided to take a deep dive and connect detailed census tracts data with demographic variables within a carefully devised causal model to investigate. Due to data availability limitations, we start at year 2010. Since a major world-wide event changed too many things in 2020, this is where our data collection stops, to be able to separate the zoning concerns from the complex and unprecedented events that follow. It turns out that even with 10 years of data only, causal modelling allows us to offer some (admittedly, uncertain) answers."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Why this is not a typical machine learning project\n",
+ "\n",
+    "A typical predictive project in machine learning tends to use as much data as possible and algorithms to identify patterns, focusing only on predictive accuracy. While such an approach is useful, the key limitation is that such models have a hard time distinguishing accidental correlations from causal connections, and therefore are not reliable guides to counterfactual predictions and causal effect estimation. Moreover, a typical model often disregards information that humans use heavily: temporal, spatial or causal structures, which are needed to generalize well outside the training data.\n",
+ "\n",
+    "Instead, we use our core open source technology, [ChiRho](https://github.com/BasisResearch/chirho), to build **Bayesian causal models** using hand-picked relevant variables. This way, we can work with humans in the loop. The fact that we use Bayesian methods allows for the injection of human understanding of the causal dependencies, which then work in symbiosis with the data, even if the latter is somewhat limited, and for honest assessment of the resulting uncertainties. The fact that the model is causal gives us a chance to address counterfactual queries involving alternative interventions.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "\n",
+ "## Why care about different types of questions?\n",
+ "\n",
+    "Once we start thinking in causal terms, there are **multiple types of queries** that we can distinguish and answer using the model, and such questions typically have different answers. While associative information is often useful or revealing, equally often we want to be able to evaluate potential consequences of acting one way or another, and in this mode of reflection, we rather turn to thinking in terms of interventions and counterfactuals.\n",
+ "\n",
+ "- *Association*. Example: Is there a correlation between increased green spaces and decreased crime rate in an area? Perhaps, areas with more green spaces do tend to have lower crime rates for various reasons.\n",
+ "\n",
+ "- *Intervention* If the city implements a zoning change to create more green spaces, how would this impact the crime rate in the area? The answer might differ here: factors other than the policy change probably influence crime rates to a large extent.\n",
+ "\n",
+ "- *Counterfactual* Suppose you did create more green spaces and the crime rate in the area did go down. Are you to be thanked? This depends on whether the crime rate would have gone down had you not created more green space in the area. Would it?\n",
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Counterfactual modeling of the zoning reform\n",
+ "\n",
+    "In the case at hand, we allow you, the user, to investigate predicted counterfactual outcomes of a zoning reform, specified in terms of where the two zones start, what parking limits are to be imposed in different zones, and what year the reform has been introduced. From among the available variables we hand-picked the ones that are most useful and meaningfully causally connected. The model simultaneously learns the strengths of over 30 causal connections and uses this information to inform its counterfactual predictions. The structural assumptions we have made at a high level can be described by the diagram below. However, a moderately competent user can use our [open source codebase](https://github.com/BasisResearch/cities) to tweak or modify these assumptions and investigate the consequences of doing so.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## How does the model perform?\n",
+ "\n",
+    "The causal layer, nevertheless, should not come at the cost of predictive power. The models went through a battery of tests on split data, each time being able to account for around 25-30% of the variation in the data (which for such noisy problems is fairly decent performance), effectively on average improving predictions of new housing units appearing in each of the census tracts in each of the given years by the count of 35-40 over a null model. A detailed notebook with model testing is also available at our open source codebase. "
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/build/cities/deployment/tracts_minneapolis/train_model.py b/build/cities/deployment/tracts_minneapolis/train_model.py
new file mode 100644
index 00000000..82a95eab
--- /dev/null
+++ b/build/cities/deployment/tracts_minneapolis/train_model.py
@@ -0,0 +1,114 @@
+import os
+import time
+
+import dill
+import pyro
+import torch
+from dotenv import load_dotenv
+
+from cities.modeling.svi_inference import run_svi_inference
+from cities.modeling.zoning_models.zoning_tracts_continuous_interactions_model import (
+ TractsModelContinuousInteractions as TractsModel,
+)
+from cities.utils.data_grabber import find_repo_root
+from cities.utils.data_loader import db_connection, select_from_sql
+
+# from cities.modeling.zoning_models.zoning_tracts_model import TractsModel
+# from cities.modeling.zoning_models.zoning_tracts_sqm_model import (
+# TractsModelSqm as TractsModel,
+# )
+
+
+n_steps = 2000
+
+load_dotenv()
+
+local_user = os.getenv("USER")
+if local_user == "rafal":
+ load_dotenv(os.path.expanduser("~/.env_pw"))
+
+#####################
+# data load and prep
+#####################
+
+kwargs = {
+ "categorical": ["year", "census_tract"],
+ "continuous": {
+ "housing_units",
+ "housing_units_original",
+ "total_value",
+ "median_value",
+ "mean_limit_original",
+ "median_distance",
+ "income",
+ "segregation_original",
+ "white_original",
+ "parcel_sqm",
+ "downtown_overlap",
+ "university_overlap",
+ },
+ "outcome": "housing_units",
+}
+
+load_start = time.time()
+with db_connection() as conn:
+ subset = select_from_sql(
+ "select * from dev.tracts_model__census_tracts order by census_tract, year",
+ conn,
+ kwargs,
+ )
+load_end = time.time()
+print(f"Data loaded in {load_end - load_start} seconds")
+
+#############################
+# instantiate and train model
+#############################
+
+# interaction terms
+ins = [
+ ("university_overlap", "limit"),
+ ("downtown_overlap", "limit"),
+ ("distance", "downtown_overlap"),
+ ("distance", "university_overlap"),
+ ("distance", "limit"),
+ ("median_value", "segregation"),
+ ("distance", "segregation"),
+ ("limit", "sqm"),
+ ("segregation", "sqm"),
+ ("distance", "white"),
+ ("income", "limit"),
+ ("downtown_overlap", "median_value"),
+ ("downtown_overlap", "segregation"),
+ ("median_value", "white"),
+ ("distance", "income"),
+]
+
+# model
+tracts_model = TractsModel(
+ **subset,
+ categorical_levels={
+ "year": torch.unique(subset["categorical"]["year"]),
+ "census_tract": torch.unique(subset["categorical"]["census_tract"]),
+ },
+ housing_units_continuous_interaction_pairs=ins,
+)
+
+pyro.clear_param_store()
+
+guide = run_svi_inference(tracts_model, n_steps=n_steps, lr=0.03, plot=False, **subset)
+
+##########################################
+# save guide and params in the same folder
+##########################################
+root = find_repo_root()
+
+deploy_path = os.path.join(root, "cities/deployment/tracts_minneapolis")
+guide_path = os.path.join(deploy_path, "tracts_model_guide.pkl")
+param_path = os.path.join(deploy_path, "tracts_model_params.pth")
+
+serialized_guide = dill.dumps(guide)
+with open(guide_path, "wb") as file:
+ file.write(serialized_guide)
+
+with open(param_path, "wb") as file:
+ pyro.get_param_store().save(param_path)
diff --git a/build/cities/modeling/__init__.py b/build/cities/modeling/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/build/cities/modeling/evaluation.py b/build/cities/modeling/evaluation.py
new file mode 100644
index 00000000..5613ca54
--- /dev/null
+++ b/build/cities/modeling/evaluation.py
@@ -0,0 +1,300 @@
+import copy
+import os
+from typing import Any, Callable, Dict, Optional, Tuple, Union
+
+import matplotlib.pyplot as plt
+import pyro
+import seaborn as sns
+import torch
+from pyro.infer import Predictive
+from torch.utils.data import DataLoader, random_split
+
+from cities.modeling.svi_inference import run_svi_inference
+from cities.utils.data_grabber import find_repo_root
+from cities.utils.data_loader import select_from_data
+
+root = find_repo_root()
+
+
+def prep_data_for_test(
+ data_path: Optional[str] = None, train_size: float = 0.8
+) -> Tuple[DataLoader, DataLoader, list]:
+
+ if data_path is None:
+ data_path = os.path.join(root, "data/minneapolis/processed/zoning_dataset.pt")
+ zoning_dataset_read = torch.load(data_path)
+
+ train_size = int(train_size * len(zoning_dataset_read))
+ test_size = len(zoning_dataset_read) - train_size
+
+ train_dataset, test_dataset = random_split(
+ zoning_dataset_read, [train_size, test_size]
+ )
+
+ train_loader = DataLoader(train_dataset, batch_size=train_size, shuffle=True)
+ test_loader = DataLoader(test_dataset, batch_size=test_size, shuffle=False)
+
+ categorical_levels = zoning_dataset_read.categorical_levels
+
+ return train_loader, test_loader, categorical_levels
+
+
+def recode_categorical(
+ kwarg_names: Dict[str, Any], train_loader: DataLoader, test_loader: DataLoader
+) -> Tuple[Dict[str, Dict[str, torch.Tensor]], Dict[str, Dict[str, torch.Tensor]]]:
+
+ assert all(
+ item in kwarg_names.keys() for item in ["categorical", "continuous", "outcome"]
+ )
+
+ train_data = next(iter(train_loader))
+ test_data = next(iter(test_loader))
+
+ _train_data = select_from_data(train_data, kwarg_names)
+ _test_data = select_from_data(test_data, kwarg_names)
+
+ ####################################################
+ # eliminate test categories not in the training data
+ ####################################################
+ def apply_mask(data, mask):
+ return {key: val[mask] for key, val in data.items()}
+
+ mask = torch.ones(len(_test_data["outcome"]), dtype=torch.bool)
+ for key, value in _test_data["categorical"].items():
+ mask = mask * torch.isin(
+ _test_data["categorical"][key], (_train_data["categorical"][key].unique())
+ )
+
+ _test_data["categorical"] = apply_mask(_test_data["categorical"], mask)
+ _test_data["continuous"] = apply_mask(_test_data["continuous"], mask)
+ _test_data["outcome"] = _test_data["outcome"][mask]
+
+ for key in _test_data["categorical"].keys():
+ assert _test_data["categorical"][key].shape[0] == mask.sum()
+ for key in _test_data["continuous"].keys():
+ assert _test_data["continuous"][key].shape[0] == mask.sum()
+
+ # raise error if sum(mask) < .5 * len(test_data['outcome'])
+ if sum(mask) < 0.5 * len(_test_data["outcome"]):
+ raise ValueError(
+ "Sampled test data has too many new categorical levels, consider decreasing train size"
+ )
+
+ # ####################################
+ # recode categorical variables to have
+ # no index gaps in the training data
+ # ####################################
+
+ mappings = {}
+ for name in _train_data["categorical"].keys():
+ unique_train = torch.unique(_train_data["categorical"][name])
+ mappings[name] = {v.item(): i for i, v in enumerate(unique_train)}
+ _train_data["categorical"][name] = torch.tensor(
+ [mappings[name][x.item()] for x in _train_data["categorical"][name]]
+ )
+ _test_data["categorical"][name] = torch.tensor(
+ [mappings[name][x.item()] for x in _test_data["categorical"][name]]
+ )
+
+ return _train_data, _test_data
+
+
+def test_performance(
+ model_or_class: Union[Callable[..., Any], Any],
+ kwarg_names: Dict[str, Any],
+ train_loader: DataLoader,
+ test_loader: DataLoader,
+ categorical_levels: Dict[str, torch.Tensor],
+ outcome_type: str = "outcome",
+ outcome_name: str = "outcome",
+ n_steps: int = 600,
+ plot: bool = True,
+ lim: Optional[Tuple[float, float]] = None,
+ is_class: bool = True,
+) -> Dict[str, float]:
+
+ _train_data, _test_data = recode_categorical(kwarg_names, train_loader, test_loader)
+
+ pyro.clear_param_store()
+
+ ######################
+ # train and test
+ ######################
+
+ if is_class:
+ model = model_or_class(**_train_data)
+
+ else:
+ model = model_or_class
+
+ guide = run_svi_inference(
+ model, n_steps=n_steps, lr=0.01, verbose=True, **_train_data
+ )
+
+ predictive = Predictive(model, guide=guide, num_samples=1000)
+
+ categorical_levels = model.categorical_levels
+
+ _train_data_for_preds = copy.deepcopy(_train_data)
+ _test_data_for_preds = copy.deepcopy(_test_data)
+
+ if outcome_type != "outcome":
+ _train_data_for_preds[outcome_type][outcome_name] = None # type: ignore
+ _test_data_for_preds[outcome_type][outcome_name] = None # type: ignore
+
+ else:
+ _train_data_for_preds[outcome_type] = None # type: ignore
+
+ samples_train = predictive(
+ **_train_data_for_preds,
+ categorical_levels=categorical_levels,
+ )
+
+ samples_test = predictive(
+ **_test_data_for_preds,
+ categorical_levels=categorical_levels,
+ )
+
+ train_predicted_mean = samples_train[outcome_name].squeeze().mean(dim=0)
+ train_predicted_lower = samples_train[outcome_name].squeeze().quantile(0.05, dim=0)
+ train_predicted_upper = samples_train[outcome_name].squeeze().quantile(0.95, dim=0)
+
+ coverage_training = (
+ _train_data[outcome_type][outcome_name]
+ .squeeze()
+ .gt(train_predicted_lower)
+ .float()
+ * _train_data[outcome_type][outcome_name]
+ .squeeze()
+ .lt(train_predicted_upper)
+ .float()
+ )
+
+ null_residuals_train = (
+ _train_data[outcome_type][outcome_name].squeeze()
+ - _train_data[outcome_type][outcome_name].squeeze().mean()
+ )
+
+ null_mae_train = torch.abs(null_residuals_train).mean().item()
+
+ residuals_train = (
+ _train_data[outcome_type][outcome_name].squeeze() - train_predicted_mean
+ )
+ mae_train = torch.abs(residuals_train).mean().item()
+
+ rsquared_train = (
+ 1
+ - residuals_train.var()
+ / _train_data[outcome_type][outcome_name].squeeze().var()
+ )
+
+ test_predicted_mean = samples_test[outcome_name].squeeze().mean(dim=0)
+ test_predicted_lower = samples_test[outcome_name].squeeze().quantile(0.05, dim=0)
+ test_predicted_upper = samples_test[outcome_name].squeeze().quantile(0.95, dim=0)
+
+ coverage_test = (
+ _test_data[outcome_type][outcome_name]
+ .squeeze()
+ .gt(test_predicted_lower)
+ .float()
+ * _test_data[outcome_type][outcome_name]
+ .squeeze()
+ .lt(test_predicted_upper)
+ .float()
+ )
+
+ null_residuals_test = (
+ _test_data[outcome_type][outcome_name].squeeze()
+ - _test_data[outcome_type][outcome_name].squeeze().mean()
+ )
+
+ null_mae_test = torch.abs(null_residuals_test).mean().item()
+
+ residuals_test = (
+ _test_data[outcome_type][outcome_name].squeeze() - test_predicted_mean
+ )
+ mae_test = torch.abs(residuals_test).mean().item()
+
+ rsquared_test = (
+ 1
+ - residuals_test.var() / _test_data[outcome_type][outcome_name].squeeze().var()
+ )
+
+ print(rsquared_train, rsquared_test)
+
+ if plot:
+ fig, axs = plt.subplots(2, 2, figsize=(14, 10))
+
+ axs[0, 0].scatter(
+ x=_train_data[outcome_type][outcome_name],
+ y=train_predicted_mean,
+ s=6,
+ alpha=0.5,
+ )
+ axs[0, 0].set_title(
+ "Training data, ratio of outcomes within 95% CI: {:.2f}".format(
+ coverage_training.mean().item()
+ )
+ )
+
+ if lim is not None:
+ axs[0, 0].set_xlim(lim)
+ axs[0, 0].set_ylim(lim)
+ axs[0, 0].set_xlabel("observed values")
+ axs[0, 0].set_ylabel("mean predicted values")
+
+ axs[0, 1].hist(residuals_train, bins=50)
+
+ axs[0, 1].set_title(
+ "Training set residuals, MAE (null): {:.2f} ({:.2f}), Rsquared: {:.2f}".format(
+ mae_train, null_mae_train, rsquared_train.item()
+ )
+ )
+ axs[0, 1].set_xlabel("residuals")
+ axs[0, 1].set_ylabel("frequency")
+
+ axs[1, 0].scatter(
+ x=_test_data[outcome_type][outcome_name],
+ y=test_predicted_mean,
+ s=6,
+ alpha=0.5,
+ )
+ axs[1, 0].set_title(
+ "Test data, ratio of outcomes within 95% CI: {:.2f}".format(
+ coverage_test.mean().item()
+ )
+ )
+ axs[1, 0].set_xlabel("true values")
+ axs[1, 0].set_ylabel("mean predicted values")
+ if lim is not None:
+ axs[1, 0].set_xlim(lim)
+ axs[1, 0].set_ylim(lim)
+
+ axs[1, 1].hist(residuals_test, bins=50)
+
+ axs[1, 1].set_title(
+ "Test set residuals, MAE (null): {:.2f} ({:.2f}), Rsquared: {:.2f}".format(
+ mae_test, null_mae_test, rsquared_test.item()
+ )
+ )
+
+ axs[1, 1].set_xlabel("residuals")
+ axs[1, 1].set_ylabel("frequency")
+
+ plt.tight_layout(rect=(0, 0, 1, 0.96))
+ sns.despine()
+
+ fig.suptitle("Model evaluation", fontsize=16)
+
+ plt.show()
+
+ return {
+ "mae_null_train": null_mae_train,
+ "mae_null_test": null_mae_test,
+ "mae_train": mae_train,
+ "mae_test": mae_test,
+ "rsquared_train": rsquared_train,
+ "rsquared_test": rsquared_test,
+ "coverage_train": coverage_training.mean().item(),
+ "coverage_test": coverage_test.mean().item(),
+ }
diff --git a/build/cities/modeling/model_components.py b/build/cities/modeling/model_components.py
new file mode 100644
index 00000000..c914bb41
--- /dev/null
+++ b/build/cities/modeling/model_components.py
@@ -0,0 +1,351 @@
+from typing import Dict, List, Optional, Tuple
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+
+def get_n(categorical: Dict[str, torch.Tensor], continuous: Dict[str, torch.Tensor]):
+ N_categorical = len(categorical)
+ N_continuous = len(continuous)
+
+    # a bit convoluted, but groups might be missing and sometimes
+    # vars are allowed to be None
+ n_cat = None
+ if N_categorical > 0:
+ for value in categorical.values():
+ if value is not None:
+ n_cat = value.shape[0]
+ break
+
+ n_con = None
+ if N_continuous > 0:
+ for value in continuous.values():
+ if value is not None:
+ n_con = value.shape[0]
+ break
+
+ if N_categorical > 0 and N_continuous > 0:
+ if n_cat != n_con:
+ raise ValueError(
+ "The number of categorical and continuous data points must be the same"
+ )
+
+ n = n_cat if n_cat is not None else n_con
+
+ if n is None:
+ raise ValueError("Both categorical and continuous dictionaries are empty.")
+
+ return N_categorical, N_continuous, n
+
+
+def check_categorical_is_subset_of_levels(categorical, categorical_levels):
+
+ assert set(categorical.keys()).issubset(set(categorical_levels.keys()))
+
+ # # TODO should these be subsets or can we only check lengths?
+
+ return True
+
+
+def get_categorical_levels(categorical):
+ """
+ Assumes that no levels are missing from the categorical data, and constructs the levels from the unique values.
+ This should only be used with supersets of all data (so that every data subset will have its levels represented
+    in the levels returned here.)
+ """
+ return {name: torch.unique(categorical[name]) for name in categorical.keys()}
+
+
+def categorical_contribution(
+ categorical: Dict[str, torch.Tensor],
+ child_name: str,
+ leeway: float,
+ categorical_levels: Dict[str, torch.Tensor],
+) -> torch.Tensor:
+
+ check_categorical_is_subset_of_levels(categorical, categorical_levels)
+
+ categorical_names = list(categorical.keys())
+
+ weights_categorical_outcome = {}
+ objects_cat_weighted = {}
+
+ for name in categorical_names:
+ weights_categorical_outcome[name] = pyro.sample(
+ f"weights_categorical_{name}_{child_name}",
+ dist.Normal(0.0, leeway).expand(categorical_levels[name].shape).to_event(1),
+ )
+
+ if len(weights_categorical_outcome[name].shape) > 1:
+ weights_categorical_outcome[name] = weights_categorical_outcome[
+ name
+ ].squeeze(-2)
+
+ final_nonevent_shape = torch.broadcast_shapes(
+ categorical[name].shape[:-1], weights_categorical_outcome[name].shape[:-1]
+ )
+ expanded_weight_indices = categorical[name].expand(*final_nonevent_shape, -1)
+ expanded_weights = weights_categorical_outcome[name].expand(
+ *final_nonevent_shape, -1
+ )
+
+ objects_cat_weighted[name] = torch.gather(
+ expanded_weights, dim=-1, index=expanded_weight_indices
+ )
+
+ # weight_indices = categorical[name].expand(
+ # *weights_categorical_outcome[name].shape[:-1], -1
+ # )
+
+ # objects_cat_weighted[name] = torch.gather(
+ # weights_categorical_outcome[name], dim=-1, index=weight_indices
+ # )
+
+ values = list(objects_cat_weighted.values())
+
+ categorical_contribution_outcome = torch.stack(values, dim=0).sum(dim=0)
+
+ return categorical_contribution_outcome
+
+
+def continuous_contribution(
+ continuous: Dict[str, torch.Tensor],
+ child_name: str,
+ leeway: float,
+) -> torch.Tensor:
+
+ contributions = torch.zeros(1)
+
+ bias_continuous = pyro.sample(
+ f"bias_continuous_{child_name}",
+ dist.Normal(0.0, leeway),
+ )
+
+ for key, value in continuous.items():
+
+ weight_continuous = pyro.sample(
+ f"weight_continuous_{key}_to_{child_name}",
+ dist.Normal(0.0, leeway),
+ )
+
+ contribution = weight_continuous * value
+ contributions = contribution + contributions
+
+ contributions = bias_continuous + contributions
+
+ return contributions
+
+
+def add_linear_component(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ leeway: float,
+ data_plate,
+ categorical_levels: Dict[str, torch.Tensor],
+ observations: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+
+ sigma_child = pyro.sample(
+ f"sigma_{child_name}", dist.Exponential(1.0)
+ ) # type: ignore
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway=leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ continuous_contribution_to_child + categorical_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample( # type: ignore
+ f"{child_name}",
+ dist.Normal(mean_prediction_child, sigma_child),
+ obs=observations,
+ )
+
+ return child_observed
+
+
+def add_linear_component_continuous_interactions(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ continous_interaction_pairs: List[Tuple[str, str]],
+ leeway: float,
+ data_plate,
+ categorical_levels: Dict[str, torch.Tensor],
+ observations: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+
+ if continous_interaction_pairs == [("all", "all")]:
+ continous_interaction_pairs = [
+ (key1, key2)
+ for key1 in child_continuous_parents.keys()
+ for key2 in child_continuous_parents.keys()
+ if key1 != key2
+ ]
+
+ for interaction_pair in continous_interaction_pairs:
+ assert interaction_pair[0] in child_continuous_parents.keys()
+ assert interaction_pair[1] in child_continuous_parents.keys()
+
+ interaction_name = f"{interaction_pair[0]}_x_{interaction_pair[1]}_to_{child_name}"
+
+ with data_plate:
+ child_continuous_parents[interaction_name] = pyro.deterministic(
+ interaction_name,
+ child_continuous_parents[interaction_pair[0]]
+ * child_continuous_parents[interaction_pair[1]],
+ event_dim=0,
+ )
+
+ child_observed = add_linear_component(
+ child_name=child_name,
+ child_continuous_parents=child_continuous_parents,
+ child_categorical_parents=child_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ categorical_levels=categorical_levels,
+ observations=observations,
+ )
+
+ return child_observed
+
+
+def add_logistic_component(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ leeway: float,
+ data_plate,
+ categorical_levels: Dict[str, torch.Tensor],
+ observations: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ categorical_contribution_to_child + continuous_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_probs = pyro.deterministic(
+ f"child_probs_{child_name}",
+ torch.sigmoid(mean_prediction_child),
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample(
+ f"{child_name}",
+ dist.Bernoulli(child_probs),
+ obs=observations,
+ )
+
+ return child_observed
+
+
+def add_ratio_component(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ leeway: float,
+ data_plate,
+ categorical_levels: Dict[str, torch.Tensor],
+ observations: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ sigma_child = pyro.sample(f"sigma_{child_name}", dist.Exponential(40.0))
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ categorical_contribution_to_child + continuous_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_probs = pyro.deterministic(
+ f"child_probs_{child_name}",
+ torch.sigmoid(mean_prediction_child),
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample(
+ child_name, dist.Normal(child_probs, sigma_child), obs=observations
+ )
+
+ return child_observed
+
+
+def add_ratio_component_continuous_interactions(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ continous_interaction_pairs: List[Tuple[str, str]],
+ leeway: float,
+ data_plate,
+ categorical_levels: Dict[str, torch.Tensor],
+ observations: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+
+ for interaction_pair in continous_interaction_pairs:
+ assert interaction_pair[0] in child_continuous_parents.keys()
+ assert interaction_pair[1] in child_continuous_parents.keys()
+
+ interaction_name = f"{interaction_pair[0]}_x_{interaction_pair[1]}_to_{child_name}"
+
+ with data_plate:
+ child_continuous_parents[interaction_name] = pyro.deterministic(
+ interaction_name,
+ child_continuous_parents[interaction_pair[0]]
+ * child_continuous_parents[interaction_pair[1]],
+ event_dim=0,
+ )
+
+ child_observed = add_ratio_component(
+ child_name=child_name,
+ child_continuous_parents=child_continuous_parents,
+ child_categorical_parents=child_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ categorical_levels=categorical_levels,
+ observations=observations,
+ )
+
+ return child_observed
diff --git a/build/cities/modeling/model_interactions.py b/build/cities/modeling/model_interactions.py
new file mode 100644
index 00000000..2446d6d5
--- /dev/null
+++ b/build/cities/modeling/model_interactions.py
@@ -0,0 +1,181 @@
+import logging
+import os
+from typing import Optional
+
+import dill
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.modeling_utils import (
+ prep_wide_data_for_inference,
+ train_interactions_model,
+)
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+
+class InteractionsModel:
+ def __init__(
+ self,
+ outcome_dataset: str,
+ intervention_dataset: str,
+ intervention_variable: Optional[str] = None,
+ forward_shift: int = 2,
+ num_iterations: int = 1500,
+ num_samples: int = 1000,
+ plot_loss: bool = False,
+ ):
+ self.outcome_dataset = outcome_dataset
+ self.intervention_dataset = intervention_dataset
+ self.forward_shift = forward_shift
+ self.num_iterations = num_iterations
+ self.num_samples = num_samples
+ self.plot_loss = plot_loss
+ self.root = find_repo_root()
+
+ if intervention_variable:
+ self.intervention_variable = intervention_variable
+ else:
+ _dg = DataGrabber()
+ _dg.get_features_std_long([intervention_dataset])
+ self.intervention_variable = _dg.std_long[intervention_dataset].columns[-1]
+
+ self.data = prep_wide_data_for_inference(
+ outcome_dataset=self.outcome_dataset,
+ intervention_dataset=self.intervention_dataset,
+ forward_shift=self.forward_shift,
+ )
+
+ self.model = model_cities_interaction
+
+ self.model_args = self.data["model_args"]
+
+ self.model_conditioned = pyro.condition( # type: ignore
+ self.model,
+ data={"T": self.data["t"], "Y": self.data["y"], "X": self.data["x"]},
+ )
+
+ self.model_rendering = pyro.render_model( # type: ignore
+ self.model, model_args=self.model_args, render_distributions=True
+ )
+
+ def train_interactions_model(self):
+ self.guide = train_interactions_model(
+ conditioned_model=self.model_conditioned,
+ model_args=self.model_args,
+ num_iterations=self.num_iterations,
+ plot_loss=self.plot_loss,
+ )
+
+ def sample_from_guide(self):
+ predictive = pyro.infer.Predictive(
+ model=self.model,
+ guide=self.guide,
+ num_samples=self.num_samples,
+ parallel=False,
+ )
+ self.samples = predictive(*self.model_args)
+
+ def save_guide(self):
+ guide_name = (
+ f"{self.intervention_dataset}_{self.outcome_dataset}_{self.forward_shift}"
+ )
+ serialized_guide = dill.dumps(self.guide)
+ file_path = os.path.join(
+ self.root, "data/model_guides", f"{guide_name}_guide.pkl"
+ )
+ with open(file_path, "wb") as file:
+ file.write(serialized_guide)
+ param_path = os.path.join(
+ self.root, "data/model_guides", f"{guide_name}_params.pth"
+ )
+ pyro.get_param_store().save(param_path)
+
+ logging.info(
+ f"Guide and params for {self.intervention_dataset}",
+ f"{self.outcome_dataset} with shift {self.forward_shift}",
+ "has been saved.",
+ )
+
+
+def model_cities_interaction(
+ N_t,
+ N_cov,
+ N_s,
+ N_u,
+ state_index,
+ unit_index,
+ leeway=0.9,
+):
+ bias_Y = pyro.sample("bias_Y", dist.Normal(0, leeway))
+ bias_T = pyro.sample("bias_T", dist.Normal(0, leeway))
+
+ weight_TY = pyro.sample("weight_TY", dist.Normal(0, leeway))
+
+ sigma_T = pyro.sample("sigma_T", dist.Exponential(1))
+ sigma_Y = pyro.sample("sigma_Y", dist.Exponential(1))
+
+ counties_plate = pyro.plate("counties_plate", N_u, dim=-1)
+ states_plate = pyro.plate("states_plate", N_s, dim=-2)
+ covariates_plate = pyro.plate("covariates_plate", N_cov, dim=-3)
+ time_plate = pyro.plate("time_plate", N_t, dim=-4)
+
+ with covariates_plate:
+ bias_X = pyro.sample("bias_X", dist.Normal(0, leeway))
+ sigma_X = pyro.sample("sigma_X", dist.Exponential(1))
+ weight_XT = pyro.sample("weight_XT", dist.Normal(0, leeway))
+ weight_XY = pyro.sample("weight_XY", dist.Normal(0, leeway))
+
+ with states_plate:
+ bias_stateT = pyro.sample("bias_stateT", dist.Normal(0, leeway))
+ bias_stateY = pyro.sample("bias_stateY", dist.Normal(0, leeway))
+
+ with covariates_plate:
+ bias_stateX = pyro.sample("bias_stateX", dist.Normal(0, leeway))
+
+ with time_plate:
+ bias_timeT = pyro.sample("bias_timeT", dist.Normal(0, leeway))
+ bias_timeY = pyro.sample("bias_timeY", dist.Normal(0, leeway))
+
+ with counties_plate:
+ with covariates_plate:
+ mean_X = pyro.deterministic(
+ "mean_X",
+ torch.einsum(
+ "...xdd,...xcd->...xdc", bias_X, bias_stateX[..., state_index, :]
+ ),
+ )
+
+ X = pyro.sample("X", dist.Normal(mean_X[..., unit_index], sigma_X))
+
+ XT_weighted = torch.einsum(
+ "...xdc, ...xdd -> ...dc", X, weight_XT
+ ).unsqueeze(-2)
+ XY_weighted = torch.einsum(
+ "...xdc, ...xdd -> ...dc", X, weight_XY
+ ).unsqueeze(-2)
+
+ with time_plate:
+ bias_stateT_tiled = pyro.deterministic(
+ "bias_stateT_tiled",
+ torch.einsum("...cd -> ...dc", bias_stateT[..., state_index, :]),
+ )
+
+ mean_T = pyro.deterministic(
+ "mean_T", bias_T + bias_timeT + bias_stateT_tiled + XT_weighted
+ )
+
+ T = pyro.sample("T", dist.Normal(mean_T, sigma_T))
+
+ bias_stateY_tiled = pyro.deterministic(
+ "bias_stateY_tiled",
+ torch.einsum("...cd -> ...dc", bias_stateY[..., state_index, :]),
+ )
+
+ mean_Y = pyro.deterministic(
+ "mean_Y",
+ bias_Y + bias_timeY + bias_stateY_tiled + XY_weighted + weight_TY * T,
+ )
+ Y = pyro.sample("Y", dist.Normal(mean_Y, sigma_Y))
+
+ return Y
diff --git a/build/cities/modeling/modeling_utils.py b/build/cities/modeling/modeling_utils.py
new file mode 100644
index 00000000..55aaccc6
--- /dev/null
+++ b/build/cities/modeling/modeling_utils.py
@@ -0,0 +1,403 @@
+from typing import Callable
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import pyro
+import torch
+from pyro.infer import SVI, Trace_ELBO
+from pyro.infer.autoguide import AutoNormal
+from pyro.optim import Adam # type: ignore
+from scipy.stats import spearmanr
+
+from cities.utils.data_grabber import (
+ DataGrabber,
+ list_available_features,
+ list_tensed_features,
+)
+
+
+def drop_high_correlation(df, threshold=0.85):
+ df_var = df.iloc[:, 2:].copy()
+ correlation_matrix, _ = spearmanr(df_var)
+
+ high_correlation_pairs = [
+ (df_var.columns[i], df_var.columns[j])
+ for i in range(df_var.shape[1])
+ for j in range(i + 1, df_var.shape[1])
+ if abs(correlation_matrix[i, j]) > threshold
+ and abs(correlation_matrix[i, j]) < 1.0
+ ]
+ high_correlation_pairs = [
+ (var1, var2) for var1, var2 in high_correlation_pairs if var1 != var2
+ ]
+
+ removed = set()
+ print(
+ f"Highly correlated pairs: {high_correlation_pairs}, second elements will be dropped"
+ )
+ for var1, var2 in high_correlation_pairs:
+ assert var2 in df_var.columns
+ for var1, var2 in high_correlation_pairs:
+ if var2 in df_var.columns:
+ removed.add(var2)
+ df_var.drop(var2, axis=1, inplace=True)
+
+ result = pd.concat([df.iloc[:, :2], df_var], axis=1)
+ print(f"Removed {removed} due to correlation > {threshold}")
+ return result
+
+
+def prep_wide_data_for_inference(
+ outcome_dataset: str, intervention_dataset: str, forward_shift: int
+):
+ """
+ Prepares wide-format data for causal inference modeling.
+
+ Parameters:
+ - outcome_dataset (str): Name of the outcome variable.
+ - intervention_dataset (str): Name of the intervention variable.
+ - forward_shift (int): Number of time steps to shift the outcome variable for prediction.
+
+ Returns:
+ dict: A dictionary containing the necessary inputs for causal inference modeling.
+
+ The function performs the following steps:
+ 1. Identifies available device (GPU if available, otherwise CPU), to be used with tensors.
+ 2. Uses a DataGrabber class to obtain standardized wide-format data.
+ 3. Separates covariate datasets into time series (tensed) and fixed covariates.
+ 4. Loads the required transformed features.
+ 5. Merges fixed covariates into a joint dataframe based on a common ID column.
+ 6. Ensures that the GeoFIPS (geographical identifier) is consistent across datasets.
+ 7. Extracts common years for which both intervention and outcome data are available.
+ 8. Shifts the outcome variable forward by the specified number of time steps.
+ 9. Prepares tensors for input features (x), interventions (t), and outcomes (y).
+ 10. Creates indices for states and units, preparing them as tensors.
+ 11. Validates the shapes of the tensors.
+ 12. Constructs a dictionary containing model arguments and prepared tensors.
+
+ Example usage:
+ prep_data = prep_wide_data_for_inference("outcome_data", "intervention_data", 2)
+ """
+ if torch.cuda.is_available():
+ device = torch.device("cuda")
+ else:
+ device = torch.device("cpu")
+
+ dg = DataGrabber()
+
+ tensed_covariates_datasets = [
+ var
+ for var in list_tensed_features()
+ if var not in [outcome_dataset, intervention_dataset]
+ ]
+ fixed_covariates_datasets = [
+ var
+ for var in list_available_features()
+ if var
+ not in tensed_covariates_datasets + [outcome_dataset, intervention_dataset]
+ ]
+
+ features_needed = [
+ outcome_dataset,
+ intervention_dataset,
+ ] + fixed_covariates_datasets
+
+ dg.get_features_std_wide(features_needed)
+
+ intervention = dg.std_wide[intervention_dataset]
+ outcome = dg.std_wide[outcome_dataset]
+
+ # put covariates in one df as columns, dropping repeated ID columns
+ f_covariates = {
+ dataset: dg.std_wide[dataset] for dataset in fixed_covariates_datasets
+ }
+ f_covariates_joint = f_covariates[fixed_covariates_datasets[0]]
+ for dataset in f_covariates.keys():
+ if dataset != fixed_covariates_datasets[0]:
+ if "GeoName" in f_covariates[dataset].columns:
+ f_covariates[dataset] = f_covariates[dataset].drop(columns=["GeoName"])
+ f_covariates_joint = f_covariates_joint.merge(
+ f_covariates[dataset], on=["GeoFIPS"]
+ )
+
+ f_covariates_joint = drop_high_correlation(f_covariates_joint)
+
+ assert f_covariates_joint["GeoFIPS"].equals(intervention["GeoFIPS"])
+
+ # extract data for which intervention and outcome overlap
+ year_min = max(
+ intervention.columns[2:].astype(int).min(),
+ outcome.columns[2:].astype(int).min(),
+ )
+
+ year_max = min(
+ intervention.columns[2:].astype(int).max(),
+ outcome.columns[2:].astype(int).max(),
+ )
+
+ assert all(intervention["GeoFIPS"] == outcome["GeoFIPS"])
+
+ outcome_years_to_keep = [
+ year
+ for year in outcome.columns[2:]
+ if year_min <= int(year) <= year_max + forward_shift
+ ]
+
+ outcome_years_to_keep = [
+ year for year in outcome_years_to_keep if year in intervention.columns[2:]
+ ]
+
+ outcome = outcome[outcome_years_to_keep]
+
+ # shift outcome `forward_shift` steps ahead
+ # for the prediction task
+ outcome_shifted = outcome.copy()
+
+ for i in range(len(outcome_years_to_keep) - forward_shift):
+ outcome_shifted.iloc[:, i] = outcome_shifted.iloc[:, i + forward_shift]
+
+ years_to_drop = [
+ f"{year}" for year in range(year_max - forward_shift + 1, year_max + 1)
+ ]
+ outcome_shifted.drop(columns=years_to_drop, inplace=True)
+
+ intervention.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
+ intervention = intervention[outcome_shifted.columns]
+
+ assert intervention.shape == outcome_shifted.shape
+
+ years_available = outcome_shifted.columns.astype(int).values
+
+ unit_index = pd.factorize(f_covariates_joint["GeoFIPS"].values)[0]
+ state_index = pd.factorize(f_covariates_joint["GeoFIPS"].values // 1000)[0]
+
+ # prepare tensors
+ x = torch.tensor(
+ f_covariates_joint.iloc[:, 2:].values, dtype=torch.float32, device=device
+ )
+ x = x.unsqueeze(1).unsqueeze(1).permute(2, 3, 1, 0)
+
+ t = torch.tensor(intervention.values, dtype=torch.float32, device=device)
+ t = t.unsqueeze(1).unsqueeze(1).permute(3, 1, 2, 0)
+
+ y = torch.tensor(outcome_shifted.values, dtype=torch.float32, device=device)
+ y = y.unsqueeze(1).unsqueeze(1).permute(3, 1, 2, 0)
+
+ state_index = torch.tensor(state_index, dtype=torch.int, device=device)
+ unit_index = torch.tensor(unit_index, dtype=torch.int, device=device)
+
+ N_t = y.shape[0]
+ N_cov = x.shape[1]
+ N_s = state_index.unique().shape[0]
+ N_u = unit_index.unique().shape[0]
+
+ assert x.shape == (1, N_cov, 1, N_u)
+ assert y.shape == (N_t, 1, 1, N_u)
+ assert t.shape == (N_t, 1, 1, N_u)
+
+ model_args = (N_t, N_cov, N_s, N_u, state_index, unit_index)
+
+ return {
+ "model_args": model_args,
+ "x": x,
+ "t": t,
+ "y": y,
+ "years_available": years_available,
+ "outcome_years": outcome_years_to_keep,
+ "covariates_df": f_covariates_joint,
+ }
+
+
+def train_interactions_model(
+ conditioned_model: Callable,
+ model_args,
+ num_iterations: int = 1000,
+ plot_loss: bool = True,
+ print_interval: int = 100,
+ lr: float = 0.01,
+):
+ guide = None
+ pyro.clear_param_store() # type: ignore
+
+ guide = AutoNormal(conditioned_model)
+
+ svi = SVI(
+ model=conditioned_model, guide=guide, optim=Adam({"lr": lr}), loss=Trace_ELBO()
+ )
+
+ losses = []
+ for step in range(num_iterations):
+ loss = svi.step(*model_args)
+ losses.append(loss)
+ if step % print_interval == 0:
+ print("[iteration %04d] loss: %.4f" % (step + 1, loss))
+
+ if plot_loss:
+ plt.plot(range(num_iterations), losses, label="Loss")
+ plt.show()
+
+ return guide
+
+
+def prep_data_for_interaction_inference(
+ outcome_dataset, intervention_dataset, intervention_variable, forward_shift
+):
+ dg = DataGrabber()
+
+ tensed_covariates_datasets = [
+ var
+ for var in list_tensed_features()
+ if var not in [outcome_dataset, intervention_dataset]
+ ]
+ fixed_covariates_datasets = [
+ var
+ for var in list_available_features()
+ if var
+ not in tensed_covariates_datasets + [outcome_dataset, intervention_dataset]
+ ]
+
+ dg.get_features_std_long(list_available_features())
+ dg.get_features_std_wide(list_available_features())
+
+ year_min = max(
+ dg.std_long[intervention_dataset]["Year"].min(),
+ dg.std_long[outcome_dataset]["Year"].min(),
+ )
+ year_max = min(
+ dg.std_long[intervention_dataset]["Year"].max(),
+ dg.std_long[outcome_dataset]["Year"].max(),
+ )
+ outcome_df = dg.std_long[outcome_dataset].sort_values(by=["GeoFIPS", "Year"])
+
+ # now we apply a forward shift to the outcome,
+ # cleaning up and putting intervention/outcome in one df
+ # and fixed covariates in another
+
+ outcome_df[f"{outcome_dataset}_shifted_by_{forward_shift}"] = None
+
+ geo_subsets = []
+ for geo_fips in outcome_df["GeoFIPS"].unique():
+ geo_subset = outcome_df[outcome_df["GeoFIPS"] == geo_fips].copy()
+ # Shift the 'Value' column by `forward_shift` into a new column
+ geo_subset[f"{outcome_dataset}_shifted_by_{forward_shift}"] = geo_subset[
+ "Value"
+ ].shift(-forward_shift)
+ geo_subsets.append(geo_subset)
+
+ outcome_df = pd.concat(geo_subsets).reset_index(drop=True)
+
+ outcome = outcome_df[
+ (outcome_df["Year"] >= year_min)
+ & (outcome_df["Year"] <= year_max + forward_shift)
+ ]
+
+ intervention = dg.std_long[intervention_dataset][
+ (dg.std_long[intervention_dataset]["Year"] >= year_min)
+ & (dg.std_long[intervention_dataset]["Year"] <= year_max)
+ ]
+ f_covariates = {
+ dataset: dg.std_wide[dataset] for dataset in fixed_covariates_datasets
+ }
+ f_covariates_joint = f_covariates[fixed_covariates_datasets[0]]
+ for dataset in f_covariates.keys():
+ if dataset != fixed_covariates_datasets[0]:
+ if "GeoName" in f_covariates[dataset].columns:
+ f_covariates[dataset] = f_covariates[dataset].drop(columns=["GeoName"])
+ f_covariates_joint = f_covariates_joint.merge(
+ f_covariates[dataset], on=["GeoFIPS"]
+ )
+
+ i_o_data = pd.merge(outcome, intervention, on=["GeoFIPS", "Year"])
+
+ if "GeoName_x" in i_o_data.columns:
+ i_o_data.rename(columns={"GeoName_x": "GeoName"}, inplace=True)
+ columns_to_drop = i_o_data.filter(regex=r"^GeoName_[a-zA-Z]$")
+ i_o_data.drop(columns=columns_to_drop.columns, inplace=True)
+
+ i_o_data.rename(columns={"Value": outcome_dataset}, inplace=True)
+
+ i_o_data["state"] = [code // 1000 for code in i_o_data["GeoFIPS"]]
+
+ N_s = len(i_o_data["state"].unique()) # number of states
+ i_o_data.dropna(inplace=True)
+
+ i_o_data["unit_index"] = pd.factorize(i_o_data["GeoFIPS"].values)[0]
+ i_o_data["state_index"] = pd.factorize(i_o_data["state"].values)[0]
+ i_o_data["time_index"] = pd.factorize(i_o_data["Year"].values)[0]
+
+ assert i_o_data["GeoFIPS"].isin(f_covariates_joint["GeoFIPS"]).all()
+
+ f_covariates_joint.drop(columns=["GeoName"], inplace=True)
+ data = i_o_data.merge(f_covariates_joint, on="GeoFIPS", how="left")
+
+ assert not data.isna().any().any()
+
+ time_index_idx = data.columns.get_loc("time_index")
+ covariates_df = data.iloc[:, time_index_idx + 1 :].copy()
+ covariates_df_sparse = covariates_df.copy()
+ covariates_df_sparse["unit_index"] = data["unit_index"]
+ covariates_df_sparse["state_index"] = data["state_index"]
+ covariates_df_sparse.drop_duplicates(inplace=True)
+ assert set(covariates_df_sparse["unit_index"]) == set(data["unit_index"])
+
+ # get tensors
+
+ if torch.cuda.is_available():
+ device = torch.device("cuda")
+ else:
+ device = torch.device("cpu")
+
+ y = data[f"{outcome_dataset}_shifted_by_{forward_shift}"]
+ y = torch.tensor(y, dtype=torch.float32, device=device)
+
+ unit_index = torch.tensor(data["unit_index"], dtype=torch.int, device=device)
+ unit_index_sparse = torch.tensor(
+ covariates_df_sparse["unit_index"], dtype=torch.int
+ )
+
+ state_index = torch.tensor(data["state_index"], dtype=torch.int, device=device)
+ state_index_sparse = torch.tensor(
+ covariates_df_sparse["state_index"], dtype=torch.int
+ )
+
+ time_index = torch.tensor(data["time_index"], dtype=torch.int, device=device)
+ intervention = torch.tensor(
+ data[intervention_variable], dtype=torch.float32, device=device
+ )
+
+ covariates = torch.tensor(covariates_df.values, dtype=torch.float32, device=device)
+
+ covariates_df_sparse.drop(columns=["unit_index", "state_index"], inplace=True)
+ covariates_sparse = torch.tensor(
+ covariates_df_sparse.values, dtype=torch.float32, device=device
+ )
+
+ N_cov = covariates.shape[1] # number of covariates
+ N_u = covariates_sparse.shape[0] # number of units (counties)
+ N_obs = len(y) # number of observations
+ N_t = len(time_index.unique()) # number of time points
+ N_s = len(state_index.unique()) # number of states
+
+ assert len(intervention) == len(y)
+ assert len(unit_index) == len(y)
+ assert len(state_index) == len(unit_index)
+ assert len(time_index) == len(unit_index)
+ assert covariates.shape[1] == covariates_sparse.shape[1]
+ assert len(unit_index_sparse) == N_u
+
+ return {
+ "N_t": N_t,
+ "N_cov": N_cov,
+ "N_s": N_s,
+ "N_u": N_u,
+ "N_obs": N_obs,
+ "unit_index": unit_index,
+ "state_index": state_index,
+ "time_index": time_index,
+ "unit_index_sparse": unit_index_sparse,
+ "state_index_sparse": state_index_sparse,
+ "covariates": covariates,
+ "covariates_sparse": covariates_sparse,
+ "intervention": intervention,
+ "y": y,
+ }
diff --git a/build/cities/modeling/svi_inference.py b/build/cities/modeling/svi_inference.py
new file mode 100644
index 00000000..8ccef03c
--- /dev/null
+++ b/build/cities/modeling/svi_inference.py
@@ -0,0 +1,44 @@
+import matplotlib.pyplot as plt
+import pyro
+import torch
+from pyro.infer.autoguide import AutoMultivariateNormal, init_to_mean
+
+
+def run_svi_inference(
+ model,
+ verbose=True,
+ lr=0.03,
+ vi_family=AutoMultivariateNormal,
+ guide=None,
+ hide=[],
+ n_steps=500,
+ ylim=None,
+ plot=True,
+ **model_kwargs
+):
+ losses = []
+ if guide is None:
+ guide = vi_family(
+ pyro.poutine.block(model, hide=hide), init_loc_fn=init_to_mean
+ )
+ elbo = pyro.infer.Trace_ELBO()(model, guide)
+
+ elbo(**model_kwargs)
+ adam = torch.optim.Adam(elbo.parameters(), lr=lr)
+
+ for step in range(1, n_steps + 1):
+ adam.zero_grad()
+ loss = elbo(**model_kwargs)
+ loss.backward()
+ losses.append(loss.item())
+ adam.step()
+ if (step % 50 == 0) or (step == 1) & verbose:
+ print("[iteration %04d] loss: %.4f" % (step, loss))
+
+ if plot:
+ plt.plot(losses)
+ if ylim:
+ plt.ylim(ylim)
+ plt.show()
+
+ return guide
diff --git a/build/cities/modeling/tau_caching_pipeline.py b/build/cities/modeling/tau_caching_pipeline.py
new file mode 100644
index 00000000..b517d522
--- /dev/null
+++ b/build/cities/modeling/tau_caching_pipeline.py
@@ -0,0 +1,88 @@
+import logging
+import os
+import time
+
+from cities.queries.causal_insight import CausalInsight
+from cities.utils.data_grabber import (
+ DataGrabber,
+ find_repo_root,
+ list_interventions,
+ list_outcomes,
+)
+
+root = find_repo_root()
+log_dir = os.path.join(root, "data", "tau_samples")
+log_file_path = os.path.join(log_dir, ".sampling.log")
+os.makedirs(log_dir, exist_ok=True)
+
+logging.basicConfig(
+ filename=log_file_path,
+ filemode="w",
+ format="%(asctime)s → %(name)s → %(levelname)s: %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ level=logging.INFO,
+)
+
+
+session_start = time.time()
+
+
+num_samples = 1000
+
+data = DataGrabber()
+
+interventions = list_interventions()
+outcomes = list_outcomes()
+
+
+N_combinations_samples = len(interventions) * len(outcomes)
+
+
+files = [f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, f))]
+num_files = len(files)
+
+logging.info(
+ f"{(num_files-2)} sample dictionaries already exist. "
+ f"Starting to obtain {N_combinations_samples - (num_files -2)}"
+ f" out of {N_combinations_samples} sample dictionaries needed."
+)
+remaining = N_combinations_samples - (num_files - 2)
+for intervention in interventions:
+ for outcome in outcomes:
+ tau_samples_path = os.path.join(
+ root,
+ "data/tau_samples",
+ f"{intervention}_{outcome}_{num_samples}_tau.pkl",
+ )
+
+ if not os.path.exists(tau_samples_path):
+ start_time = time.time()
+ logging.info(f"Sampling {outcome}/{intervention} pair now.")
+ ci = CausalInsight(
+ outcome_dataset=outcome,
+ intervention_dataset=intervention,
+ num_samples=num_samples,
+ )
+
+ ci.generate_tensed_samples()
+ end_time = time.time()
+ duration = end_time - start_time
+ files = [
+ f
+ for f in os.listdir(log_dir)
+ if os.path.isfile(os.path.join(log_dir, f))
+ ]
+ num_files = len(files)
+ remaining -= 1
+ logging.info(
+ f"Done sampling {outcome}/{intervention} pair, completed in {duration:.2f} seconds."
+ f" {remaining} out of {N_combinations_samples} samples remain."
+ )
+
+
+session_ends = time.time()
+
+logging.info(
+ f"All samples are now available."
+ f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start)/60:.2f} minutes."
+)
diff --git a/build/cities/modeling/training_pipeline.py b/build/cities/modeling/training_pipeline.py
new file mode 100644
index 00000000..3f4ebc72
--- /dev/null
+++ b/build/cities/modeling/training_pipeline.py
@@ -0,0 +1,90 @@
+import logging
+import os
+import sys
+import time
+
+from cities.modeling.model_interactions import InteractionsModel
+from cities.utils.data_grabber import find_repo_root, list_interventions, list_outcomes
+
+if __name__ != "__main__":
+ sys.exit()
+
+root = find_repo_root()
+log_dir = os.path.join(root, "data", "model_guides")
+log_file_path = os.path.join(log_dir, ".training.log")
+os.makedirs(log_dir, exist_ok=True)
+
+logging.basicConfig(
+ filename=log_file_path,
+ filemode="w",
+ format="%(asctime)s → %(name)s → %(levelname)s: %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ level=logging.INFO,
+)
+
+
+# if you need to train from scratch
+# clean data/model_guides folder manually
+# automatic fresh start is not implemented
+# for security reasons
+
+num_iterations = 4000
+
+interventions = list_interventions()
+outcomes = list_outcomes()
+shifts = [1, 2, 3]
+
+
+N_combinations = len(interventions) * len(outcomes) * len(shifts)
+
+files = [f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, f))]
+num_files = len(files)
+
+
+logging.info(
+ f"{(num_files-2)/2} guides already exist. "
+ f"Starting to train {N_combinations - (num_files -2)/2} out of {N_combinations} guides needed."
+)
+
+remaining = N_combinations - (num_files - 2) / 2
+for intervention_dataset in interventions:
+ for outcome_dataset in outcomes:
+ for forward_shift in shifts:
+ # check if the corresponding guide already exists
+ # existing_guides = 0 seems redundant, remove if all works
+ guide_name = f"{intervention_dataset}_{outcome_dataset}_{forward_shift}"
+ guide_path = os.path.join(
+ root, "data/model_guides", f"{guide_name}_guide.pkl"
+ )
+ if not os.path.exists(guide_path):
+ # existing_guides += 1 seems redundant, remove if all works
+
+ logging.info(f"Training {guide_name} for {num_iterations} iterations.")
+
+ start_time = time.time()
+ model = InteractionsModel(
+ outcome_dataset=outcome_dataset,
+ intervention_dataset=intervention_dataset,
+ forward_shift=forward_shift,
+ num_iterations=num_iterations,
+ plot_loss=False,
+ )
+
+ model.train_interactions_model()
+ model.save_guide()
+
+ end_time = time.time()
+ duration = end_time - start_time
+ files = [
+ f
+ for f in os.listdir(log_dir)
+ if os.path.isfile(os.path.join(log_dir, f))
+ ]
+ num_files = len(files)
+ remaining -= 1
+ logging.info(
+ f"Training of {guide_name} completed in {duration:.2f} seconds. "
+ f"{int(remaining)} out of {N_combinations} guides remain to be trained."
+ )
+
+logging.info("All guides are now available.")
diff --git a/build/cities/modeling/waic.py b/build/cities/modeling/waic.py
new file mode 100644
index 00000000..cf388ee5
--- /dev/null
+++ b/build/cities/modeling/waic.py
@@ -0,0 +1,69 @@
+from typing import Any, Callable, Dict, Optional
+
+import pyro
+import torch
+from pyro.infer.enum import get_importance_trace
+
+
+def compute_waic(
+ model: Callable[..., Any],
+ guide: Callable[..., Any],
+ num_particles: int,
+ max_plate_nesting: int,
+ sites: Optional[list[str]] = None,
+ *args: Any,
+ **kwargs: Any
+) -> Dict[str, Any]:
+
+ def vectorize(fn: Callable[..., Any]) -> Callable[..., Any]:
+ def _fn(*args: Any, **kwargs: Any) -> Any:
+ with pyro.plate(
+ "num_particles_vectorized", num_particles, dim=-max_plate_nesting
+ ):
+ return fn(*args, **kwargs)
+
+ return _fn
+
+ model_trace, _ = get_importance_trace(
+ "flat", max_plate_nesting, vectorize(model), vectorize(guide), args, kwargs
+ )
+
+ def site_filter_is_observed(site_name: str) -> bool:
+ return model_trace.nodes[site_name]["is_observed"]
+
+ def site_filter_in_sites(site_name: str) -> bool:
+ return sites is not None and site_name in sites
+
+ if sites is None:
+ site_filter = site_filter_is_observed
+ else:
+ site_filter = site_filter_in_sites
+
+ observed_nodes = {
+ name: node for name, node in model_trace.nodes.items() if site_filter(name)
+ }
+
+ log_p_post = {
+ key: observed_nodes[key]["log_prob"].mean(dim=0) # sum(axis = 0)/num_particles
+ for key in observed_nodes.keys()
+ }
+
+ lppd = torch.stack([log_p_post[key] for key in log_p_post.keys()]).sum()
+
+ var_log_p_post = {
+ key: (observed_nodes[key]["log_prob"]).var(axis=0)
+ for key in observed_nodes.keys()
+ }
+
+ p_waic = torch.stack([var_log_p_post[key] for key in var_log_p_post.keys()]).sum()
+
+ waic = -2 * (lppd - p_waic)
+
+ return {
+ "waic": waic,
+ "nodes": observed_nodes,
+ "log_p_post": log_p_post,
+ "var_log_p_post": var_log_p_post,
+ "lppd": lppd,
+ "p_waic": p_waic,
+ }
diff --git a/build/cities/modeling/zoning_models/distance_causal_model.py b/build/cities/modeling/zoning_models/distance_causal_model.py
new file mode 100644
index 00000000..57f4fc31
--- /dev/null
+++ b/build/cities/modeling/zoning_models/distance_causal_model.py
@@ -0,0 +1,202 @@
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.zoning_models.units_causal_model import add_linear_component, get_n
+
+
+class DistanceCausalModel(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+ # you might need to receive and pass along the original
+ # categorical levels of the training data
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ #################
+ # register
+ #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ month = pyro.sample(
+ "month",
+ dist.Categorical(torch.ones(len(categorical_levels["month"]))),
+ obs=categorical["month"],
+ )
+
+ zone_id = pyro.sample(
+ "zone_id",
+ dist.Categorical(torch.ones(len(categorical_levels["zone_id"]))),
+ obs=categorical["zone_id"],
+ )
+
+ neighborhood_id = pyro.sample(
+ "neighborhood_id",
+ dist.Categorical(
+ torch.ones(len(categorical_levels["neighborhood_id"]))
+ ),
+ obs=categorical["neighborhood_id"],
+ )
+
+ ward_id = pyro.sample(
+ "ward_id",
+ dist.Categorical(torch.ones(len(categorical_levels["ward_id"]))),
+ obs=categorical["ward_id"],
+ )
+
+ past_reform = pyro.sample(
+ "past_reform", dist.Normal(0, 1), obs=categorical["past_reform"]
+ )
+
+ # past_reform_by_zone = pyro.deterministic(
+ # "past_reform_by_zone",
+ # categorical_interaction_variable([past_reform, zone_id])[0],
+ # )
+ # categorical_levels["past_reform_by_zone"] = torch.unique(
+ # past_reform_by_zone
+ # )
+
+ # ___________________________________
+ # deterministic def of actual limits
+ # ___________________________________
+
+ with data_plate:
+ limit_con = pyro.deterministic(
+ "limit_con",
+ torch.where(
+ zone_id == 0,
+ torch.tensor(0.0),
+ torch.where(
+ zone_id == 1,
+ 1.0 - past_reform,
+ torch.where(
+ zone_id == 2, 1.0 - 0.5 * past_reform, torch.tensor(1.0)
+ ),
+ ),
+ ),
+ event_dim=0,
+ )
+
+ # __________________________________
+ # regression for distance to transit
+ # __________________________________
+
+ distance_to_transit_continuous_parents = {} # type: ignore
+ distance_to_transit_categorical_parents = {
+ "zone_id": zone_id,
+ }
+ distance_to_transit = add_linear_component(
+ child_name="distance_to_transit",
+ child_continuous_parents=distance_to_transit_continuous_parents,
+ child_categorical_parents=distance_to_transit_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=continuous["distance_to_transit"],
+ categorical_levels=categorical_levels,
+ )
+
+ # ___________________________
+ # regression for parcel area
+ # ___________________________
+ parcel_area_continuous_parents = {"distance_to_transit": distance_to_transit} # type: ignore
+ parcel_are_categorical_parents = {
+ "zone_id": zone_id,
+ "neighborhood_id": neighborhood_id,
+ }
+ parcel_area = add_linear_component(
+ child_name="parcel_area",
+ child_continuous_parents=parcel_area_continuous_parents,
+ child_categorical_parents=parcel_are_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=continuous["parcel_area"],
+ categorical_levels=categorical_levels,
+ )
+
+ # ___________________________
+ # regression for limit (suspended: limit_con is now set via pyro.deterministic)
+ # ___________________________
+
+ # limit_con_categorical_parents = {"past_reform_by_zone": past_reform_by_zone}
+
+ # # TODO consider using a `pyro.deterministic` statement if safe to assume what the
+ # # rules are and hard code them
+ # limit_con = add_linear_component(
+ # child_name="limit_con",
+ # child_continuous_parents={},
+ # child_categorical_parents=limit_con_categorical_parents,
+ # leeway=leeway,
+ # data_plate=data_plate,
+ # observations=continuous["limit_con"],
+ # categorical_levels=categorical_levels,
+ # )
+
+ # _____________________________
+ # regression for housing units
+ # _____________________________
+
+ housing_units_continuous_parents = {
+ "limit_con": limit_con,
+ "parcel_area": parcel_area,
+ "distance_to_transit": distance_to_transit,
+ }
+ housing_units_categorical_parents = {
+ "year": year,
+ "month": month,
+ "zone_id": zone_id,
+ "neighborhood_id": neighborhood_id,
+ "ward_id": ward_id,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=outcome,
+ categorical_levels=categorical_levels,
+ )
+
+ return housing_units
diff --git a/build/cities/modeling/zoning_models/missingness_only_model.py b/build/cities/modeling/zoning_models/missingness_only_model.py
new file mode 100644
index 00000000..76fd7e82
--- /dev/null
+++ b/build/cities/modeling/zoning_models/missingness_only_model.py
@@ -0,0 +1,173 @@
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.zoning_models.units_causal_model import (
+ categorical_contribution,
+ continuous_contribution,
+ get_n,
+)
+
+# see A WEAKLY INFORMATIVE DEFAULT PRIOR DISTRIBUTION FOR
+# LOGISTIC AND OTHER REGRESSION MODELS
+# by Andrew Gelman, Aleks Jakulin, Maria Grazia
+# Pittau and Yu-Sung Su
+# they recommend a Cauchy with 2.5 scale for coefficient priors
+
+# see also zoning_missingness_only.ipynb for a normal approximation
+
+
+def add_logistic_component(
+ child_name: "str",
+ child_continuous_parents,
+ child_categorical_parents,
+ leeway,
+ data_plate,
+ observations=None,
+ categorical_levels=None,
+):
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ categorical_contribution_to_child + continuous_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_probs = pyro.deterministic(
+ f"child_probs_{child_name}_{child_name}",
+ torch.sigmoid(mean_prediction_child),
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample(
+ f"{child_name}",
+ dist.Bernoulli(child_probs),
+ obs=observations,
+ )
+
+ # TODO consider a gamma-like distro here
+
+ return child_observed
+
+
+class MissingnessOnlyModel(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+ # you might need to receive and pass along the original
+ # categorical levels of the training data
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ #################
+ # register
+ #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ value = pyro.sample("value", dist.Normal(0, 1), obs=continuous["value"])
+
+ # month = pyro.sample(
+ # "month",
+ # dist.Categorical(torch.ones(len(categorical_levels["month"]))),
+ # obs=categorical["month"],
+ # )
+
+ # zone_id = pyro.sample(
+ # "zone_id",
+ # dist.Categorical(torch.ones(len(categorical_levels["zone_id"]))),
+ # obs=categorical["zone_id"],
+ # )
+
+ # neighborhood_id = pyro.sample(
+ # "neighborhood_id",
+ # dist.Categorical(
+ # torch.ones(len(categorical_levels["neighborhood_id"]))
+ # ),
+ # obs=categorical["neighborhood_id"],
+ # )
+
+ # ward_id = pyro.sample(
+ # "ward_id",
+ # dist.Categorical(torch.ones(len(categorical_levels["ward_id"]))),
+ # obs=categorical["ward_id"],
+ # )
+
+ # past_reform = pyro.sample(
+ # "past_reform", dist.Normal(0, 1), obs=categorical["past_reform"]
+ # )
+
+ # ___________________________
+ # logistic regression for applied
+ # ___________________________
+
+ applied_continuous_parents = {
+ "value": value,
+ }
+ applied_categorical_parents = {
+ "year": year,
+ }
+
+ applied = add_logistic_component(
+ child_name="applied",
+ child_continuous_parents=applied_continuous_parents,
+ child_categorical_parents=applied_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=categorical["applied"],
+ categorical_levels=categorical_levels,
+ )
+
+ return applied
diff --git a/build/cities/modeling/zoning_models/tracts_model.py b/build/cities/modeling/zoning_models/tracts_model.py
new file mode 100644
index 00000000..f4f8dc45
--- /dev/null
+++ b/build/cities/modeling/zoning_models/tracts_model.py
@@ -0,0 +1,703 @@
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.zoning_models.units_causal_model import (
+ add_linear_component,
+ categorical_contribution,
+ continuous_contribution,
+ get_n,
+)
+
+
+def add_ratio_component(
+ child_name: "str",
+ child_continuous_parents,
+ child_categorical_parents,
+ leeway,
+ data_plate,
+ observations=None,
+ categorical_levels=None,
+):
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ sigma_child = pyro.sample(f"sigma_{child_name}", dist.Exponential(40.0))
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ categorical_contribution_to_child + continuous_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_probs = pyro.deterministic(
+ f"child_probs_{child_name}_{child_name}",
+ torch.sigmoid(mean_prediction_child),
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample(
+ child_name, dist.Normal(child_probs, sigma_child), obs=observations
+ )
+
+ return child_observed
+
+
+def add_poisson_component(
+ child_name: str,
+ child_continuous_parents: Dict[str, torch.Tensor],
+ child_categorical_parents: Dict[str, torch.Tensor],
+ leeway: float,
+ data_plate,
+ observations: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+) -> torch.Tensor:
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic(
+ f"mean_outcome_prediction_{child_name}",
+ torch.exp(
+ categorical_contribution_to_child + continuous_contribution_to_child
+ ),
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample(
+ child_name, dist.Poisson(mean_prediction_child), obs=observations
+ )
+
+ return child_observed
+
+
+class TractsModelNoRatios(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+        # You may need to pass along the original
+        # categorical levels of the training data.
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # #################
+ # # register
+ # #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ # past_reform = pyro.sample(
+ # "past_reform",
+ # dist.Categorical(torch.ones(len(categorical_levels["past_reform"]))),
+ # obs=categorical["past_reform"],
+ # )
+
+ # ___________________________
+ # regression for white
+ # ___________________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_linear_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["white"],
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_linear_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["segregation"],
+ )
+
+ # ___________________________
+ # regression for income
+ # ___________________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ )
+
+ # _____________________________
+ # regression for limit
+ # _____________________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+ limit = add_linear_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["mean_limit"],
+ )
+
+ # _____________________________
+ # regression for median value
+ # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "limit": limit,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ )
+
+ # ___________________________
+ # regression for housing units
+ # ___________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["housing_units"],
+ )
+
+ return housing_units
+
+
+class TractsModel(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+        # You may need to pass along the original
+        # categorical levels of the training data.
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # #################
+ # # register
+ # #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ # past_reform = pyro.sample(
+ # "past_reform",
+ # dist.Categorical(torch.ones(len(categorical_levels["past_reform"]))),
+ # obs=categorical["past_reform"],
+ # )
+
+ # ___________________________
+ # regression for white
+ # ___________________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_ratio_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["white_original"],
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_ratio_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["segregation_original"],
+ )
+
+ # ___________________________
+ # regression for income
+ # ___________________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ )
+
+ # _____________________________
+ # regression for limit
+ # _____________________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+ limit = add_ratio_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["mean_limit_original"],
+ )
+
+ # _____________________________
+ # regression for median value
+ # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "limit": limit,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ )
+
+ # ___________________________
+ # regression for housing units
+ # ___________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["housing_units"],
+ )
+
+ return housing_units
+
+
+class TractsModelPoisson(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+        # You may need to pass along the original
+        # categorical levels of the training data.
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # #################
+ # # register
+ # #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ # past_reform = pyro.sample(
+ # "past_reform",
+ # dist.Categorical(torch.ones(len(categorical_levels["past_reform"]))),
+ # obs=categorical["past_reform"],
+ # )
+
+ # ___________________________
+ # regression for white
+ # ___________________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_ratio_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["white_original"],
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_ratio_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["segregation_original"],
+ )
+
+ # ___________________________
+ # regression for income
+ # ___________________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ )
+
+ # #_____________________________
+ # # regression for limit
+ # #_____________________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+ limit = add_ratio_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["mean_limit_original"],
+ )
+
+ # # _____________________________
+ # # regression for median value
+ # # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "limit": limit,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ )
+
+ # # ___________________________
+ # # regression for housing units
+ # # ___________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ }
+
+ housing_units = add_poisson_component(
+ child_name="housing_units_original",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["housing_units_original"],
+ )
+
+ return housing_units
diff --git a/build/cities/modeling/zoning_models/units_causal_model.py b/build/cities/modeling/zoning_models/units_causal_model.py
new file mode 100644
index 00000000..27035096
--- /dev/null
+++ b/build/cities/modeling/zoning_models/units_causal_model.py
@@ -0,0 +1,289 @@
+from typing import Any, Dict, List, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+
+def get_n(categorical: Dict[str, torch.Tensor], continuous: Dict[str, torch.Tensor]):
+ N_categorical = len(categorical.keys())
+ N_continuous = len(continuous.keys())
+
+ if N_categorical > 0:
+ n = len(next(iter(categorical.values())))
+ elif N_continuous > 0:
+ n = len(next(iter(continuous.values())))
+
+ return N_categorical, N_continuous, n
+
+
+def categorical_contribution(categorical, child_name, leeway, categorical_levels=None):
+
+ categorical_names = list(categorical.keys())
+
+ if categorical_levels is None:
+ categorical_levels = {
+ name: torch.unique(categorical[name]) for name in categorical_names
+ }
+
+ weights_categorical_outcome = {}
+ objects_cat_weighted = {}
+
+ for name in categorical_names:
+ weights_categorical_outcome[name] = pyro.sample(
+ f"weights_categorical_{name}_{child_name}",
+ dist.Normal(0.0, leeway).expand(categorical_levels[name].shape).to_event(1),
+ )
+
+ objects_cat_weighted[name] = weights_categorical_outcome[name][
+ ..., categorical[name]
+ ]
+
+ values = list(objects_cat_weighted.values())
+ for i in range(1, len(values)):
+ values[i] = values[i].view(values[0].shape)
+
+ categorical_contribution_outcome = torch.stack(values, dim=0).sum(dim=0)
+
+ return categorical_contribution_outcome
+
+
+def continuous_contribution(continuous, child_name, leeway):
+
+ contributions = torch.zeros(1)
+
+ for key, value in continuous.items():
+ bias_continuous = pyro.sample(
+ f"bias_continuous_{key}_{child_name}",
+ dist.Normal(0.0, leeway),
+ )
+
+ weight_continuous = pyro.sample(
+ f"weight_continuous_{key}_{child_name}",
+ dist.Normal(0.0, leeway),
+ )
+
+ contribution = bias_continuous + weight_continuous * value
+ contributions = contribution + contributions
+
+ return contributions
+
+
+def add_linear_component(
+ child_name: "str",
+ child_continuous_parents,
+ child_categorical_parents,
+ leeway,
+ data_plate,
+ observations=None,
+ categorical_levels=None,
+):
+
+ sigma_child = pyro.sample(
+ f"sigma_{child_name}", dist.Exponential(1.0)
+ ) # type: ignore
+
+ continuous_contribution_to_child = continuous_contribution(
+ child_continuous_parents, child_name, leeway
+ )
+
+ categorical_contribution_to_child = categorical_contribution(
+ child_categorical_parents,
+ child_name,
+ leeway,
+ categorical_levels=categorical_levels,
+ )
+
+ with data_plate:
+
+ mean_prediction_child = pyro.deterministic( # type: ignore
+ f"mean_outcome_prediction_{child_name}",
+ categorical_contribution_to_child + continuous_contribution_to_child,
+ event_dim=0,
+ )
+
+ child_observed = pyro.sample( # type: ignore
+ f"{child_name}",
+ dist.Normal(mean_prediction_child, sigma_child),
+ obs=observations,
+ )
+
+    # TODO: consider a gamma-like distribution here.
+
+ return child_observed
+
+
+def categorical_interaction_variable(interaction_list: List[torch.Tensor]):
+
+ assert len(interaction_list) > 1
+
+ for tensor in interaction_list:
+ assert tensor.shape == interaction_list[0].shape
+
+ stacked_tensor = torch.stack(interaction_list, dim=-1)
+
+ unique_pairs, inverse_indices = torch.unique(
+ stacked_tensor, return_inverse=True, dim=0
+ )
+
+ unique_combined_tensor = inverse_indices.reshape(interaction_list[0].shape)
+
+ indexing_dictionary = {
+ tuple(pair.tolist()): i for i, pair in enumerate(unique_pairs)
+ }
+
+ return unique_combined_tensor, indexing_dictionary
+
+
+class UnitsCausalModel(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+        # You may need to pass along the original
+        # categorical levels of the training data.
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = dict()
+ for name in categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ categorical_levels: Optional[Dict[str, torch.Tensor]] = None,
+ leeway=0.9,
+ ):
+ if categorical_levels is None:
+ categorical_levels = self.categorical_levels
+
+ _N_categorical, _N_continuous, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ #################
+ # register
+ #################
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ month = pyro.sample(
+ "month",
+ dist.Categorical(torch.ones(len(categorical_levels["month"]))),
+ obs=categorical["month"],
+ )
+
+ zone_id = pyro.sample(
+ "zone_id",
+ dist.Categorical(torch.ones(len(categorical_levels["zone_id"]))),
+ obs=categorical["zone_id"],
+ )
+
+ neighborhood_id = pyro.sample(
+ "neighborhood_id",
+ dist.Categorical(
+ torch.ones(len(categorical_levels["neighborhood_id"]))
+ ),
+ obs=categorical["neighborhood_id"],
+ )
+
+ ward_id = pyro.sample(
+ "ward_id",
+ dist.Categorical(torch.ones(len(categorical_levels["ward_id"]))),
+ obs=categorical["ward_id"],
+ )
+
+ past_reform = pyro.sample(
+ "past_reform", dist.Normal(0, 1), obs=categorical["past_reform"]
+ )
+
+ past_reform_by_zone = pyro.deterministic(
+ "past_reform_by_zone",
+ categorical_interaction_variable([past_reform, zone_id])[0],
+ )
+ categorical_levels["past_reform_by_zone"] = torch.unique(
+ past_reform_by_zone
+ )
+ # ___________________________
+ # regression for parcel area
+ # ___________________________
+ parcel_area_continuous_parents = {} # type: ignore
+ parcel_are_categorical_parents = {
+ "zone_id": zone_id,
+ "neighborhood_id": neighborhood_id,
+ }
+ parcel_area = add_linear_component(
+ child_name="parcel_area",
+ child_continuous_parents=parcel_area_continuous_parents,
+ child_categorical_parents=parcel_are_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=continuous["parcel_area"],
+ categorical_levels=categorical_levels,
+ )
+
+ # ___________________________
+ # regression for limit
+ # ___________________________
+
+ limit_con_categorical_parents = {"past_reform_by_zone": past_reform_by_zone}
+
+        # TODO: consider a `pyro.deterministic` statement here if it is safe to
+        # assume what the zoning rules are and hard-code them.
+ limit_con = add_linear_component(
+ child_name="limit_con",
+ child_continuous_parents={},
+ child_categorical_parents=limit_con_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=continuous["limit_con"],
+ categorical_levels=categorical_levels,
+ )
+
+ # _____________________________
+ # regression for housing units
+ # _____________________________
+
+ housing_units_continuous_parents = {
+ "limit_con": limit_con,
+ "parcel_area": parcel_area,
+ }
+ housing_units_categorical_parents = {
+ "year": year,
+ "month": month,
+ "zone_id": zone_id,
+ "neighborhood_id": neighborhood_id,
+ "ward_id": ward_id,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=leeway,
+ data_plate=data_plate,
+ observations=outcome,
+ categorical_levels=categorical_levels,
+ )
+
+ return housing_units
diff --git a/build/cities/modeling/zoning_models/zoning_tracts_continuous_interactions_model.py b/build/cities/modeling/zoning_models/zoning_tracts_continuous_interactions_model.py
new file mode 100644
index 00000000..69bd017a
--- /dev/null
+++ b/build/cities/modeling/zoning_models/zoning_tracts_continuous_interactions_model.py
@@ -0,0 +1,301 @@
+import warnings
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.model_components import (
+ add_linear_component,
+ add_linear_component_continuous_interactions,
+ add_ratio_component_continuous_interactions,
+ add_ratio_component,
+ check_categorical_is_subset_of_levels,
+ get_categorical_levels,
+ get_n,
+)
+
+
+class TractsModelContinuousInteractions(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ housing_units_continuous_interaction_pairs=[],
+ limit_continuous_interaction_pairs=[],
+ ):
+ """
+
+ :param categorical: dict of categorical data
+ :param continuous: dict of continuous data
+ :param outcome: outcome data (unused, todo remove)
+ :param categorical_levels: dict of unique categorical values. If this is not passed, it will be computed from
+ the provided categorical data. Importantly, if categorical is a subset of the full dataset, this automated
+ computation may omit categorical levels that are present in the full dataset but not in the subset.
+ """
+ super().__init__()
+
+ self.leeway = leeway
+ self.housing_units_continuous_interaction_pairs = (
+ housing_units_continuous_interaction_pairs
+ )
+ self.limit_continuous_interaction_pairs = limit_continuous_interaction_pairs
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = get_categorical_levels(categorical)
+ else:
+ self.categorical_levels = categorical_levels
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ leeway=0.9,
+ categorical_levels=None,
+ n=None,
+ ):
+ if categorical_levels is not None:
+ warnings.warn(
+ "Passed categorical_levels will no longer override the levels passed to or computed during"
+ " model initialization. The argument will be ignored."
+ )
+
+ categorical_levels = self.categorical_levels
+ assert check_categorical_is_subset_of_levels(categorical, categorical_levels)
+
+ if n is None:
+ _, _, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # _________
+ # register
+ # _________
+
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ downtown_overlap = pyro.sample(
+ "downtown_overlap",
+ dist.Normal(0, 1),
+ obs=continuous["downtown_overlap"],
+ )
+
+ university_overlap = pyro.sample(
+ "university_overlap",
+ dist.Normal(0, 1),
+ obs=continuous["university_overlap"],
+ )
+
+ # ______________________
+ # regression for sqm
+ # ______________________
+
+ sqm_continuous_parents = {
+ "distance": distance,
+ }
+
+ sqm_categorical_parents = {
+ "year": year,
+ }
+
+ sqm = add_linear_component(
+ child_name="sqm",
+ child_continuous_parents=sqm_continuous_parents,
+ child_categorical_parents=sqm_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["parcel_sqm"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _______________________
+ # regression for limit
+ # _______________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ "downtown_overlap": downtown_overlap,
+ "university_overlap": university_overlap,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+
+
+ limit = add_ratio_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+            leeway=8, # 11.57,
+ data_plate=data_plate,
+ observations=continuous["mean_limit_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+
+ # limit = add_ratio_component(
+ # child_name="limit",
+ # child_continuous_parents=limit_continuous_parents,
+ # child_categorical_parents=limit_categorical_parents,
+ # leeway=8, # ,
+ # data_plate=data_plate,
+ # observations=continuous["mean_limit_original"],
+ # categorical_levels=self.categorical_levels,
+ # )
+
+ # _____________________
+ # regression for white
+ # _____________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_ratio_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=8, # 11.57,
+ data_plate=data_plate,
+ observations=continuous["white_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_ratio_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=8, # 11.57,
+ data_plate=data_plate,
+ observations=continuous["segregation_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________
+ # regression for income
+ # ______________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _____________________________
+ # regression for median value
+ # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________________
+ # regression for housing units
+ # ______________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ "sqm": sqm,
+ "downtown_overlap": downtown_overlap,
+ "university_overlap": university_overlap,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ # "university_index": university_index,
+ # "downtown_index": downtown_index,
+ }
+
+ housing_units = add_linear_component_continuous_interactions(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ continous_interaction_pairs=self.housing_units_continuous_interaction_pairs,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["housing_units"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ return housing_units
diff --git a/build/cities/modeling/zoning_models/zoning_tracts_model.py b/build/cities/modeling/zoning_models/zoning_tracts_model.py
new file mode 100644
index 00000000..0357bdc4
--- /dev/null
+++ b/build/cities/modeling/zoning_models/zoning_tracts_model.py
@@ -0,0 +1,234 @@
+import warnings
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.model_components import (
+ add_linear_component,
+ add_ratio_component,
+ check_categorical_is_subset_of_levels,
+ get_categorical_levels,
+ get_n,
+)
+
+
+class TractsModel(pyro.nn.PyroModule):
+
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ """
+
+ :param categorical: dict of categorical data
+ :param continuous: dict of continuous data
+ :param outcome: outcome data (unused, todo remove)
+ :param categorical_levels: dict of unique categorical values. If this is not passed, it will be computed from
+ the provided categorical data. Importantly, if categorical is a subset of the full dataset, this automated
+ computation may omit categorical levels that are present in the full dataset but not in the subset.
+ """
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+ # you might need and pass further the original
+ # categorical levels of the training data
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = get_categorical_levels(categorical)
+ else:
+ self.categorical_levels = categorical_levels # type: ignore
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ leeway=0.9,
+ categorical_levels=None,
+ n=None,
+ ):
+ if categorical_levels is not None:
+ warnings.warn(
+ "Passed categorical_levels will no longer override the levels passed to or computed during"
+ " model initialization. The argument will be ignored."
+ )
+
+ categorical_levels = self.categorical_levels
+ assert check_categorical_is_subset_of_levels(categorical, categorical_levels)
+
+ if n is None:
+ _, _, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # _________
+ # register
+ # _________
+
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ # _____________________
+ # regression for white
+ # _____________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_ratio_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["white_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_ratio_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["segregation_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________
+ # regression for income
+ # ______________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _______________________
+ # regression for limit
+ # _______________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+ limit = add_ratio_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+ leeway=11.57,
+ data_plate=data_plate,
+ observations=continuous["mean_limit_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _____________________________
+ # regression for median value
+ # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "limit": limit,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________________
+ # regression for housing units
+ # ______________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=0.9,
+ data_plate=data_plate,
+ observations=continuous["housing_units"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ return housing_units
diff --git a/build/cities/modeling/zoning_models/zoning_tracts_sqm_model.py b/build/cities/modeling/zoning_models/zoning_tracts_sqm_model.py
new file mode 100644
index 00000000..5634a52f
--- /dev/null
+++ b/build/cities/modeling/zoning_models/zoning_tracts_sqm_model.py
@@ -0,0 +1,261 @@
+import warnings
+from typing import Any, Dict, Optional
+
+import pyro
+import pyro.distributions as dist
+import torch
+
+from cities.modeling.model_components import (
+ add_linear_component,
+ add_ratio_component,
+ check_categorical_is_subset_of_levels,
+ get_categorical_levels,
+ get_n,
+)
+
+
+class TractsModelSqm(pyro.nn.PyroModule):
+ def __init__(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[
+ torch.Tensor
+ ] = None, # init args kept for uniformity, consider deleting
+ categorical_levels: Optional[Dict[str, Any]] = None,
+ leeway=0.9,
+ ):
+ """
+
+ :param categorical: dict of categorical data
+ :param continuous: dict of continuous data
+ :param outcome: outcome data (unused, todo remove)
+ :param categorical_levels: dict of unique categorical values. If this is not passed, it will be computed from
+ the provided categorical data. Importantly, if categorical is a subset of the full dataset, this automated
+ computation may omit categorical levels that are present in the full dataset but not in the subset.
+ """
+ super().__init__()
+
+ self.leeway = leeway
+
+ self.N_categorical, self.N_continuous, n = get_n(categorical, continuous)
+
+ if self.N_categorical > 0 and categorical_levels is None:
+ self.categorical_levels = get_categorical_levels(categorical)
+ else:
+ self.categorical_levels = categorical_levels
+
+ def forward(
+ self,
+ categorical: Dict[str, torch.Tensor],
+ continuous: Dict[str, torch.Tensor],
+ outcome: Optional[torch.Tensor] = None,
+ leeway=0.9,
+ categorical_levels=None,
+ n=None,
+ ):
+ if categorical_levels is not None:
+ warnings.warn(
+ "Passed categorical_levels will no longer override the levels passed to or computed during"
+ " model initialization. The argument will be ignored."
+ )
+
+ categorical_levels = self.categorical_levels
+ assert check_categorical_is_subset_of_levels(categorical, categorical_levels)
+
+ if n is None:
+ _, _, n = get_n(categorical, continuous)
+
+ data_plate = pyro.plate("data", size=n, dim=-1)
+
+ # _________
+ # register
+ # _________
+
+ with data_plate:
+
+ year = pyro.sample(
+ "year",
+ dist.Categorical(torch.ones(len(categorical_levels["year"]))),
+ obs=categorical["year"],
+ )
+
+ distance = pyro.sample(
+ "distance", dist.Normal(0, 1), obs=continuous["median_distance"]
+ )
+
+ # ______________________
+ # regression for sqm
+ # ______________________
+
+ sqm_continuous_parents = {
+ "distance": distance,
+ }
+
+ sqm_categorical_parents = {
+ "year": year,
+ }
+
+ sqm = add_linear_component(
+ child_name="sqm",
+ child_continuous_parents=sqm_continuous_parents,
+ child_categorical_parents=sqm_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["parcel_sqm"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _______________________
+ # regression for limit
+ # _______________________
+
+ limit_continuous_parents = {
+ "distance": distance,
+ }
+
+ limit_categorical_parents = {
+ "year": year,
+ }
+
+ limit = add_ratio_component(
+ child_name="limit",
+ child_continuous_parents=limit_continuous_parents,
+ child_categorical_parents=limit_categorical_parents,
+ leeway=8, # ,
+ data_plate=data_plate,
+ observations=continuous["mean_limit_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _____________________
+ # regression for white
+ # _____________________
+
+ white_continuous_parents = {
+ "distance": distance,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ white_categorical_parents = {
+ "year": year,
+ }
+
+ white = add_ratio_component(
+ child_name="white",
+ child_continuous_parents=white_continuous_parents,
+ child_categorical_parents=white_categorical_parents,
+ leeway=8, # 11.57,
+ data_plate=data_plate,
+ observations=continuous["white_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ___________________________
+ # regression for segregation
+ # ___________________________
+
+ segregation_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ segregation_categorical_parents = {
+ "year": year,
+ }
+
+ segregation = add_ratio_component(
+ child_name="segregation",
+ child_continuous_parents=segregation_continuous_parents,
+ child_categorical_parents=segregation_categorical_parents,
+ leeway=8, # 11.57,
+ data_plate=data_plate,
+ observations=continuous["segregation_original"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________
+ # regression for income
+ # ______________________
+
+ income_continuous_parents = {
+ "distance": distance,
+ "white": white,
+ "segregation": segregation,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ income_categorical_parents = {
+ "year": year,
+ }
+
+ income = add_linear_component(
+ child_name="income",
+ child_continuous_parents=income_continuous_parents,
+ child_categorical_parents=income_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["income"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # _____________________________
+ # regression for median value
+ # _____________________________
+
+ value_continuous_parents = {
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "segregation": segregation,
+ "sqm": sqm,
+ "limit": limit,
+ }
+
+ value_categorical_parents = {
+ "year": year,
+ }
+
+ median_value = add_linear_component(
+ child_name="median_value",
+ child_continuous_parents=value_continuous_parents,
+ child_categorical_parents=value_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["median_value"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ # ______________________________
+ # regression for housing units
+ # ______________________________
+
+ housing_units_continuous_parents = {
+ "median_value": median_value,
+ "distance": distance,
+ "income": income,
+ "white": white,
+ "limit": limit,
+ "segregation": segregation,
+ "sqm": sqm,
+ }
+
+ housing_units_categorical_parents = {
+ "year": year,
+ }
+
+ housing_units = add_linear_component(
+ child_name="housing_units",
+ child_continuous_parents=housing_units_continuous_parents,
+ child_categorical_parents=housing_units_categorical_parents,
+ leeway=0.5,
+ data_plate=data_plate,
+ observations=continuous["housing_units"],
+ categorical_levels=self.categorical_levels,
+ )
+
+ return housing_units
diff --git a/build/cities/queries/__init__.py b/build/cities/queries/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/build/cities/queries/causal_insight.py b/build/cities/queries/causal_insight.py
new file mode 100644
index 00000000..7a7a7e98
--- /dev/null
+++ b/build/cities/queries/causal_insight.py
@@ -0,0 +1,585 @@
+import os
+
+import dill
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import plotly.graph_objects as go
+import pyro
+import torch
+from sklearn.preprocessing import StandardScaler
+
+from cities.modeling.model_interactions import model_cities_interaction
+from cities.modeling.modeling_utils import prep_wide_data_for_inference
+from cities.utils.cleaning_utils import (
+ revert_prediction_df,
+ revert_standardize_and_scale_scaler,
+ sigmoid,
+)
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+from cities.utils.percentiles import transformed_intervention_from_percentile
+
+
+class CausalInsight:
+ def __init__(
+ self,
+ outcome_dataset,
+ intervention_dataset,
+ num_samples=1000,
+ sites=None,
+ smoke_test=None,
+ ):
+ self.outcome_dataset = outcome_dataset
+ self.intervention_dataset = intervention_dataset
+ self.root = find_repo_root()
+ self.num_samples = num_samples
+ self.data = None
+ self.smoke_test = smoke_test
+
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ self.tau_samples_path = os.path.join(
+ self.root,
+ "data/tau_samples",
+ f"{self.intervention_dataset}_{self.outcome_dataset}_{self.num_samples}_tau.pkl",
+ )
+
+ # these are loaded/computed as need be
+
+ self.guide = None
+ self.data = None
+ self.fips_id = None
+ self.name = None
+ self.model = None
+ self.model_args = None
+ self.predictive = None
+ self.samples = None
+ self.tensed_samples = None
+ self.tensed_tau_samples = None
+
+ self.intervened_value = None # possibly in the transformed scale
+ self.intervention_is_percentile = None # flag for the sort of input
+ self.intervened_percentile = None # possible passed at input
+ self.intervened_value_percentile = (
+ None # calculated if input was on the transformed scale
+ )
+ self.intervened_value_original = None # in the original scale
+ self.observed_intervention = None # in the transformed scale
+ self.observed_intervention_original = None # in the original scale
+ self.observed_intervention_percentile = None # calculated if input
+ # was on the transformed scale
+ self.observed_outcomes = None
+ self.intervention_diff = (
+ None # difference between observed and counterfactual value of the
+ )
+ # intervention variable
+ self.intervention_impact = None # dictionary with preds for each shift
+ self.predictions = None # df with preds, can be passed to plotting
+ self.prediction_original = None # df with preds on the original scale
+ # can be passed to plotting
+ self.fips_observed_data = None # to be used for plotting in
+ # contrast with the counterfactual prediction
+ self.year_id = None # year of intervention as index in the outcome years
+ self.prediction_years = None
+
+ # these are used in posterior predictive checks
+ self.average_predictions = None
+ self.r_squared = None
+
+ def load_guide(self, forward_shift):
+ pyro.clear_param_store()
+ guide_name = (
+ f"{self.intervention_dataset}_{self.outcome_dataset}_{forward_shift}"
+ )
+ guide_path = os.path.join(
+ self.root, "data/model_guides", f"{guide_name}_guide.pkl"
+ )
+
+ with open(guide_path, "rb") as file:
+ self.guide = dill.load(file)
+ param_path = os.path.join(
+ self.root, "data/model_guides", f"{guide_name}_params.pth"
+ )
+
+ pyro.get_param_store().load(param_path)
+
+ self.forward_shift = forward_shift
+
+ def generate_samples(self):
+ self.data = prep_wide_data_for_inference(
+ outcome_dataset=self.outcome_dataset,
+ intervention_dataset=self.intervention_dataset,
+ forward_shift=self.forward_shift,
+ )
+ self.model = model_cities_interaction
+
+ self.model_args = self.data["model_args"]
+
+ self.predictive = pyro.infer.Predictive(
+ model=self.model,
+ guide=self.guide,
+ num_samples=self.num_samples,
+ parallel=True,
+ # return_sites=self.sites,
+ )
+ self.samples = self.predictive(*self.model_args)
+
+ # indexing and gathering with mwc in this context
+ # seems to fail, calculating the expected diff made by the intervention manually
+ # wrt to actual observed outcomes rather than predicting outcomes themselves
+ # effectively keeping the noise fixed and focusing on a counterfactual claim
+
+ # TODO possibly delete if the current strategy is deemed uncontroversial
+ # else:
+ # if not isinstance(intervened_value, torch.Tensor):
+ # intervened_value = torch.tensor(intervened_value, device=self.device)
+ # intervened_expanded = intervened_value.expand_as(self.data['t'])
+
+ # with MultiWorldCounterfactual(first_available_dim=-6) as mwc:
+ # with do(actions = dict(T = intervened_expanded)):
+ # self.predictive = pyro.infer.Predictive(model=self.model, guide=self.guide,
+ # num_samples=self.num_samples, parallel=True)
+ # self.samples = self.predictive(*self.model_args)
+ # self.mwc = mwc
+
+ def generate_tensed_samples(self):
+ self.tensed_samples = {}
+ self.tensed_tau_samples = {}
+
+ for shift in [1, 2, 3]:
+ self.load_guide(shift)
+ self.generate_samples()
+ self.tensed_samples[shift] = self.samples
+ self.tensed_tau_samples[shift] = (
+ self.samples["weight_TY"].squeeze().detach().numpy()
+ )
+
+ if not self.smoke_test:
+ if not os.path.exists(self.tau_samples_path):
+ with open(self.tau_samples_path, "wb") as file:
+ dill.dump(self.tensed_tau_samples, file)
+
+ def get_tau_samples(self):
+ if os.path.exists(self.tau_samples_path):
+ with open(self.tau_samples_path, "rb") as file:
+ self.tensed_tau_samples = dill.load(file)
+ else:
+ raise ValueError("No tau samples found. Run generate_tensed_samples first.")
+
+ """Returns the intervened and observed value, in the original scale"""
+
+ def slider_values_to_interventions(self, intervened_percent, year):
+ try:
+ original_column = dg.wide[self.intervention_dataset][
+ str(year)
+ ].values.reshape(-1, 1)
+ except NameError:
+ dg = DataGrabber()
+ dg.get_features_wide([self.intervention_dataset])
+ original_column = dg.wide[self.intervention_dataset][
+ str(year)
+ ].values.reshape(-1, 1)
+
+ max = original_column.max()
+
+ intervened_original = intervened_percent * max / 100
+
+ scaler = StandardScaler()
+ scaler.fit(original_column)
+
+ intervened_scaled = scaler.transform(intervened_original.reshape(-1, 1))
+ intervened_transformed = sigmoid(intervened_scaled, scale=1 / 3)
+
+ # TODO this output is a bit verbose
+ # consider deleting what ends up not needed in the frontend
+ percent_calcs = {
+ "max": max,
+ "intervened_percent": intervened_percent,
+ "intervened_original": intervened_original,
+ "intervened_scaled": intervened_scaled[0, 0],
+ "intervened_transformed": intervened_transformed[0, 0],
+ }
+
+ return percent_calcs
+
+ def get_intervened_and_observed_values_original_scale(
+ self, fips, intervened_value, year
+ ):
+ dg = DataGrabber()
+ dg.get_features_std_wide([self.intervention_dataset, self.outcome_dataset])
+ dg.get_features_wide([self.intervention_dataset])
+
+ # intervened value, in the original scale
+ intervened_original_scale = revert_standardize_and_scale_scaler(
+ intervened_value, year, self.intervention_dataset
+ )
+
+ fips_id = (
+ dg.std_wide[self.intervention_dataset]
+ .loc[dg.std_wide[self.intervention_dataset]["GeoFIPS"] == fips]
+ .index[0]
+ )
+
+ # observed value, in the original scale
+ observed_original_scale = dg.wide[self.intervention_dataset].iloc[fips_id][
+ str(year)
+ ]
+
+ return (intervened_original_scale[0], observed_original_scale)
+
+ def get_fips_predictions(
+ self, fips, intervened_value, year=None, intervention_is_percentile=False
+ ):
+ self.fips = fips
+
+ if self.data is None:
+ self.data = prep_wide_data_for_inference(
+ outcome_dataset=self.outcome_dataset,
+ intervention_dataset=self.intervention_dataset,
+ forward_shift=3, # shift doesn't matter here, as long as data exists
+ )
+
+ # start with the latest year possible by default
+ if year is None:
+ year = self.data["years_available"][-1]
+ assert year in self.data["years_available"]
+
+ self.year = year
+
+ if intervention_is_percentile:
+ self.intervened_percentile = intervened_value
+ intervened_value = transformed_intervention_from_percentile(
+ self.intervention_dataset, year, intervened_value
+ )
+
+ self.intervened_value = intervened_value
+
+ # find years for prediction
+ outcome_years = self.data["outcome_years"]
+ year_id = [int(x) for x in outcome_years].index(year)
+ self.year_id = year_id
+
+ self.prediction_years = outcome_years[(year_id) : (year_id + 4)]
+
+ # find fips unit index
+ dg = DataGrabber()
+ dg.get_features_std_wide([self.intervention_dataset, self.outcome_dataset])
+ dg.get_features_wide([self.intervention_dataset])
+ interventions_this_year_original = dg.wide[self.intervention_dataset][str(year)]
+
+ self.intervened_value_original = revert_standardize_and_scale_scaler(
+ self.intervened_value, self.year, self.intervention_dataset
+ )
+
+ self.intervened_value_percentile = round(
+ (
+ np.mean(
+ interventions_this_year_original.values
+ <= self.intervened_value_original
+ )
+ * 100
+ ),
+ 3,
+ )
+
+ self.fips_id = (
+ dg.std_wide[self.intervention_dataset]
+ .loc[dg.std_wide[self.intervention_dataset]["GeoFIPS"] == fips]
+ .index[0]
+ )
+
+ self.name = dg.std_wide[self.intervention_dataset]["GeoName"].iloc[self.fips_id]
+
+ # get observed values at the prediction times
+ self.observed_intervention = dg.std_wide[self.intervention_dataset].iloc[
+ self.fips_id
+ ][str(year)]
+
+ self.observed_intervention_original = dg.wide[self.intervention_dataset].iloc[
+ self.fips_id
+ ][str(year)]
+
+ if intervention_is_percentile:
+ self.observed_intervention_percentile = round(
+ (
+ np.mean(
+ interventions_this_year_original.values
+ <= self.observed_intervention_original
+ )
+ * 100
+ ),
+ 1,
+ )
+
+ self.observed_outcomes = dg.std_wide[self.outcome_dataset].iloc[self.fips_id][
+ outcome_years[year_id : (year_id + 4)]
+ ]
+ self.intervention_diff = self.intervened_value - self.observed_intervention
+
+ self.intervention_impact = {}
+ self.intervention_impact_mean = []
+ self.intervention_impact_low = []
+ self.intervention_impact_high = []
+ for shift in [1, 2, 3]:
+ self.intervention_impact[shift] = (
+ self.tensed_tau_samples[shift] * self.intervention_diff
+ )
+ self.intervention_impact_mean.append(
+ np.mean(self.intervention_impact[shift])
+ )
+ self.intervention_impact_low.append(
+ np.percentile(self.intervention_impact[shift], 2.5)
+ )
+ self.intervention_impact_high.append(
+ np.percentile(self.intervention_impact[shift], 97.5)
+ )
+
+ predicted_mean = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_mean + self.observed_outcomes.iloc[1:]
+ ).tolist()
+ predicted_low = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_low + self.observed_outcomes.iloc[1:]
+ ).tolist()
+ predicted_high = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_high + self.observed_outcomes.iloc[1:]
+ ).tolist()
+
+ self.predictions = pd.DataFrame(
+ {
+ "year": self.prediction_years,
+ "observed": self.observed_outcomes,
+ "mean": predicted_mean,
+ "low": predicted_low,
+ "high": predicted_high,
+ }
+ )
+
+ self.predictions_original = revert_prediction_df(
+ self.predictions, self.outcome_dataset
+ )
+
+ # TODO for some reason indexing using gather doesn't pick the right indices
+ # look into this some time, do this by hand for now
+ # with self.mwc:
+ # self.tau_samples = self.samples['weight_TY'].squeeze().detach().numpy()
+ # self.tensed_observed_samples[shift] = self.tensed_intervened_samples[shift] = gather(
+ # self.samples['Y'], IndexSet(**{"T": {0}}),
+ # event_dim=0,).squeeze()
+ # self.tensed_intervened_samples[shift] = gather(
+ # self.samples['Y'], IndexSet(**{"T": {1}}),
+ # event_dim=0,).squeeze()#[:,self.fips_id]
+
+ # self.tensed_outcome_difference[shift] = (
+ # self.tensed_intervened_samples[shift] - self.tensed_observed_samples[shift]
+ # )
+ return
+
+ def plot_predictions(
+ self, range_multiplier=1.5, show_figure=True, scaling="transformed"
+ ):
+ assert scaling in ["transformed", "original"]
+
+ dg = DataGrabber()
+
+ if scaling == "transformed":
+ dg.get_features_std_long([self.outcome_dataset])
+ plot_data = dg.std_long[self.outcome_dataset]
+ self.fips_observed_data = plot_data[
+ plot_data["GeoFIPS"] == self.fips
+ ].copy()
+
+ y_min = (
+ min(
+ self.fips_observed_data["Value"].min(),
+ self.predictions["low"].min(),
+ )
+ - 0.05
+ )
+ y_max = (
+ max(
+ self.fips_observed_data["Value"].max(),
+ self.predictions["high"].max(),
+ )
+ + 0.05
+ )
+ else:
+ dg.get_features_long([self.outcome_dataset])
+ plot_data = dg.long[self.outcome_dataset]
+
+ self.fips_observed_data = plot_data[
+ plot_data["GeoFIPS"] == self.fips
+ ].copy()
+
+ y_min = 0.8 * min(
+ self.fips_observed_data["Value"].min(),
+ self.predictions_original["low"].min(),
+ )
+ y_max = 1.3 * max(
+ self.fips_observed_data["Value"].max(),
+ self.predictions_original["high"].max(),
+ )
+
+ fig = go.Figure()
+
+ fig.add_trace(
+ go.Scatter(
+ x=self.fips_observed_data["Year"],
+ y=self.fips_observed_data["Value"],
+ mode="lines+markers",
+ name=self.fips_observed_data["GeoName"].iloc[0],
+ line=dict(color="darkred", width=3),
+ text=self.fips_observed_data["GeoName"].iloc[0],
+ textposition="top right",
+ showlegend=False,
+ )
+ )
+
+ if scaling == "transformed":
+ fig.add_trace(
+ go.Scatter(
+ x=self.predictions["year"],
+ y=self.predictions["mean"],
+ mode="lines",
+ line=dict(color="blue", width=2),
+ name="mean prediction",
+ text=self.predictions["mean"],
+ )
+ )
+
+ credible_interval_trace = go.Scatter(
+ x=pd.concat([self.predictions["year"], self.predictions["year"][::-1]]),
+ y=pd.concat([self.predictions["high"], self.predictions["low"][::-1]]),
+ fill="toself",
+ fillcolor="rgba(0,100,80,0.2)",
+ line=dict(color="rgba(255,255,255,0)"),
+ name="95% credible interval around mean",
+ )
+
+ else:
+ fig.add_trace(
+ go.Scatter(
+ x=self.predictions_original["year"],
+ y=self.predictions_original["mean"],
+ mode="lines",
+ line=dict(color="blue", width=2),
+ name="mean prediction",
+ text=self.predictions_original["mean"],
+ )
+ )
+
+ credible_interval_trace = go.Scatter(
+ x=pd.concat(
+ [
+ self.predictions_original["year"],
+ self.predictions_original["year"][::-1],
+ ]
+ ),
+ y=pd.concat(
+ [
+ self.predictions_original["high"],
+ self.predictions_original["low"][::-1],
+ ]
+ ),
+ fill="toself",
+ fillcolor="rgba(255, 255, 255, 0.31)",
+ line=dict(color="rgba(255,255,255,0)"),
+ name="95% credible interval around mean",
+ )
+
+ fig.add_trace(credible_interval_trace)
+
+ if hasattr(self, "intervened_percentile"):
+ intervened_value = self.intervened_percentile
+ observed_intervention = self.observed_intervention_percentile
+
+ else:
+ intervened_value = round(self.intervened_value, 3)
+ observed_intervention = round(self.observed_intervention, 3)
+
+ if scaling == "transformed":
+ title = (
+ f"Predicted {self.outcome_dataset} in {self.name} under intervention {intervened_value} "
+ f"in year {self.year} <br>"
+ f"compared to the observed values under observed intervention "
+ f"{observed_intervention}."
+ )
+
+ else:
+ title = (
+ f"Predicted {self.outcome_dataset} in {self.name} <br>"
+ f"under intervention {self.intervened_value_original}"
+ f" in year {self.year} <br>"
+ f"{self.intervened_value_percentile}% of counties received a lower intervention <br>"
+ f"observed intervention: {self.observed_intervention_original}"
+ )
+
+ fig.update_yaxes(range=[y_min, y_max])
+
+ fig.update_layout(
+ title=title,
+ title_font=dict(size=12),
+ xaxis_title="Year",
+ yaxis_title="Value",
+ template="simple_white",
+ legend=dict(x=0.05, y=1, traceorder="normal", orientation="h"),
+ )
+
+ self.predictions_plot = fig
+
+ if show_figure:
+ fig.show()
+ else:
+ return fig
+
+ def plot_residuals(self):
+ predictions = self.samples["Y"].squeeze()
+ self.average_predictions = torch.mean(predictions, dim=0)
+ plt.hist(self.average_predictions - self.data["y"].squeeze(), bins=70)
+ plt.xlabel("residuals")
+ plt.ylabel("counts")
+ plt.text(
+ 0.7,
+ -0.1,
+ "(colored by year)",
+ ha="left",
+ va="bottom",
+ transform=plt.gca().transAxes,
+ )
+ plt.show()
+
+ def predictive_check(self):
+ y_flat = self.data["y"].view(-1)
+ observed_mean = torch.mean(y_flat)
+ tss = torch.sum((y_flat - observed_mean) ** 2)
+ average_predictions_flat = self.average_predictions.view(-1)
+ rss = torch.sum((y_flat - average_predictions_flat) ** 2)
+ r_squared = 1 - (rss / tss)
+ self.r_squared = r_squared
+ rounded_r_squared = np.round(r_squared.item(), 2)
+ plt.scatter(y=average_predictions_flat, x=y_flat)
+ plt.title(
+ f"{self.intervention_dataset}, {self.outcome_dataset}, "
+ f"R2={rounded_r_squared}"
+ )
+ plt.ylabel("average prediction")
+ plt.xlabel("observed outcome")
+ plt.show
+
+ def estimate_ATE(self):
+ tau_samples = self.samples["weight_TY"].squeeze().detach().numpy()
+ plt.hist(tau_samples, bins=70)
+ plt.axvline(
+ x=tau_samples.mean(),
+ color="red",
+ linestyle="dashed",
+ linewidth=2,
+ label=f"mean = {tau_samples.mean():.3f}",
+ )
+ plt.title(
+ f"ATE for {self.intervention_dataset} and {self.outcome_dataset} "
+ f"with forward shift = {self.forward_shift}"
+ )
+ plt.ylabel("counts")
+ plt.xlabel("ATE")
+ plt.legend()
+ plt.show()
diff --git a/build/cities/queries/causal_insight_slim.py b/build/cities/queries/causal_insight_slim.py
new file mode 100644
index 00000000..3efc6d09
--- /dev/null
+++ b/build/cities/queries/causal_insight_slim.py
@@ -0,0 +1,681 @@
+import os
+
+import dill
+import numpy as np
+import pandas as pd
+import plotly.graph_objects as go
+from sklearn.preprocessing import StandardScaler
+
+from cities.utils.cleaning_utils import (
+ revert_prediction_df,
+ revert_standardize_and_scale_scaler,
+ sigmoid,
+)
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+from cities.utils.percentiles import transformed_intervention_from_percentile
+
+
+class CausalInsightSlim:
+ def __init__(
+ self,
+ outcome_dataset,
+ intervention_dataset,
+ num_samples=1000,
+ sites=None,
+ smoke_test=None,
+ ):
+ self.outcome_dataset = outcome_dataset
+ self.intervention_dataset = intervention_dataset
+ self.root = find_repo_root()
+ self.num_samples = num_samples
+ self.smoke_test = smoke_test
+ self.data = None
+
+ self.tau_samples_path = os.path.join(
+ self.root,
+ "data/tau_samples",
+ f"{self.intervention_dataset}_{self.outcome_dataset}_{self.num_samples}_tau.pkl",
+ )
+
+ def get_tau_samples(self):
+ if os.path.exists(self.tau_samples_path):
+ with open(self.tau_samples_path, "rb") as file:
+ self.tensed_tau_samples = dill.load(file)
+ else:
+ raise ValueError("No tau samples found. Run generate_tensed_samples first.")
+
+ def slider_values_to_interventions(self, intervened_percent, year):
+ try:
+ original_column = dg.wide[self.intervention_dataset][
+ str(year)
+ ].values.reshape(-1, 1)
+ except NameError:
+ dg = DataGrabber()
+ dg.get_features_wide([self.intervention_dataset])
+ original_column = dg.wide[self.intervention_dataset][
+ str(year)
+ ].values.reshape(-1, 1)
+
+ max = original_column.max()
+
+ intervened_original = intervened_percent * max / 100
+
+ scaler = StandardScaler()
+ scaler.fit(original_column)
+
+ intervened_scaled = scaler.transform(intervened_original.reshape(-1, 1))
+ intervened_transformed = sigmoid(intervened_scaled, scale=1 / 3)
+
+ # TODO this output is a bit verbose
+ # consider deleting what ends up not needed in the frontend
+ percent_calcs = {
+ "max": max,
+ "intervened_percent": intervened_percent,
+ "intervened_original": intervened_original,
+ "intervened_scaled": intervened_scaled[0, 0],
+ "intervened_transformed": intervened_transformed[0, 0],
+ }
+
+ return percent_calcs
+
+ def get_intervened_and_observed_values_original_scale(
+ self, fips, intervened_value, year
+ ):
+ dg = DataGrabber()
+ dg.get_features_std_wide([self.intervention_dataset, self.outcome_dataset])
+ dg.get_features_wide([self.intervention_dataset])
+
+ # intervened value, in the original scale
+ intervened_original_scale = revert_standardize_and_scale_scaler(
+ intervened_value, year, self.intervention_dataset
+ )
+
+ fips_id = (
+ dg.std_wide[self.intervention_dataset]
+ .loc[dg.std_wide[self.intervention_dataset]["GeoFIPS"] == fips]
+ .index[0]
+ )
+
+ # observed value, in the original scale
+ observed_original_scale = dg.wide[self.intervention_dataset].iloc[fips_id][
+ str(year)
+ ]
+
+ return (intervened_original_scale[0], observed_original_scale)
+
+ def get_group_predictions(
+ self,
+ group,
+ intervened_value,
+ year=None,
+ intervention_is_percentile=False,
+ produce_original=True,
+ ):
+ self.group_clean = list(set(group))
+ self.group_clean.sort()
+ self.produce_original = produce_original
+
+ if self.data is None:
+ file_path = os.path.join(
+ self.root,
+ "data/years_available",
+ f"{self.intervention_dataset}_{self.outcome_dataset}.pkl",
+ )
+ with open(file_path, "rb") as file:
+ self.data = dill.load(file)
+
+ if year is None:
+ year = self.data["years_available"][-1]
+ assert year in self.data["years_available"]
+
+ self.year = year
+
+ if intervention_is_percentile:
+ self.intervened_percentile = intervened_value
+ intervened_value = transformed_intervention_from_percentile(
+ self.intervention_dataset, year, intervened_value
+ )
+
+ self.intervened_value = intervened_value
+
+ # find years for prediction
+ outcome_years = self.data["outcome_years"]
+ year_id = [int(x) for x in outcome_years].index(year)
+ self.year_id = year_id
+
+ self.prediction_years = outcome_years[(year_id) : (year_id + 4)]
+
+ dg = DataGrabber()
+ dg.get_features_std_wide([self.intervention_dataset, self.outcome_dataset])
+ dg.get_features_wide([self.intervention_dataset, self.outcome_dataset])
+ interventions_this_year_original = dg.wide[self.intervention_dataset][str(year)]
+
+ self.intervened_value_original = revert_standardize_and_scale_scaler(
+ self.intervened_value, self.year, self.intervention_dataset
+ )
+
+ self.intervened_value_percentile = round(
+ (
+ np.mean(
+ interventions_this_year_original.values
+ <= self.intervened_value_original
+ )
+ * 100
+ ),
+ 3,
+ )
+
+        # note: ids will be increasingly sorted
+ self.fips_ids = (
+ dg.std_wide[self.intervention_dataset]
+ .loc[
+ dg.std_wide[self.intervention_dataset]["GeoFIPS"].isin(self.group_clean)
+ ]
+ .index.tolist()
+ )
+
+ assert len(self.fips_ids) == len(self.group_clean)
+ assert set(
+ dg.std_wide[self.intervention_dataset]["GeoFIPS"].iloc[self.fips_ids]
+ ) == set(self.group_clean)
+
+ self.names = dg.std_wide[self.intervention_dataset]["GeoName"].iloc[
+ self.fips_ids
+ ]
+
+ self.observed_interventions = dg.std_wide[self.intervention_dataset].iloc[
+ self.fips_ids
+ ][str(year)]
+
+ self.observed_interventions_original = (
+ dg.wide[self.intervention_dataset].iloc[self.fips_ids][str(year)].copy()
+ )
+
+ #
+ if intervention_is_percentile:
+ self.observed_interventions_percentile = (
+ np.round(
+ [
+ np.mean(interventions_this_year_original.values <= obs)
+ for obs in self.observed_interventions_original
+ ],
+ 3,
+ )
+ * 100
+ )
+
+ self.observed_outcomes = dg.std_wide[self.outcome_dataset].iloc[self.fips_ids][
+ outcome_years[year_id : (year_id + 4)]
+ ]
+
+ self.observed_outcomes_original = dg.wide[self.outcome_dataset].iloc[
+ self.fips_ids
+ ][outcome_years[year_id : (year_id + 4)]]
+
+ self.intervention_diffs = self.intervened_value - self.observed_interventions
+
+ self.intervention_impacts = {}
+ self.intervention_impacts_means = []
+ self.intervention_impacts_lows = []
+ self.intervention_impacts_highs = []
+ for shift in [1, 2, 3]:
+ self.intervention_impacts[shift] = np.outer(
+ self.tensed_tau_samples[shift], self.intervention_diffs
+ )
+ self.intervention_impacts_means.append(
+ np.mean(self.intervention_impacts[shift], axis=0)
+ )
+ self.intervention_impacts_lows.append(
+ np.percentile(self.intervention_impacts[shift], axis=0, q=2.5)
+ )
+ self.intervention_impacts_highs.append(
+ np.percentile(self.intervention_impacts[shift], axis=0, q=97.5)
+ )
+
+ intervention_impacts_means_array = np.column_stack(
+ self.intervention_impacts_means
+ )
+ intervention_impacts_lows_array = np.column_stack(
+ self.intervention_impacts_lows
+ )
+ intervention_impacts_highs_array = np.column_stack(
+ self.intervention_impacts_highs
+ )
+
+ future_predicted_means = (
+ self.observed_outcomes.iloc[:, 1:] + intervention_impacts_means_array
+ )
+ # predicted_means = np.insert(
+ # future_predicted_means, 0, self.observed_outcomes.iloc[:, 0], axis=1
+ # ) #TODO delete if the new version raises no index error
+
+ predicted_means = np.column_stack(
+ [self.observed_outcomes.iloc[:, 0], future_predicted_means]
+ )
+
+ future_predicted_lows = (
+ self.observed_outcomes.iloc[:, 1:] + intervention_impacts_lows_array
+ )
+ predicted_lows = np.column_stack(
+ [self.observed_outcomes.iloc[:, 0], future_predicted_lows]
+ )
+ # predicted_lows = np.insert(
+ # future_predicted_lows, 0, self.observed_outcomes.iloc[:, 0], axis=1
+ # ) #TODO as above
+
+ future_predicted_highs = (
+ self.observed_outcomes.iloc[:, 1:] + intervention_impacts_highs_array
+ )
+ # predicted_highs = np.insert(
+ # future_predicted_highs, 0, self.observed_outcomes.iloc[:, 0], axis=1
+ # ) #TODO as above
+
+ predicted_highs = np.column_stack(
+ [self.observed_outcomes.iloc[:, 0], future_predicted_highs]
+ )
+
+ if self.produce_original:
+ pred_means_reverted = []
+ pred_lows_reverted = []
+ pred_highs_reverted = []
+ obs_out_reverted = []
+ for i in range(predicted_means.shape[1]):
+ y = self.prediction_years[i]
+ obs_out_reverted.append(
+ revert_standardize_and_scale_scaler(
+ self.observed_outcomes.iloc[:, i], y, self.outcome_dataset
+ )
+ )
+ pred_means_reverted.append(
+ revert_standardize_and_scale_scaler(
+ predicted_means[:, i], y, self.outcome_dataset
+ )
+ )
+
+ pred_lows_reverted.append(
+ revert_standardize_and_scale_scaler(
+ predicted_lows[:, i], y, self.outcome_dataset
+ )
+ )
+
+ pred_highs_reverted.append(
+ revert_standardize_and_scale_scaler(
+ predicted_highs[:, i], y, self.outcome_dataset
+ )
+ )
+
+ obs_out_reverted = np.column_stack(obs_out_reverted)
+ diff = obs_out_reverted - self.observed_outcomes_original
+ diff = np.array(diff)
+ obs_out_corrected = obs_out_reverted - diff
+ pred_means_reverted = np.column_stack(pred_means_reverted)
+ pred_means_corrected = pred_means_reverted - diff
+ pred_lows_reverted = np.column_stack(pred_lows_reverted)
+ pred_lows_corrected = pred_lows_reverted - diff
+ pred_highs_reverted = np.column_stack(pred_highs_reverted)
+ pred_highs_corrected = pred_highs_reverted - diff
+
+ self.observed_outcomes_corrected = pd.DataFrame(obs_out_corrected)
+ self.observed_outcomes_corrected.index = self.observed_outcomes.index
+
+ assert predicted_means.shape == pred_means_corrected.shape
+ assert predicted_lows.shape == pred_lows_corrected.shape
+ assert predicted_highs.shape == pred_highs_corrected.shape
+
+ assert int(predicted_means.shape[0]) == len(self.group_clean)
+ assert int(predicted_means.shape[1]) == 4
+ assert int(predicted_lows.shape[0]) == len(self.group_clean)
+ assert int(predicted_lows.shape[1]) == 4
+ assert int(predicted_highs.shape[0]) == len(self.group_clean)
+ assert int(predicted_highs.shape[1]) == 4
+
+ self.group_predictions = {
+ self.group_clean[i]: pd.DataFrame(
+ {
+ "year": self.prediction_years,
+ "observed": self.observed_outcomes.loc[self.fips_ids[i]],
+ "mean": predicted_means[i,],
+ "low": predicted_lows[i,],
+ "high": predicted_highs[i,],
+ }
+ )
+ for i in range(len(self.group_clean))
+ }
+
+ if self.produce_original:
+ self.group_predictions_original = {
+ self.group_clean[i]: pd.DataFrame(
+ {
+ "year": self.prediction_years,
+ "observed": self.observed_outcomes_corrected.loc[
+ self.fips_ids[i]
+ ],
+ "mean": pred_means_corrected[i,],
+ "low": pred_lows_corrected[i,],
+ "high": pred_highs_corrected[i,],
+ }
+ )
+ for i in range(len(self.group_clean))
+ }
+
+ def get_fips_predictions(
+ self, fips, intervened_value, year=None, intervention_is_percentile=False
+ ):
+ self.fips = fips
+
+ if self.data is None:
+ file_path = os.path.join(
+ self.root,
+ "data/years_available",
+ f"{self.intervention_dataset}_{self.outcome_dataset}.pkl",
+ )
+ with open(file_path, "rb") as file:
+ self.data = dill.load(file)
+
+ # start with the latest year possible by default
+ if year is None:
+ year = self.data["years_available"][-1]
+ assert year in self.data["years_available"]
+
+ self.year = year
+
+ if intervention_is_percentile:
+ self.intervened_percentile = intervened_value
+ intervened_value = transformed_intervention_from_percentile(
+ self.intervention_dataset, year, intervened_value
+ )
+
+ self.intervened_value = intervened_value
+
+ # find years for prediction
+ outcome_years = self.data["outcome_years"]
+ year_id = [int(x) for x in outcome_years].index(year)
+ self.year_id = year_id
+
+ self.prediction_years = outcome_years[(year_id) : (year_id + 4)]
+
+ dg = DataGrabber()
+ dg.get_features_std_wide([self.intervention_dataset, self.outcome_dataset])
+ dg.get_features_wide([self.intervention_dataset, self.outcome_dataset])
+ interventions_this_year_original = dg.wide[self.intervention_dataset][str(year)]
+
+ self.intervened_value_original = revert_standardize_and_scale_scaler(
+ self.intervened_value, self.year, self.intervention_dataset
+ )
+
+ self.intervened_value_percentile = round(
+ (
+ np.mean(
+ interventions_this_year_original.values
+ <= self.intervened_value_original
+ )
+ * 100
+ ),
+ 3,
+ )
+
+ self.fips_id = (
+ dg.std_wide[self.intervention_dataset]
+ .loc[dg.std_wide[self.intervention_dataset]["GeoFIPS"] == fips]
+ .index[0]
+ )
+
+ self.name = dg.std_wide[self.intervention_dataset]["GeoName"].iloc[self.fips_id]
+
+ # get observed values at the prediction times
+ self.observed_intervention = dg.std_wide[self.intervention_dataset].iloc[
+ self.fips_id
+ ][str(year)]
+
+ self.observed_intervention_original = dg.wide[self.intervention_dataset].iloc[
+ self.fips_id
+ ][str(year)]
+
+ if intervention_is_percentile:
+ self.observed_intervention_percentile = round(
+ (
+ np.mean(
+ interventions_this_year_original.values
+ <= self.observed_intervention_original
+ )
+ * 100
+ ),
+ 1,
+ )
+
+ self.observed_outcomes = dg.std_wide[self.outcome_dataset].iloc[self.fips_id][
+ outcome_years[year_id : (year_id + 4)]
+ ]
+
+ # added
+ self.observed_outcomes_original = dg.wide[self.outcome_dataset].iloc[
+ self.fips_id
+ ][outcome_years[year_id : (year_id + 4)]]
+
+ self.intervention_diff = self.intervened_value - self.observed_intervention
+
+ self.intervention_impact = {}
+ self.intervention_impact_mean = []
+ self.intervention_impact_low = []
+ self.intervention_impact_high = []
+ for shift in [1, 2, 3]:
+ self.intervention_impact[shift] = (
+ self.tensed_tau_samples[shift] * self.intervention_diff
+ )
+ self.intervention_impact_mean.append(
+ np.mean(self.intervention_impact[shift])
+ )
+ self.intervention_impact_low.append(
+ np.percentile(self.intervention_impact[shift], 2.5)
+ )
+ self.intervention_impact_high.append(
+ np.percentile(self.intervention_impact[shift], 97.5)
+ )
+
+ predicted_mean = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_mean + self.observed_outcomes.iloc[1:]
+ ).tolist()
+ predicted_low = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_low + self.observed_outcomes.iloc[1:]
+ ).tolist()
+ predicted_high = [self.observed_outcomes.iloc[0]] + (
+ self.intervention_impact_high + self.observed_outcomes.iloc[1:]
+ ).tolist()
+
+ self.predictions = pd.DataFrame(
+ {
+ "year": self.prediction_years,
+ "observed": self.observed_outcomes,
+ "mean": predicted_mean,
+ "low": predicted_low,
+ "high": predicted_high,
+ }
+ )
+
+ self.predictions_original = revert_prediction_df(
+ self.predictions, self.outcome_dataset
+ )
+
+        # this corrects for revert-transformation perturbations
+ difference = (
+ self.predictions_original["observed"] - self.observed_outcomes_original
+ )
+ self.predictions_original[["observed", "mean", "low", "high"]] = (
+ self.predictions_original[["observed", "mean", "low", "high"]].sub(
+ difference, axis=0
+ )
+ )
+
+ def plot_predictions(
+ self, range_multiplier=1.5, show_figure=True, scaling="transformed", fips=None
+ ):
+ assert scaling in ["transformed", "original"]
+
+ # you need to pass fips
+ # and grab the appropriate predictions
+ # if you started with group predictions
+ if fips is not None:
+ self.fips = fips
+ self.predictions = self.group_predictions[fips]
+ self.predictions_original = self.group_predictions_original[fips]
+
+ self.observed_intervention = self.observed_interventions[
+ self.fips_ids[self.group_clean.index(fips)]
+ ]
+ self.observed_intervention_original = self.observed_interventions_original[
+ self.fips_ids[self.group_clean.index(fips)]
+ ]
+
+ self.name = self.names[self.fips_ids[self.group_clean.index(fips)]]
+
+ dg = DataGrabber()
+
+ if scaling == "transformed":
+ dg.get_features_std_long([self.outcome_dataset])
+ plot_data = dg.std_long[self.outcome_dataset]
+ self.fips_observed_data = plot_data[
+ plot_data["GeoFIPS"] == self.fips
+ ].copy()
+
+ y_min = (
+ min(
+ self.fips_observed_data["Value"].min(),
+ self.predictions["low"].min(),
+ )
+ - 0.05
+ )
+ y_max = (
+ max(
+ self.fips_observed_data["Value"].max(),
+ self.predictions["high"].max(),
+ )
+ + 0.05
+ )
+ else:
+ dg.get_features_long([self.outcome_dataset])
+ plot_data = dg.long[self.outcome_dataset]
+
+ self.fips_observed_data = plot_data[
+ plot_data["GeoFIPS"] == self.fips
+ ].copy()
+
+ y_min = 0.8 * min(
+ self.fips_observed_data["Value"].min(),
+ self.predictions_original["low"].min(),
+ )
+ y_max = 1.3 * max(
+ self.fips_observed_data["Value"].max(),
+ self.predictions_original["high"].max(),
+ )
+
+ fig = go.Figure()
+
+ fig.add_trace(
+ go.Scatter(
+ x=self.fips_observed_data["Year"],
+ y=self.fips_observed_data["Value"],
+ mode="lines+markers",
+ name=self.fips_observed_data["GeoName"].iloc[0],
+ line=dict(color="darkred", width=3),
+ text=self.fips_observed_data["GeoName"].iloc[0],
+ textposition="top right",
+ showlegend=False,
+ )
+ )
+
+ if scaling == "transformed":
+ fig.add_trace(
+ go.Scatter(
+ x=self.predictions["year"],
+ y=self.predictions["mean"],
+ mode="lines",
+ line=dict(color="blue", width=2),
+ name="mean prediction",
+ text=self.predictions["mean"],
+ )
+ )
+
+ credible_interval_trace = go.Scatter(
+ x=pd.concat([self.predictions["year"], self.predictions["year"][::-1]]),
+ y=pd.concat([self.predictions["high"], self.predictions["low"][::-1]]),
+ fill="toself",
+ fillcolor="rgba(0,100,80,0.2)",
+ line=dict(color="rgba(255,255,255,0)"),
+ name="95% credible interval around mean",
+ )
+
+ else:
+ fig.add_trace(
+ go.Scatter(
+ x=self.predictions_original["year"],
+ y=self.predictions_original["mean"],
+ mode="lines",
+ line=dict(color="blue", width=2),
+ name="mean prediction",
+ text=self.predictions_original["mean"],
+ )
+ )
+
+ credible_interval_trace = go.Scatter(
+ x=pd.concat(
+ [
+ self.predictions_original["year"],
+ self.predictions_original["year"][::-1],
+ ]
+ ),
+ y=pd.concat(
+ [
+ self.predictions_original["high"],
+ self.predictions_original["low"][::-1],
+ ]
+ ),
+ fill="toself",
+ fillcolor="rgba(255, 255, 255, 0.31)",
+ line=dict(color="rgba(255,255,255,0)"),
+ name="95% credible interval around mean",
+ )
+
+ fig.add_trace(credible_interval_trace)
+
+ if hasattr(self, "intervened_percentile"):
+ intervened_value = self.intervened_percentile
+ observed_intervention = self.observed_intervention_percentile
+
+ else:
+ intervened_value = round(self.intervened_value, 3)
+ observed_intervention = round(self.observed_intervention, 3)
+
+ if scaling == "transformed":
+ title = (
+ f"Predicted {self.outcome_dataset} in {self.name} under intervention {intervened_value} "
+                f"in year {self.year} \n "
+ f"compared to the observed values under observed intervention "
+ f"{observed_intervention}."
+ )
+
+ else:
+ title = (
+                f"Predicted {self.outcome_dataset} in {self.name} \n "
+ f"under intervention {self.intervened_value_original}"
+                f" in year {self.year} \n "
+                f"{self.intervened_value_percentile}% of counties received a lower intervention \n "
+ f"observed intervention: {self.observed_intervention_original}"
+ )
+
+ fig.update_yaxes(range=[y_min, y_max])
+
+ fig.update_layout(
+ title=title,
+ title_font=dict(size=12),
+ xaxis_title="Year",
+ yaxis_title="Value",
+ template="simple_white",
+ legend=dict(x=0.05, y=1, traceorder="normal", orientation="h"),
+ )
+
+ self.predictions_plot = fig
+
+ if show_figure:
+ fig.show()
+ else:
+ return fig
diff --git a/build/cities/queries/fips_query.py b/build/cities/queries/fips_query.py
new file mode 100644
index 00000000..5d6a14f3
--- /dev/null
+++ b/build/cities/queries/fips_query.py
@@ -0,0 +1,797 @@
+import numpy as np
+import pandas as pd
+import plotly.graph_objects as go
+
+from cities.utils.data_grabber import (
+ DataGrabber,
+ MSADataGrabber,
+ check_if_tensed,
+ list_available_features,
+)
+from cities.utils.similarity_utils import (
+ compute_weight_array,
+ generalized_euclidean_distance,
+ plot_weights,
+ slice_with_lag,
+)
+
+# from scipy.spatial import distance
+
+
+class FipsQuery:
+ """
+ Class for querying and analyzing jurisdiction data for a specific FIPS code,
+ in terms of specified feature groups, outcome variable, time lag, and other, listed parameters.
+ """
+
+ def __init__(
+ self,
+ fips,
+ outcome_var=None,
+ feature_groups_with_weights=None,
+ lag=0,
+ top=5,
+ time_decay=1.08,
+ outcome_comparison_period=None,
+ outcome_percentile_range=None,
+ ):
+ """
+ Initialize the FipsQuery instance.
+
+ :param fips: the FIPS code of interest.
+ :param outcome_var: the outcome variable for analysis (optional, defaults to None).
+ :param feature_groups_with_weights: a dictionary specifying feature groups and their weights
+            (weights should be int between -4 and 4).
+ :param lag: time lag for comparing outcomes with historical data (int between 0 and 6).
+ :param top: the number of top locations to consider in comparisons (defaults to 5).
+ :param time_decay: adjusts the weight decay over time in the generalized Euclidean distance calculation
+ (default is 1.08, giving somewhat more weight to more recent data).
+ :param outcome_comparison_period: specifies the years to consider for the outcome comparison,
+ can be used only when lag=0 (defaults to None).
+ :param outcome_percentile_range: percentile range for filtering locations based on the most recent value
+ of the outcome variable (defaults to None).
+ """
+
+ if feature_groups_with_weights is None and outcome_var:
+ feature_groups_with_weights = {outcome_var: 4}
+
+ if outcome_var:
+ outcome_var_dict = {
+ outcome_var: feature_groups_with_weights.pop(outcome_var)
+ }
+ outcome_var_dict.update(feature_groups_with_weights)
+ feature_groups_with_weights = outcome_var_dict
+
+ assert not (
+ lag > 0 and outcome_var is None
+ ), "lag will be idle with no outcome variable"
+
+ assert not (
+ lag > 0 and outcome_comparison_period is not None
+ ), "outcome_comparison_period is only used when lag = 0"
+
+ assert not (
+ outcome_var is None and outcome_comparison_period is not None
+ ), "outcome_comparison_period requires an outcome variable"
+
+ assert not (
+ outcome_var is None and outcome_percentile_range is not None
+ ), "outcome_percentile_range requires an outcome variable"
+
+ self.all_available_features = list_available_features()
+
+ feature_groups = list(feature_groups_with_weights.keys())
+
+ assert feature_groups, "You need to specify at least one feature group"
+
+ assert all(
+ isinstance(value, int) and -4 <= value <= 4
+ for value in feature_groups_with_weights.values()
+ ), "Feature weights need to be integers between -4 and 4"
+
+ self.feature_groups_with_weights = feature_groups_with_weights
+ self.feature_groups = feature_groups
+ self.data = DataGrabber()
+ self.repo_root = self.data.repo_root
+ self.fips = fips
+ self.lag = lag
+ self.top = top
+ self.gdp_var = "gdp"
+
+ # it's fine if they're None (by default)
+ self.outcome_var = outcome_var
+ self.outcome_comparison_period = outcome_comparison_period
+
+ self.time_decay = time_decay
+
+ if self.gdp_var not in self.feature_groups:
+ self.all_features = [self.gdp_var] + feature_groups
+ else:
+ self.all_features = feature_groups
+
+ self.data.get_features_std_wide(self.all_features)
+ self.data.get_features_wide(self.all_features)
+
+ assert (
+ fips in self.data.std_wide[self.gdp_var]["GeoFIPS"].values
+ ), "FIPS not found in the data set."
+ self.name = self.data.std_wide[self.gdp_var]["GeoName"][
+ self.data.std_wide[self.gdp_var]["GeoFIPS"] == self.fips
+ ].values[0]
+
+ assert (
+ self.lag >= 0 and self.lag < 6 and isinstance(self.lag, int)
+ ), "lag must be an iteger between 0 and 5"
+ assert (
+ self.top > 0
+ and isinstance(self.top, int)
+ and self.top
+ < 2800 # TODO Make sure the number makes sense once we add all datasets we need
+ ), "top must be a positive integer smaller than the number of locations in the dataset"
+
+ if outcome_var:
+ assert check_if_tensed(
+ self.data.std_wide[self.outcome_var]
+ ), "Outcome needs to be a time series."
+
+ self.outcome_with_percentiles = self.data.std_wide[self.outcome_var].copy()
+ most_recent_outcome = self.data.wide[self.outcome_var].iloc[:, -1].values
+ self.outcome_with_percentiles["percentile"] = (
+ most_recent_outcome < most_recent_outcome[:, np.newaxis]
+ ).sum(axis=1) / most_recent_outcome.shape[0]
+ self.outcome_with_percentiles["percentile"] = round(
+ self.outcome_with_percentiles["percentile"] * 100, 2
+ )
+ self.outcome_percentile_range = outcome_percentile_range
+
+ def compare_my_outcome_to_others(self, range_multiplier=2, sample_size=250):
+ """
+ Compare the outcome of the selected location to a sample of other locations.
+
+ This method generates a plot comparing the outcome of the current location to a
+ random sample of other locations. The plot creates a line for the current location
+ and lines for the sampled locations, providing a visual comparison.
+        It also marks the percentile at which the current location falls among *all* locations.
+
+ :param range_multiplier: multiplier for adjusting the y-axis range (defaults to 2).
+ :param sample_size: random sample size of other locations (defaults to 250).
+ """
+
+ assert self.outcome_var, "Outcome comparison requires an outcome variable."
+
+ self.data.get_features_long([self.outcome_var])
+ plot_data = self.data.long[self.outcome_var]
+ my_plot_data = plot_data[plot_data["GeoFIPS"] == self.fips].copy()
+ my_percentile = self.outcome_with_percentiles["percentile"][
+ self.outcome_with_percentiles["GeoFIPS"] == self.fips
+ ].values[0]
+
+ others_plot_data = plot_data[plot_data["GeoFIPS"] != self.fips]
+
+ fips = others_plot_data["GeoFIPS"].unique()
+ sampled_fips = np.random.choice(fips, sample_size, replace=False)
+ others_sampled_plot_data = plot_data[plot_data["GeoFIPS"].isin(sampled_fips)]
+
+ y_min = my_plot_data["Value"].mean() - (
+ range_multiplier * my_plot_data["Value"].std()
+ )
+ y_max = my_plot_data["Value"].mean() + (
+ range_multiplier * my_plot_data["Value"].std()
+ )
+
+ fig = go.Figure(layout_yaxis_range=[y_min, y_max])
+
+ for i, geoname in enumerate(others_sampled_plot_data["GeoName"].unique()):
+ subset = others_plot_data[others_plot_data["GeoName"] == geoname]
+ # line_color = shades_of_grey[i % len(shades_of_grey)]
+ # line_color = pastel_colors[i % len(pastel_colors)]
+ line_color = "lightgray"
+ fig.add_trace(
+ go.Scatter(
+ x=subset["Year"],
+ y=subset["Value"],
+ mode="lines",
+ name=subset["GeoName"].iloc[0],
+ line_color=line_color,
+ text=subset["GeoName"].iloc[0],
+ textposition="top right",
+ showlegend=False,
+ opacity=0.4,
+ )
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=my_plot_data["Year"],
+ y=my_plot_data["Value"],
+ mode="lines",
+ name=my_plot_data["GeoName"].iloc[0],
+ line=dict(color="darkred", width=3),
+ text=my_plot_data["GeoName"].iloc[0],
+ textposition="top right",
+ showlegend=False,
+ )
+ )
+
+ label_x = my_plot_data["Year"].iloc[-1] - 2
+ label_y = my_plot_data["Value"].iloc[-1] * 1.2
+ fig.add_annotation(
+ text=f"Location recent percentile: {my_percentile}%",
+ x=label_x,
+ y=label_y,
+ showarrow=False,
+ font=dict(size=12, color="darkred"),
+ )
+
+ title = f"{self.outcome_var} of {self.name}, compared to {sample_size} random other locations"
+ fig.update_layout(
+ title=title,
+ xaxis_title="Year",
+ yaxis_title=f"{self.outcome_var}",
+ template="simple_white",
+ )
+
+ fig.show()
+
+ def find_euclidean_kins(self):
+ """
+ Find Euclidean kin locations based on the specified features, weights and outcome variable.
+
+ This method calculates the Euclidean distance between the specified location and other
+ locations in the dataset based on the selected feature groups and outcome variable. It
+ adds information about the distance and the percentiles of the outcome variable to the
+ resulting dataframe, allowing for the identification of similar locations.
+ """
+
+ # cut the relevant years from the outcome variable
+ if self.outcome_comparison_period and self.outcome_var:
+ start_year, end_year = self.outcome_comparison_period
+
+ outcome_df = self.data.std_wide[self.outcome_var].copy()
+
+ condition = (outcome_df.columns[2:].copy().astype(int) >= start_year) & (
+ outcome_df.columns[2:].copy().astype(int) <= end_year
+ )
+ selected_columns = outcome_df.columns[2:][condition].copy()
+ filtered_dataframe = outcome_df[selected_columns]
+
+ restricted_df = pd.concat(
+ [outcome_df.iloc[:, :2].copy(), filtered_dataframe], axis=1
+ )
+
+ elif self.outcome_var:
+ restricted_df = self.data.std_wide[self.outcome_var].copy()
+
+ if self.outcome_var:
+ self.restricted_outcome_df = restricted_df
+
+ # apply lag in different directions to you and other locations
+ # to the outcome variable
+ if self.outcome_var:
+ self.outcome_slices = slice_with_lag(restricted_df, self.fips, self.lag)
+
+ self.my_array = np.array(self.outcome_slices["my_array"])
+ self.other_arrays = np.array(self.outcome_slices["other_arrays"])
+
+ assert self.my_array.shape[1] == self.other_arrays.shape[1]
+
+ self.my_df = self.data.wide[self.outcome_var][
+ self.data.wide[self.outcome_var]["GeoFIPS"] == self.fips
+ ].copy()
+
+ self.other_df = self.outcome_slices["other_df"]
+ self.other_df = self.data.wide[self.outcome_var][
+ self.data.wide[self.outcome_var]["GeoFIPS"] != self.fips
+ ].copy()
+ else:
+ self.my_df = pd.DataFrame(
+ self.data.wide[self.gdp_var][
+ self.data.wide[self.gdp_var]["GeoFIPS"] == self.fips
+ ].iloc[:, :2]
+ )
+ self.other_df = pd.DataFrame(
+ self.data.wide[self.gdp_var][
+ self.data.wide[self.gdp_var]["GeoFIPS"] != self.fips
+ ].iloc[:, :2]
+ )
+
+ # add data on other features to the arrays
+ # prior to distance computation
+
+ if self.outcome_var:
+ before_shape = self.other_df.shape
+
+ my_features_arrays = np.array([])
+ others_features_arrays = np.array([])
+ feature_column_count = 0
+ for feature in self.feature_groups:
+ if feature != self.outcome_var:
+ _extracted_df = self.data.wide[feature].copy()
+ feature_column_count += _extracted_df.shape[1] - 2
+ _extracted_my_df = _extracted_df[_extracted_df["GeoFIPS"] == self.fips]
+ _extracted_other_df = _extracted_df[
+ _extracted_df["GeoFIPS"] != self.fips
+ ]
+
+ _extracted_other_df.columns = [
+ f"{col}_{feature}" if col not in ["GeoFIPS", "GeoName"] else col
+ for col in _extracted_other_df.columns
+ ]
+
+ _extracted_my_df.columns = [
+ f"{col}_{feature}" if col not in ["GeoFIPS", "GeoName"] else col
+ for col in _extracted_my_df.columns
+ ]
+
+ assert (
+ _extracted_df.shape[1]
+ == _extracted_my_df.shape[1]
+ == _extracted_other_df.shape[1]
+ )
+
+ self.my_df = pd.concat(
+ (self.my_df, _extracted_my_df.iloc[:, 2:]), axis=1
+ )
+
+ self.other_df = pd.concat(
+ (self.other_df, _extracted_other_df.iloc[:, 2:]), axis=1
+ )
+
+ if self.outcome_var is None:
+ assert (
+ self.my_df.shape[1]
+ == self.other_df.shape[1]
+ == feature_column_count + 2
+ )
+
+ if self.outcome_var:
+ after_shape = self.other_df.shape
+ assert (
+ before_shape[0] == after_shape[0]
+ ), "Feature merging went wrong!"
+
+ _extracted_df_std = self.data.std_wide[feature].copy()
+ _extracted_other_array = np.array(
+ _extracted_df_std[_extracted_df_std["GeoFIPS"] != self.fips].iloc[
+ :, 2:
+ ]
+ )
+ _extracted_my_array = np.array(
+ _extracted_df_std[_extracted_df_std["GeoFIPS"] == self.fips].iloc[
+ :, 2:
+ ]
+ )
+
+ if my_features_arrays.size == 0:
+ my_features_arrays = _extracted_my_array
+ else:
+ my_features_arrays = np.hstack(
+ (my_features_arrays, _extracted_my_array)
+ )
+
+ if others_features_arrays.size == 0:
+ others_features_arrays = _extracted_other_array
+ else:
+ others_features_arrays = np.hstack(
+ (others_features_arrays, _extracted_other_array)
+ )
+
+ if len(self.feature_groups) > 1 and self.outcome_var:
+ self.my_array = np.hstack((self.my_array, my_features_arrays))
+ self.other_arrays = np.hstack((self.other_arrays, others_features_arrays))
+ elif self.outcome_var is None:
+ self.my_array = my_features_arrays.copy()
+ self.other_arrays = others_features_arrays.copy()
+
+ if self.outcome_var is None:
+ assert (
+ feature_column_count
+ == self.my_array.shape[1]
+ == self.other_arrays.shape[1]
+ )
+ assert my_features_arrays.shape == self.my_array.shape
+ assert others_features_arrays.shape == self.other_arrays.shape
+
+ compute_weight_array(self, self.time_decay)
+
+ diff = self.all_weights.shape[0] - self.other_arrays.shape[1]
+ self.all_weights = self.all_weights[diff:]
+
+ # if self.outcome_var:
+ assert (
+ self.other_arrays.shape[1] == self.all_weights.shape[0]
+ ), "Weights and arrays are misaligned"
+
+ distances = []
+ featurewise_contributions = []
+ for vector in self.other_arrays:
+ _ge = generalized_euclidean_distance(
+ np.squeeze(self.my_array), vector, self.all_weights
+ )
+ distances.append(_ge["distance"])
+ featurewise_contributions.append(_ge["featurewise_contributions"])
+
+ # keep weighted distance contribution of each individual feature
+ featurewise_contributions_array = np.vstack(featurewise_contributions)
+
+ assert featurewise_contributions_array.shape[1] == len(self.all_weights)
+
+ # turn into df, add ID columns and sort by distance
+ featurewise_contributions_df = pd.DataFrame(
+ featurewise_contributions_array, columns=self.all_columns
+ )
+ featurewise_contributions_df[f"distance to {self.fips}"] = distances
+ featurewise_contributions_df = pd.concat(
+ [self.other_df[["GeoFIPS", "GeoName"]], featurewise_contributions_df],
+ axis=1,
+ )
+ featurewise_contributions_df.sort_values(
+ by=featurewise_contributions_df.columns[-1], inplace=True
+ )
+
+ # isolate ID columns with distance, tensed columns, atemporal columns
+ tensed_column_names = [
+ col for col in featurewise_contributions_df.columns if col[:4].isdigit()
+ ]
+ atemporal_column_names = [
+ col for col in featurewise_contributions_df.columns if not col[:4].isdigit()
+ ]
+ id_column_names = atemporal_column_names[0:2] + [atemporal_column_names[-1]]
+ atemporal_column_names = [
+ col for col in atemporal_column_names if col not in id_column_names
+ ]
+
+ id_df = featurewise_contributions_df[id_column_names]
+ tensed_featurewise_contributions_df = featurewise_contributions_df[
+ tensed_column_names
+ ]
+ atemporal_featurewise_contributions_df = featurewise_contributions_df[
+ atemporal_column_names
+ ]
+
+ # aggregate tensed features (sum across years)
+ aggregated_tensed_featurewise_contributions_df = (
+ tensed_featurewise_contributions_df.T.groupby(
+ tensed_featurewise_contributions_df.columns.str[5:]
+ )
+ .sum()
+ .T
+ )
+
+ # aggregate atemporal features (sum across official feature list)
+ atemporal_aggregated_dict = {}
+ for feature in list(self.all_available_features):
+ _selected = [
+ col
+ for col in atemporal_featurewise_contributions_df.columns
+ if col.endswith(feature)
+ ]
+ if _selected:
+ atemporal_aggregated_dict[feature] = (
+ atemporal_featurewise_contributions_df[_selected].sum(axis=1)
+ )
+
+ aggregated_atemporal_featurewise_contributions_df = pd.DataFrame(
+ atemporal_aggregated_dict
+ )
+
+ self.featurewise_contributions = featurewise_contributions_df
+
+ # put together the aggregated featurewise contributions
+ # and normalize row-wise
+ # numbers now mean: "percentage of contribution to the distance"
+ self.aggregated_featurewise_contributions = pd.concat(
+ [
+ id_df,
+ aggregated_tensed_featurewise_contributions_df,
+ aggregated_atemporal_featurewise_contributions_df,
+ ],
+ axis=1,
+ )
+ columns_to_normalize = self.aggregated_featurewise_contributions.iloc[:, 3:]
+ self.aggregated_featurewise_contributions.iloc[:, 3:] = (
+ columns_to_normalize.div(columns_to_normalize.sum(axis=1), axis=0)
+ )
+
+ # some sanity checks
+ count = sum([1 for distance in distances if distance == 0])
+
+ assert (
+ len(distances) == self.other_arrays.shape[0]
+ ), "Distances and arrays are misaligned"
+ assert (
+ len(distances) == self.other_df.shape[0]
+ ), "Distances and df are misaligned"
+
+ # #self.other_df[f"distance to {self.fips}"] = distances #remove soon if no errors
+ self.other_df.loc[:, f"distance to {self.fips}"] = distances
+
+ count_zeros = (self.other_df[f"distance to {self.fips}"] == 0).sum()
+        assert count_zeros == count, f"{count_zeros} zeros in alien distances!"
+
+ # sort and put together euclidean kins
+ self.other_df.sort_values(by=self.other_df.columns[-1], inplace=True)
+
+ self.my_df[f"distance to {self.fips}"] = 0
+
+ self.euclidean_kins = pd.concat((self.my_df, self.other_df), axis=0)
+
+ if self.outcome_var:
+ self.euclidean_kins = self.euclidean_kins.merge(
+ self.outcome_with_percentiles[["GeoFIPS", "percentile"]],
+ on="GeoFIPS",
+ how="left",
+ )
+
+ if self.outcome_var and self.outcome_percentile_range is not None:
+ myself = self.euclidean_kins.iloc[:1]
+ self.euclidean_kins = self.euclidean_kins[
+ self.euclidean_kins["percentile"] >= self.outcome_percentile_range[0]
+ ]
+ self.euclidean_kins = self.euclidean_kins[
+ self.euclidean_kins["percentile"] <= self.outcome_percentile_range[1]
+ ]
+ self.euclidean_kins = pd.concat([myself, self.euclidean_kins])
+
+    def plot_weights(self):
+        """
+        Visualize this query's feature weights by delegating to the
+        module-level `plot_weights` plotting helper.
+        """
+        plot_weights(self)
+
+ def plot_kins_other_var(self, var, fips_top_custom=None):
+ """
+ For a specified variable plot the time series for the current location and its Euclidean kin locations.
+
+ Parameters:
+ - var (str): The variable for which the time series will be plotted.
+ - fips_top_custom (list or None): Custom list of FIPS codes to use instead of the top Euclidean kin locations.
+
+ Returns:
+ - fig: Plotly figure object.
+
+ Note:
+ - The method requires running `find_euclidean_kins` first.
+ """
+
+ # assert self.outcome_var, "Outcome comparison requires an outcome variable"
+ assert hasattr(self, "euclidean_kins"), "Run `find_euclidean_kins` first"
+
+ self.data.get_features_long([var])
+ plot_data = self.data.long[var]
+ my_plot_data = plot_data[plot_data["GeoFIPS"] == self.fips].copy()
+
+ if fips_top_custom is None:
+ fips_top = self.euclidean_kins["GeoFIPS"].iloc[1 : (self.top + 1)].values
+ else:
+ fips_top = fips_top_custom
+
+ others_plot_data = plot_data[plot_data["GeoFIPS"].isin(fips_top)]
+
+ value_column_name = my_plot_data.columns[-1]
+ fig = go.Figure()
+ fig.add_trace(
+ go.Scatter(
+ x=my_plot_data["Year"],
+ y=my_plot_data[value_column_name],
+ mode="lines",
+ name=my_plot_data["GeoName"].iloc[0],
+ line=dict(color="darkred", width=3),
+ text=my_plot_data["GeoName"].iloc[0],
+ textposition="top right",
+ )
+ )
+
+ pastel_colors = ["#FFC0CB", "#A9A9A9", "#87CEFA", "#FFD700", "#98FB98"][
+ : self.top
+ ]
+
+ for i, fips in enumerate(fips_top):
+ subset = others_plot_data[others_plot_data["GeoFIPS"] == fips]
+ line_color = pastel_colors[i % len(pastel_colors)]
+ fig.add_trace(
+ go.Scatter(
+ x=subset["Year"] + self.lag,
+ y=subset[value_column_name],
+ mode="lines",
+ name=subset["GeoName"].iloc[0],
+ line_color=line_color,
+ text=subset["GeoName"].iloc[0],
+ textposition="top right",
+ )
+ )
+
+ if self.lag > 0:
+ fig.update_layout(
+ shapes=[
+ dict(
+ type="line",
+ x0=2021,
+ x1=2021,
+ y0=0,
+ y1=1,
+ xref="x",
+ yref="paper",
+ line=dict(color="darkgray", width=2),
+ )
+ ]
+ )
+
+ fig.add_annotation(
+ text=f"their year {2021 - self.lag}",
+ x=2021.0,
+ y=1.05,
+ xref="x",
+ yref="paper",
+ showarrow=False,
+ font=dict(color="darkgray"),
+ )
+
+ top = self.top
+ lag = self.lag
+ title_1 = title = f"Top {self.top} locations matching your search"
+ title_2 = (
+ f"Top {self.top} locations matching your search (lag of {self.lag} years)"
+ )
+
+ if not self.feature_groups:
+ if self.lag == 0:
+ title = title_1
+ else:
+ title = title_2
+ else:
+ if self.lag == 0:
+ title = f"Top {top} locations matching your search"
+ else:
+ title = f"Top {top} locations matching your search (lag of {lag} years)"
+
+ fig.update_layout(
+ title=title,
+ xaxis_title="Year",
+ yaxis_title=f"{var}",
+ legend=dict(title="GeoName"),
+ template="simple_white",
+ )
+
+ return fig
+
+    def plot_kins(self):
+        """
+        Build (without displaying) the time-series figure of the outcome
+        variable for this location and its Euclidean kin locations.
+        """
+        fig = self.plot_kins_other_var(self.outcome_var)
+        return fig
+
+    def show_kins_plot(self):
+        """
+        Build and *display* (via `fig.show()`) the outcome-variable
+        time-series plot for this location and its Euclidean kins.
+        """
+        fig = self.plot_kins()
+        fig.show()
+
+
+# TODO_Nikodem add population clustering and warning if a population is much different,
+# especially if small
+
+
+class MSAFipsQuery(FipsQuery):
+ # super().__init__(
+ # fips,
+ # outcome_var,
+ # feature_groups_with_weights,
+ # lag,
+ # top,
+ # time_decay,
+ # outcome_comparison_period,
+ # outcome_percentile_range,
+ # )
+ def __init__(
+ self,
+ fips,
+ outcome_var=None,
+ feature_groups_with_weights=None,
+ lag=0,
+ top=5,
+ time_decay=1.08,
+ outcome_comparison_period=None,
+ outcome_percentile_range=None,
+ ):
+ # self.data = MSADataGrabber()
+ # self.all_available_features = list_available_features(level="msa")
+ # self.gdp_var = "gdp_ma"
+ # print("MSAFipsQuery __init__ data:", self.data)
+
+ if feature_groups_with_weights is None and outcome_var:
+ feature_groups_with_weights = {outcome_var: 4}
+
+ if outcome_var:
+ outcome_var_dict = {
+ outcome_var: feature_groups_with_weights.pop(outcome_var)
+ }
+ outcome_var_dict.update(feature_groups_with_weights)
+ feature_groups_with_weights = outcome_var_dict
+
+ assert not (
+ lag > 0 and outcome_var is None
+ ), "Lag will be idle with no outcome variable"
+
+ assert not (
+ lag > 0 and outcome_comparison_period is not None
+ ), "outcome_comparison_period is only used when lag = 0"
+
+ assert not (
+ outcome_var is None and outcome_comparison_period is not None
+ ), "outcome_comparison_period requires an outcome variable"
+
+ assert not (
+ outcome_var is None and outcome_percentile_range is not None
+ ), "outcome_percentile_range requires an outcome variable"
+
+ self.all_available_features = list_available_features("msa")
+
+ feature_groups = list(feature_groups_with_weights.keys())
+
+ assert feature_groups, "You need to specify at least one feature group"
+
+ assert all(
+ isinstance(value, int) and -4 <= value <= 4
+ for value in feature_groups_with_weights.values()
+ ), "Feature weights need to be integers between -4 and 4"
+
+ self.feature_groups_with_weights = feature_groups_with_weights
+ self.feature_groups = feature_groups
+ self.data = MSADataGrabber()
+ self.repo_root = self.data.repo_root
+ self.fips = fips
+ self.lag = lag
+ self.top = top
+ self.gdp_var = "gdp_ma"
+
+ # it's fine if they're None (by default)
+ self.outcome_var = outcome_var
+ self.outcome_comparison_period = outcome_comparison_period
+
+ self.time_decay = time_decay
+
+ if self.gdp_var not in self.feature_groups:
+ self.all_features = [self.gdp_var] + feature_groups
+ else:
+ self.all_features = feature_groups
+
+ self.data.get_features_std_wide(self.all_features)
+ self.data.get_features_wide(self.all_features)
+
+ assert (
+ fips in self.data.std_wide[self.gdp_var]["GeoFIPS"].values
+ ), "FIPS not found in the data set."
+ self.name = self.data.std_wide[self.gdp_var]["GeoName"][
+ self.data.std_wide[self.gdp_var]["GeoFIPS"] == self.fips
+ ].values[0]
+
+ assert (
+ self.lag >= 0 and self.lag < 6 and isinstance(self.lag, int)
+ ), "lag must be an iteger between 0 and 5"
+ assert (
+ self.top > 0
+ and isinstance(self.top, int)
+ and self.top
+ < 100 # TODO Make sure the number makes sense once we add all datasets we need
+ ), "top must be a positive integer smaller than the number of locations in the dataset"
+
+ if outcome_var:
+ assert check_if_tensed(
+ self.data.std_wide[self.outcome_var]
+ ), "Outcome needs to be a time series."
+
+ self.outcome_with_percentiles = self.data.std_wide[self.outcome_var].copy()
+ most_recent_outcome = self.data.wide[self.outcome_var].iloc[:, -1].values
+ self.outcome_with_percentiles["percentile"] = (
+ most_recent_outcome < most_recent_outcome[:, np.newaxis]
+ ).sum(axis=1) / most_recent_outcome.shape[0]
+ self.outcome_with_percentiles["percentile"] = round(
+ self.outcome_with_percentiles["percentile"] * 100, 2
+ )
+ self.outcome_percentile_range = outcome_percentile_range
diff --git a/build/cities/utils/__init__.py b/build/cities/utils/__init__.py
new file mode 100644
index 00000000..f19c781f
--- /dev/null
+++ b/build/cities/utils/__init__.py
@@ -0,0 +1,2 @@
+# from .cleaning_utils import find_repo_root
+# from .data_grabber import DataGrabber
diff --git a/build/cities/utils/clean_gdp.py b/build/cities/utils/clean_gdp.py
new file mode 100644
index 00000000..543d35c6
--- /dev/null
+++ b/build/cities/utils/clean_gdp.py
@@ -0,0 +1,80 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_gdp():
+ gdp = pd.read_csv(f"{root}/data/raw/CAGDP1_2001_2021.csv", encoding="ISO-8859-1")
+
+ gdp = gdp.loc[:9533] # drop notes at the bottom
+
+ gdp["GeoFIPS"] = gdp["GeoFIPS"].fillna("").astype(str)
+ gdp["GeoFIPS"] = gdp["GeoFIPS"].str.strip(' "').astype(int)
+
+ # remove large regions
+ gdp = gdp[gdp["GeoFIPS"] % 1000 != 0]
+
+ # focus on chain-type GDP
+ mask = gdp["Description"].str.startswith("Chain")
+ gdp = gdp[mask]
+
+ # drop Region number, Tablename, LineCode, IndustryClassification columns (the last one is empty anyway)
+ gdp = gdp.drop(gdp.columns[2:8], axis=1)
+
+ # 2012 makes no sense, it's 100 throughout
+ gdp = gdp.drop("2012", axis=1)
+
+ gdp.replace("(NA)", np.nan, inplace=True)
+ gdp.replace("(NM)", np.nan, inplace=True)
+
+ # nan_rows = gdp[gdp.isna().any(axis=1)] # if inspection is needed
+
+ gdp.dropna(axis=0, inplace=True)
+
+ for column in gdp.columns[2:]:
+ gdp[column] = gdp[column].astype(float)
+
+ assert gdp["GeoName"].is_unique
+
+ # subsetting GeoFIPS to values in exclusions.csv
+
+ exclusions_df = pd.read_csv(f"{root}/data/raw/exclusions.csv")
+ gdp = gdp[~gdp["GeoFIPS"].isin(exclusions_df["exclusions"])]
+
+ assert len(gdp) == len(gdp["GeoFIPS"].unique())
+ assert len(gdp) > 2800, "The number of records is lower than 2800"
+
+ patState = r", [A-Z]{2}(\*{1,2})?$"
+ GeoNameError = "Wrong Geoname value!"
+ assert gdp["GeoName"].str.contains(patState, regex=True).all(), GeoNameError
+ assert sum(gdp["GeoName"].str.count(", ")) == gdp.shape[0], GeoNameError
+
+ for column in gdp.columns[2:]:
+ assert (gdp[column] > 0).all(), f"Negative values in {column}"
+ assert gdp[column].isna().sum() == 0, f"Missing values in {column}"
+ assert gdp[column].isnull().sum() == 0, f"Null values in {column}"
+ assert (gdp[column] < 3000).all(), f"Values suspiciously large in {column}"
+
+ # TODO_Nikodem investigate strange large values
+
+ gdp_wide = gdp.copy()
+ gdp_long = pd.melt(
+ gdp.copy(), id_vars=["GeoFIPS", "GeoName"], var_name="Year", value_name="Value"
+ )
+
+ gdp_std_wide = standardize_and_scale(gdp)
+ gdp_std_long = pd.melt(
+ gdp_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Year",
+ value_name="Value",
+ )
+
+ gdp_wide.to_csv(f"{root}/data/processed/gdp_wide.csv", index=False)
+ gdp_long.to_csv(f"{root}/data/processed/gdp_long.csv", index=False)
+ gdp_std_wide.to_csv(f"{root}/data/processed/gdp_std_wide.csv", index=False)
+ gdp_std_long.to_csv(f"{root}/data/processed/gdp_std_long.csv", index=False)
diff --git a/build/cities/utils/clean_variable.py b/build/cities/utils/clean_variable.py
new file mode 100644
index 00000000..75d63b59
--- /dev/null
+++ b/build/cities/utils/clean_variable.py
@@ -0,0 +1,208 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_gdp import clean_gdp
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+
+class VariableCleaner:
+ def __init__(
+ self,
+ variable_name: str,
+ path_to_raw_csv: str,
+ year_or_category: str = "Year", # Year or Category
+ ):
+ self.variable_name = variable_name
+ self.path_to_raw_csv = path_to_raw_csv
+ self.year_or_category = year_or_category
+ self.root = find_repo_root()
+ self.data_grabber = DataGrabber()
+ self.folder = "processed"
+ self.gdp = None
+ self.variable_df = None
+
+ def clean_variable(self):
+ self.load_raw_csv()
+ self.drop_nans()
+ self.load_gdp_data()
+ self.check_exclusions()
+ self.restrict_common_fips()
+ self.save_csv_files(self.folder)
+
+ def load_raw_csv(self):
+ self.variable_df = pd.read_csv(self.path_to_raw_csv)
+ self.variable_df["GeoFIPS"] = self.variable_df["GeoFIPS"].astype(int)
+
+ def drop_nans(self):
+ self.variable_df = self.variable_df.dropna()
+
+ def load_gdp_data(self):
+ self.data_grabber.get_features_wide(["gdp"])
+ self.gdp = self.data_grabber.wide["gdp"]
+
+ def add_new_exclusions(self, common_fips):
+ new_exclusions = np.setdiff1d(
+ self.gdp["GeoFIPS"].unique(), self.variable_df["GeoFIPS"].unique()
+ )
+ print("Adding new exclusions to exclusions.csv: " + str(new_exclusions))
+ exclusions = pd.read_csv((f"{self.root}/data/raw/exclusions.csv"))
+ new_rows = pd.DataFrame(
+ {
+ "dataset": [self.variable_name] * len(new_exclusions),
+ "exclusions": new_exclusions,
+ }
+ )
+ exclusions = pd.concat([exclusions, new_rows], ignore_index=True)
+ exclusions = exclusions.drop_duplicates()
+ exclusions = exclusions.sort_values(by=["dataset", "exclusions"]).reset_index(
+ drop=True
+ )
+ exclusions.to_csv((f"{self.root}/data/raw/exclusions.csv"), index=False)
+ print("Rerunning gdp cleaning with new exclusions")
+
+ def check_exclusions(self):
+ common_fips = np.intersect1d(
+ self.gdp["GeoFIPS"].unique(), self.variable_df["GeoFIPS"].unique()
+ )
+ if (
+ len(
+ np.setdiff1d(
+ self.gdp["GeoFIPS"].unique(), self.variable_df["GeoFIPS"].unique()
+ )
+ )
+ > 0
+ ):
+ self.add_new_exclusions(common_fips)
+ clean_gdp()
+ self.clean_variable()
+
+ def restrict_common_fips(self):
+ common_fips = np.intersect1d(
+ self.gdp["GeoFIPS"].unique(), self.variable_df["GeoFIPS"].unique()
+ )
+ self.variable_df = self.variable_df[
+ self.variable_df["GeoFIPS"].isin(common_fips)
+ ]
+ self.variable_df = self.variable_df.merge(
+ self.gdp[["GeoFIPS", "GeoName"]], on=["GeoFIPS", "GeoName"], how="left"
+ )
+ self.variable_df = self.variable_df.sort_values(by=["GeoFIPS", "GeoName"])
+ for column in self.variable_df.columns:
+ if column not in ["GeoFIPS", "GeoName"]:
+ self.variable_df[column] = self.variable_df[column].astype(float)
+
+ def save_csv_files(self, folder):
+ # it would be great to make sure that a db is wide, if not make it wide
+ variable_db_wide = self.variable_df.copy()
+ variable_db_long = pd.melt(
+ self.variable_df,
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name=self.year_or_category,
+ value_name="Value",
+ )
+ variable_db_std_wide = standardize_and_scale(self.variable_df)
+ variable_db_std_long = pd.melt(
+ variable_db_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name=self.year_or_category,
+ value_name="Value",
+ )
+
+ variable_db_wide.to_csv(
+ (f"{self.root}/data/{folder}/" + self.variable_name + "_wide.csv"),
+ index=False,
+ )
+ variable_db_long.to_csv(
+ (f"{self.root}/data/{folder}/" + self.variable_name + "_long.csv"),
+ index=False,
+ )
+ variable_db_std_wide.to_csv(
+ (f"{self.root}/data/{folder}/" + self.variable_name + "_std_wide.csv"),
+ index=False,
+ )
+ variable_db_std_long.to_csv(
+ (f"{self.root}/data/{folder}/" + self.variable_name + "_std_long.csv"),
+ index=False,
+ )
+
+
+class VariableCleanerMSA(
+    VariableCleaner
+):  # this class inherits functionalities of VariableCleaner, but works at the MSA level
+    def __init__(
+        self, variable_name: str, path_to_raw_csv: str, year_or_category: str = "Year"
+    ):
+        super().__init__(variable_name, path_to_raw_csv, year_or_category)
+        self.folder = "MSA_level"
+        self.metro_areas = None
+
+    def clean_variable(self):
+        self.load_raw_csv()
+        self.drop_nans()
+        self.process_data()
+        # TODO self.check_exclusions('MA') functionality needs to be implemented in the future
+        # TODO but only if data missingness turns out to be a serious problem
+        # for now, process_data runs a check and reports missingness
+        # but we need to be more careful about MSA missingness handling
+        # as there are much fewer MSAs than counties
+        self.save_csv_files(self.folder)
+
+    def load_metro_areas(self):
+        self.metro_areas = pd.read_csv(f"{self.root}/data/raw/metrolist.csv")
+
+    def process_data(self):
+        self.load_metro_areas()
+        assert (
+            self.metro_areas["GeoFIPS"].nunique()
+            == self.variable_df["GeoFIPS"].nunique()
+        )
+        assert (
+            self.metro_areas["GeoName"].nunique()
+            == self.variable_df["GeoName"].nunique()
+        )
+        self.variable_df["GeoFIPS"] = self.variable_df["GeoFIPS"].astype(np.int64)
+
+
+def weighted_mean(group, column):
+ values = group[column]
+ weights = group["Total population"]
+
+ not_nan_indices = ~np.isnan(values)
+
+ if np.any(not_nan_indices) and np.sum(weights[not_nan_indices]) != 0:
+ weighted_values = values[not_nan_indices] * weights[not_nan_indices]
+ return np.sum(weighted_values) / np.sum(weights[not_nan_indices])
+ else:
+ return np.nan
+
+
+def communities_tracts_to_counties(
+ data, list_variables
+) -> pd.DataFrame: # using the weighted mean function for total population
+ all_results = pd.DataFrame()
+
+ for variable in list_variables:
+ weighted_avg = (
+ data.groupby("GeoFIPS").apply(weighted_mean, column=variable).reset_index()
+ )
+ weighted_avg.columns = ["GeoFIPS", variable]
+
+ nan_counties = (
+ data.groupby("GeoFIPS")
+ .apply(lambda x: all(np.isnan(x[variable])))
+ .reset_index()
+ )
+ nan_counties.columns = ["GeoFIPS", "all_nan"]
+
+ result_df = pd.merge(weighted_avg, nan_counties, on="GeoFIPS")
+ result_df.loc[result_df["all_nan"], variable] = np.nan
+
+ result_df = result_df.drop(columns=["all_nan"])
+
+ if "GeoFIPS" not in all_results.columns:
+ all_results = result_df.copy()
+ else:
+ all_results = pd.merge(all_results, result_df, on="GeoFIPS", how="left")
+
+ return all_results
diff --git a/build/cities/utils/cleaning_scripts/clean_age_composition.py b/build/cities/utils/cleaning_scripts/clean_age_composition.py
new file mode 100644
index 00000000..acb63d07
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_age_composition.py
@@ -0,0 +1,30 @@
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+data = DataGrabber()
+data.get_features_wide(["gdp"])
+gdp = data.wide["gdp"]
+
+
+def clean_age_first():
+ age = pd.read_csv(f"{root}/data/raw/age.csv")
+
+ age.iloc[:, 2:] = age.iloc[:, 2:].div(age["total_pop"], axis=0) * 100
+ age.drop("total_pop", axis=1, inplace=True)
+
+ age.to_csv(f"{root}/data/raw/age_percentages.csv", index=False)
+
+
+def clean_age_composition():
+ clean_age_first()
+
+ cleaner = VariableCleaner(
+ variable_name="age_composition",
+ path_to_raw_csv=f"{root}/data/raw/age_percentages.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_burdens.py b/build/cities/utils/cleaning_scripts/clean_burdens.py
new file mode 100644
index 00000000..cb2be9ad
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_burdens.py
@@ -0,0 +1,57 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleaner, communities_tracts_to_counties
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+data = DataGrabber()
+data.get_features_wide(["gdp"])
+gdp = data.wide["gdp"]
+
+
+def clean_burdens_first():
+ burdens = pd.read_csv(f"{root}/data/raw/communities_raw.csv")
+
+ list_variables = ["Housing burden (percent)", "Energy burden"]
+ burdens = communities_tracts_to_counties(burdens, list_variables)
+
+ burdens["GeoFIPS"] = burdens["GeoFIPS"].astype(np.int64)
+
+ common_fips = np.intersect1d(burdens["GeoFIPS"].unique(), gdp["GeoFIPS"].unique())
+ burdens = burdens[burdens["GeoFIPS"].isin(common_fips)]
+ burdens = burdens.merge(gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left")
+
+ burdens = burdens[
+ ["GeoFIPS", "GeoName", "Housing burden (percent)", "Energy burden"]
+ ]
+
+ burdens.columns = ["GeoFIPS", "GeoName", "burdens_housing", "burdens_energy"]
+
+ columns_to_trans = burdens.columns[-2:]
+ burdens[columns_to_trans] = burdens[columns_to_trans].astype("float64")
+
+ burdens_housing = burdens[["GeoFIPS", "GeoName", "burdens_housing"]]
+ burdens_energy = burdens[["GeoFIPS", "GeoName", "burdens_energy"]]
+
+ burdens_housing.to_csv(f"{root}/data/raw/burdens_housing_raw.csv", index=False)
+ burdens_energy.to_csv(f"{root}/data/raw/burdens_energy_raw.csv", index=False)
+
+
+def clean_burdens():
+ clean_burdens_first()
+
+ cleaner_housing = VariableCleaner(
+ variable_name="burdens_housing",
+ path_to_raw_csv=f"{root}/data/raw/burdens_housing_raw.csv",
+ year_or_category="Category",
+ )
+ cleaner_housing.clean_variable()
+
+ cleaner_energy = VariableCleaner(
+ variable_name="burdens_energy",
+ path_to_raw_csv=f"{root}/data/raw/burdens_energy_raw.csv",
+ year_or_category="Category",
+ )
+ cleaner_energy.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_ethnic_composition.py b/build/cities/utils/cleaning_scripts/clean_ethnic_composition.py
new file mode 100644
index 00000000..b18ef031
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_ethnic_composition.py
@@ -0,0 +1,138 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_ethnic_composition():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide["gdp"]
+
+ ethnic_composition = pd.read_csv(f"{root}/data/raw/ACSDP5Y2021_DP05_Race.csv")
+
+ ethnic_composition = ethnic_composition.iloc[1:]
+ ethnic_composition["GEO_ID"].isna() == 0
+
+ ethnic_composition["GEO_ID"] = ethnic_composition["GEO_ID"].str.split("US").str[1]
+ ethnic_composition["GEO_ID"] = ethnic_composition["GEO_ID"].astype("int64")
+ ethnic_composition = ethnic_composition.rename(columns={"GEO_ID": "GeoFIPS"})
+
+ ethnic_composition = ethnic_composition[
+ ["GeoFIPS"] + [col for col in ethnic_composition.columns if col.endswith("E")]
+ ]
+ ethnic_composition = ethnic_composition.drop(columns=["NAME"])
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), ethnic_composition["GeoFIPS"].unique()
+ )
+ len(common_fips)
+
+ ethnic_composition = ethnic_composition[
+ ethnic_composition["GeoFIPS"].isin(common_fips)
+ ]
+
+ ethnic_composition = ethnic_composition.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )
+
+ ethnic_composition = ethnic_composition[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "DP05_0070E",
+ "DP05_0072E",
+ "DP05_0073E",
+ "DP05_0074E",
+ "DP05_0075E",
+ "DP05_0077E",
+ "DP05_0078E",
+ "DP05_0079E",
+ "DP05_0080E",
+ "DP05_0081E",
+ "DP05_0082E",
+ "DP05_0083E",
+ ]
+ ]
+
+ ethnic_composition.columns = [
+ "GeoFIPS",
+ "GeoName",
+ "total_pop",
+ "mexican",
+ "puerto_rican",
+ "cuban",
+ "other_hispanic_latino",
+ "white",
+ "black_african_american",
+ "american_indian_alaska_native",
+ "asian",
+ "native_hawaiian_other_pacific_islander",
+ "other_race",
+ "two_or_more_sum",
+ ]
+ ethnic_composition = ethnic_composition.sort_values(by=["GeoFIPS", "GeoName"])
+
+ ethnic_composition.iloc[:, 2:] = ethnic_composition.iloc[:, 2:].apply(
+ pd.to_numeric, errors="coerce"
+ )
+ ethnic_composition[ethnic_composition.columns[2:]] = ethnic_composition[
+ ethnic_composition.columns[2:]
+ ].astype(float)
+
+ ethnic_composition["other_race_races"] = (
+ ethnic_composition["other_race"] + ethnic_composition["two_or_more_sum"]
+ )
+ ethnic_composition = ethnic_composition.drop(
+ ["other_race", "two_or_more_sum"], axis=1
+ )
+
+ ethnic_composition["totalALT"] = ethnic_composition.iloc[:, 3:].sum(axis=1)
+ assert (ethnic_composition["totalALT"] == ethnic_composition["total_pop"]).all()
+ ethnic_composition = ethnic_composition.drop("totalALT", axis=1)
+
+ # copy with nominal values
+ ethnic_composition.to_csv(
+ f"{root}/data/raw/ethnic_composition_nominal.csv", index=False
+ )
+
+ row_sums = ethnic_composition.iloc[:, 2:].sum(axis=1)
+ ethnic_composition.iloc[:, 3:] = ethnic_composition.iloc[:, 3:].div(
+ row_sums, axis=0
+ )
+
+ ethnic_composition = ethnic_composition.drop(["total_pop"], axis=1)
+
+ ethnic_composition_wide = ethnic_composition.copy()
+
+ ethnic_composition_long = pd.melt(
+ ethnic_composition,
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ ethnic_composition_std_wide = standardize_and_scale(ethnic_composition)
+
+ ethnic_composition_std_long = pd.melt(
+ ethnic_composition_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ ethnic_composition_wide.to_csv(
+ f"{root}/data/processed/ethnic_composition_wide.csv", index=False
+ )
+ ethnic_composition_long.to_csv(
+ f"{root}/data/processed/ethnic_composition_long.csv", index=False
+ )
+ ethnic_composition_std_wide.to_csv(
+ f"{root}/data/processed/ethnic_composition_std_wide.csv", index=False
+ )
+ ethnic_composition_std_long.to_csv(
+ f"{root}/data/processed/ethnic_composition_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_ethnic_composition_ma.py b/build/cities/utils/cleaning_scripts/clean_ethnic_composition_ma.py
new file mode 100644
index 00000000..acc69717
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_ethnic_composition_ma.py
@@ -0,0 +1,75 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleanerMSA
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_ethnic_initially():
+ ethnic_composition = pd.read_csv(f"{root}/data/raw/ethnic_composition_cbsa.csv")
+ metro_areas = pd.read_csv(f"{root}/data/raw/metrolist.csv")
+
+ ethnic_composition["CBSA"] = ethnic_composition["CBSA"].astype(np.int64)
+ ethnic_composition = ethnic_composition[
+ ethnic_composition["CBSA"].isin(metro_areas["GeoFIPS"])
+ ]
+
+ ethnic_composition = pd.merge(
+ ethnic_composition,
+ metro_areas[["GeoFIPS", "GeoName"]],
+ left_on="CBSA",
+ right_on="GeoFIPS",
+ how="inner",
+ )
+ ethnic_composition = ethnic_composition.drop_duplicates(subset=["CBSA"])
+
+ ethnic_composition.drop(columns="CBSA", inplace=True)
+
+ cols_to_save = ethnic_composition.shape[1] - 2
+ ethnic_composition_ma = ethnic_composition[
+ ["GeoFIPS", "GeoName"] + list(ethnic_composition.columns[0:cols_to_save])
+ ]
+
+ ethnic_composition_ma.iloc[:, 2:] = ethnic_composition_ma.iloc[:, 2:].apply(
+ pd.to_numeric, errors="coerce"
+ )
+ ethnic_composition_ma[ethnic_composition_ma.columns[2:]] = ethnic_composition_ma[
+ ethnic_composition_ma.columns[2:]
+ ].astype(float)
+
+ ethnic_composition_ma["other_race_races"] = (
+ ethnic_composition_ma["other_race"] + ethnic_composition_ma["two_or_more_sum"]
+ )
+ ethnic_composition_ma = ethnic_composition_ma.drop(
+ ["other_race", "two_or_more_sum"], axis=1
+ )
+
+ ethnic_composition_ma["totalALT"] = ethnic_composition_ma.iloc[:, 3:].sum(axis=1)
+ assert (
+ ethnic_composition_ma["totalALT"] == ethnic_composition_ma["total_pop"]
+ ).all()
+ ethnic_composition_ma = ethnic_composition_ma.drop("totalALT", axis=1)
+
+ row_sums = ethnic_composition_ma.iloc[:, 2:].sum(axis=1)
+ ethnic_composition_ma.iloc[:, 3:] = ethnic_composition_ma.iloc[:, 3:].div(
+ row_sums, axis=0
+ )
+
+ ethnic_composition_ma = ethnic_composition_ma.drop(["total_pop"], axis=1)
+
+ ethnic_composition_ma.to_csv(
+ f"{root}/data/raw/ethnic_composition_ma.csv", index=False
+ )
+
+
+def clean_ethnic_composition_ma():
+ clean_ethnic_initially()
+
+ cleaner = VariableCleanerMSA(
+ variable_name="ethnic_composition_ma",
+ path_to_raw_csv=f"{root}/data/raw/ethnic_composition_ma.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_gdp_ma.py b/build/cities/utils/cleaning_scripts/clean_gdp_ma.py
new file mode 100644
index 00000000..f14b6712
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_gdp_ma.py
@@ -0,0 +1,11 @@
+from cities.utils.clean_variable import VariableCleanerMSA
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_gdp_ma():
+ cleaner = VariableCleanerMSA(
+ variable_name="gdp_ma", path_to_raw_csv=f"{root}/data/raw/gdp_ma.csv"
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_hazard.py b/build/cities/utils/cleaning_scripts/clean_hazard.py
new file mode 100644
index 00000000..8efbb4cb
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_hazard.py
@@ -0,0 +1,87 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleaner, communities_tracts_to_counties
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+data = DataGrabber()
+data.get_features_wide(["gdp"])
+gdp = data.wide["gdp"]
+
+
+variables_hazard = [
+ "expected_agricultural_loss_rate",
+ "expected_building_loss_rate",
+ "expected_population_loss_rate",
+ "diesel_matter_exposure",
+ "proximity_to_hazardous_waste_sites",
+ "proximity_to_risk_management_plan_facilities",
+]
+
+
+def clean_hazard_first():
+ hazard = pd.read_csv(f"{root}/data/raw/communities_raw.csv")
+
+ list_variables = [
+ "Expected agricultural loss rate (Natural Hazards Risk Index)",
+ "Expected building loss rate (Natural Hazards Risk Index)",
+ "Expected population loss rate (Natural Hazards Risk Index)",
+ "Diesel particulate matter exposure",
+ "Proximity to hazardous waste sites",
+ "Proximity to Risk Management Plan (RMP) facilities",
+ ]
+
+ hazard = communities_tracts_to_counties(hazard, list_variables)
+
+ hazard.dropna(inplace=True)
+
+ hazard["GeoFIPS"] = hazard["GeoFIPS"].astype(np.int64)
+
+ common_fips = np.intersect1d(hazard["GeoFIPS"].unique(), gdp["GeoFIPS"].unique())
+ hazard = hazard[hazard["GeoFIPS"].isin(common_fips)]
+ hazard = hazard.merge(gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left")
+
+ hazard = hazard[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "Expected agricultural loss rate (Natural Hazards Risk Index)",
+ "Expected building loss rate (Natural Hazards Risk Index)",
+ "Expected population loss rate (Natural Hazards Risk Index)",
+ "Diesel particulate matter exposure",
+ "Proximity to hazardous waste sites",
+ "Proximity to Risk Management Plan (RMP) facilities",
+ ]
+ ]
+
+ hazard.columns = [
+ "GeoFIPS",
+ "GeoName",
+ "expected_agricultural_loss_rate",
+ "expected_building_loss_rate",
+ "expected_population_loss_rate",
+ "diesel_matter_exposure",
+ "proximity_to_hazardous_waste_sites",
+ "proximity_to_risk_management_plan_facilities",
+ ]
+
+ columns_to_trans = hazard.columns[-6:]
+ hazard[columns_to_trans] = hazard[columns_to_trans].astype("float64")
+
+ for variable in variables_hazard:
+ hazard_variable = hazard[["GeoFIPS", "GeoName", variable]]
+ hazard_variable.to_csv(f"{root}/data/raw/{variable}.csv", index=False)
+
+
+def clean_hazard():
+ clean_hazard_first()
+
+ for variable in variables_hazard:
+ cleaner = VariableCleaner(
+ variable_name=variable,
+ path_to_raw_csv=f"{root}/data/raw/{variable}.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_health.py b/build/cities/utils/cleaning_scripts/clean_health.py
new file mode 100644
index 00000000..7b7def54
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_health.py
@@ -0,0 +1,74 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleaner, communities_tracts_to_counties
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+data = DataGrabber()
+data.get_features_wide(["gdp"])
+gdp = data.wide["gdp"]
+
+
+def clean_health_first():
+ health = pd.read_csv(f"{root}/data/raw/communities_raw.csv")
+
+ list_variables = [
+ "Life expectancy (years)",
+ "Current asthma among adults aged greater than or equal to 18 years",
+ "Diagnosed diabetes among adults aged greater than or equal to 18 years",
+ "Coronary heart disease among adults aged greater than or equal to 18 years",
+ ]
+
+ health = communities_tracts_to_counties(health, list_variables)
+
+ health.dropna(inplace=True)
+
+ health["GeoFIPS"] = health["GeoFIPS"].astype(np.int64)
+
+ common_fips = np.intersect1d(health["GeoFIPS"].unique(), gdp["GeoFIPS"].unique())
+ health = health[health["GeoFIPS"].isin(common_fips)]
+ health = health.merge(gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left")
+
+ health = health[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "Life expectancy (years)",
+ "Current asthma among adults aged greater than or equal to 18 years",
+ "Diagnosed diabetes among adults aged greater than or equal to 18 years",
+ "Coronary heart disease among adults aged greater than or equal to 18 years",
+ ]
+ ]
+
+ health.columns = [
+ "GeoFIPS",
+ "GeoName",
+ "LifeExpectancy",
+ "Asthma",
+ "Diabetes",
+ "HeartDisease",
+ ]
+
+ columns_to_round = health.columns[-3:]
+ health[columns_to_round] = health[columns_to_round].round(0).astype("float64")
+ health["LifeExpectancy"] = health["LifeExpectancy"].round(2).astype("float64")
+
+ val_list = ["Asthma", "Diabetes", "HeartDisease"]
+
+ for val in val_list: # dealing with weird format of percentages
+ health[val] = health[val] / 100
+
+ health.to_csv(f"{root}/data/raw/health_raw.csv", index=False)
+
+
+def clean_health():
+ clean_health_first()
+
+ cleaner = VariableCleaner(
+ variable_name="health",
+ path_to_raw_csv=f"{root}/data/raw/health_raw.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_homeownership.py b/build/cities/utils/cleaning_scripts/clean_homeownership.py
new file mode 100644
index 00000000..832836db
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_homeownership.py
@@ -0,0 +1,20 @@
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_homeownership():
+ variables = [
+ "median_owner_occupied_home_value",
+ "median_rent",
+ "homeownership_rate",
+ ]
+
+ for variable in variables:
+ cleaner = VariableCleaner(
+ variable_name=variable,
+ path_to_raw_csv=f"{root}/data/raw/{variable}.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_income_distribution.py b/build/cities/utils/cleaning_scripts/clean_income_distribution.py
new file mode 100644
index 00000000..6525078a
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_income_distribution.py
@@ -0,0 +1,13 @@
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_income_distribution():
+ cleaner = VariableCleaner(
+ variable_name="income_distribution",
+ path_to_raw_csv=f"{root}/data/raw/income_distribution.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_industry.py b/build/cities/utils/cleaning_scripts/clean_industry.py
new file mode 100644
index 00000000..41571fb2
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_industry.py
@@ -0,0 +1,118 @@
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+path = Path(__file__).parent.absolute()
+
+
+def clean_industry_step_one():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide["gdp"]
+
+ industry = pd.read_csv(f"{root}/data/raw/ACSDP5Y2021_DP03_industry.csv")
+
+ industry["GEO_ID"] = industry["GEO_ID"].str.split("US").str[1]
+ industry["GEO_ID"] = industry["GEO_ID"].astype("int64")
+ industry = industry.rename(columns={"GEO_ID": "GeoFIPS"})
+
+ common_fips = np.intersect1d(gdp["GeoFIPS"].unique(), industry["GeoFIPS"].unique())
+
+ industry = industry[industry["GeoFIPS"].isin(common_fips)]
+
+ industry = industry.merge(gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left")
+
+ industry = industry[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "DP03_0004E",
+ "DP03_0033E",
+ "DP03_0034E",
+ "DP03_0035E",
+ "DP03_0036E",
+ "DP03_0037E",
+ "DP03_0038E",
+ "DP03_0039E",
+ "DP03_0040E",
+ "DP03_0041E",
+ "DP03_0042E",
+ "DP03_0043E",
+ "DP03_0044E",
+ "DP03_0045E",
+ ]
+ ]
+
+ column_name_mapping = {
+ "DP03_0004E": "employed_sum",
+ "DP03_0033E": "agri_forestry_mining",
+ "DP03_0034E": "construction",
+ "DP03_0035E": "manufacturing",
+ "DP03_0036E": "wholesale_trade",
+ "DP03_0037E": "retail_trade",
+ "DP03_0038E": "transport_utilities",
+ "DP03_0039E": "information",
+ "DP03_0040E": "finance_real_estate",
+ "DP03_0041E": "prof_sci_mgmt_admin",
+ "DP03_0042E": "education_health",
+ "DP03_0043E": "arts_entertainment",
+ "DP03_0044E": "other_services",
+ "DP03_0045E": "public_admin",
+ }
+
+ industry.rename(columns=column_name_mapping, inplace=True)
+
+ industry = industry.sort_values(by=["GeoFIPS", "GeoName"])
+
+ industry.to_csv(f"{root}/data/raw/industry_absolute.csv", index=False)
+
+ row_sums = industry.iloc[:, 3:].sum(axis=1)
+
+ industry.iloc[:, 3:] = industry.iloc[:, 3:].div(row_sums, axis=0)
+ industry = industry.drop(["employed_sum"], axis=1)
+
+ industry.to_csv(f"{root}/data/raw/industry_percent.csv", index=False)
+
+ industry_wide = industry.copy()
+
+ industry_long = pd.melt(
+ industry,
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ industry_std_wide = standardize_and_scale(industry)
+
+ industry_std_long = pd.melt(
+ industry_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ industry_wide.to_csv(f"{root}/data/processed/industry_wide.csv", index=False)
+ industry_long.to_csv(f"{root}/data/processed/industry_long.csv", index=False)
+ industry_std_wide.to_csv(
+ f"{root}/data/processed/industry_std_wide.csv", index=False
+ )
+ industry_std_long.to_csv(
+ f"{root}/data/processed/industry_std_long.csv", index=False
+ )
+
+
+def clean_industry():
+ clean_industry_step_one()
+
+ cleaner = VariableCleaner(
+ variable_name="industry",
+ path_to_raw_csv=f"{root}/data/raw/industry_percent.csv",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_industry_ma.py b/build/cities/utils/cleaning_scripts/clean_industry_ma.py
new file mode 100644
index 00000000..f95a4c92
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_industry_ma.py
@@ -0,0 +1,13 @@
+from cities.utils.clean_variable import VariableCleanerMSA
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_industry_ma():
+ cleaner = VariableCleanerMSA(
+ variable_name="industry_ma",
+ path_to_raw_csv=f"{root}/data/raw/industry_ma.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_industry_ts.py b/build/cities/utils/cleaning_scripts/clean_industry_ts.py
new file mode 100644
index 00000000..b16daee7
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_industry_ts.py
@@ -0,0 +1,124 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_industry_ts():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide["gdp"]
+
+ industry_ts = pd.read_csv(f"{root}/data/raw/industry_time_series_people.csv")
+
+ industry_ts["GEO_ID"] = industry_ts["GEO_ID"].str.split("US").str[1]
+ industry_ts["GEO_ID"] = industry_ts["GEO_ID"].astype("int64")
+ industry_ts = industry_ts.rename(columns={"GEO_ID": "GeoFIPS"})
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), industry_ts["GeoFIPS"].unique()
+ )
+
+ industry_ts = industry_ts[industry_ts["GeoFIPS"].isin(common_fips)]
+
+ years = industry_ts["Year"].unique()
+
+ for year in years:
+ year_df = industry_ts[industry_ts["Year"] == year]
+ missing_fips = set(common_fips) - set(year_df["GeoFIPS"])
+
+ if missing_fips:
+ missing_data = {
+ "Year": [year] * len(missing_fips),
+ "GeoFIPS": list(missing_fips),
+ }
+
+            # Fill all columns from the third column (index 2) onward with 0
+ for col in industry_ts.columns[2:]:
+ missing_data[col] = 0
+
+ missing_df = pd.DataFrame(missing_data)
+ industry_ts = pd.concat([industry_ts, missing_df], ignore_index=True)
+
+ industry_ts = industry_ts.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )
+
+ industry_ts = industry_ts[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "Year",
+ "agriculture_total",
+ "mining_total",
+ "construction_total",
+ "manufacturing_total",
+ "wholesale_trade_total",
+ "retail_trade_total",
+ "transportation_warehousing_total",
+ "utilities_total",
+ "information_total",
+ "finance_insurance_total",
+ "real_estate_total",
+ "professional_services_total",
+ "management_enterprises_total",
+ "admin_support_services_total",
+ "educational_services_total",
+ "healthcare_social_services_total",
+ "arts_recreation_total",
+ "accommodation_food_services_total",
+ "other_services_total",
+ "public_administration_total",
+ ]
+ ]
+
+ industry_ts = industry_ts.sort_values(by=["GeoFIPS", "GeoName", "Year"])
+
+ industry_ts.fillna(0, inplace=True)
+
+ columns_to_save = industry_ts.columns[industry_ts.columns.get_loc("Year") + 1 :]
+
+ for column in columns_to_save:
+ selected_columns = ["GeoFIPS", "GeoName", "Year", column]
+ subsetindustry_ts = industry_ts[selected_columns]
+
+ subsetindustry_ts.rename(columns={column: "Value"}, inplace=True)
+
+ subsetindustry_ts_long = subsetindustry_ts.copy()
+
+ file_name_long = f"industry_{column}_long.csv"
+ subsetindustry_ts_long.to_csv(
+ f"{root}/data/processed/{file_name_long}", index=False
+ )
+
+ subsetindustry_ts_std_long = standardize_and_scale(subsetindustry_ts)
+
+ file_name_std = f"industry_{column}_std_long.csv"
+ subsetindustry_ts_std_long.to_csv(
+ f"{root}/data/processed/{file_name_std}", index=False
+ )
+
+ subsetindustry_ts_wide = subsetindustry_ts.pivot_table(
+ index=["GeoFIPS", "GeoName"], columns="Year", values="Value"
+ )
+ subsetindustry_ts_wide.reset_index(inplace=True)
+ subsetindustry_ts_wide.columns.name = None
+
+ file_name_wide = f"industry_{column}_wide.csv"
+ subsetindustry_ts_wide.to_csv(
+ f"{root}/data/processed/{file_name_wide}", index=False
+ )
+
+ subsetindustry_ts_std_wide = subsetindustry_ts_std_long.pivot_table(
+ index=["GeoFIPS", "GeoName"], columns="Year", values="Value"
+ )
+ subsetindustry_ts_std_wide.reset_index(inplace=True)
+ subsetindustry_ts_std_wide.columns.name = None
+
+ file_name_std_wide = f"industry_{column}_std_wide.csv"
+ subsetindustry_ts_std_wide.to_csv(
+ f"{root}/data/processed/{file_name_std_wide}", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_population.py b/build/cities/utils/cleaning_scripts/clean_population.py
new file mode 100644
index 00000000..3c4d0ead
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_population.py
@@ -0,0 +1,84 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_population():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide["gdp"]
+
+ cainc30 = pd.read_csv(
+ f"{root}/data/raw/CAINC30_1969_2021.csv", encoding="ISO-8859-1"
+ )
+
+ population = cainc30[cainc30["Description"] == " Population (persons) 3/"].copy()
+
+ population["GeoFIPS"] = population["GeoFIPS"].fillna("").astype(str)
+ population["GeoFIPS"] = population["GeoFIPS"].str.strip(' "').astype(int)
+
+ population = population[population["GeoFIPS"] % 1000 != 0]
+
+ common_fips = np.intersect1d(
+ population["GeoFIPS"].unique(), gdp["GeoFIPS"].unique()
+ )
+ assert len(common_fips) == len(gdp["GeoFIPS"].unique())
+
+ population = population[population["GeoFIPS"].isin(common_fips)]
+ assert population.shape[0] == gdp.shape[0]
+
+ order = gdp["GeoFIPS"].tolist()
+ population = population.set_index("GeoFIPS").reindex(order).reset_index()
+
+ # align with gdp
+ assert population["GeoFIPS"].tolist() == gdp["GeoFIPS"].tolist()
+ assert population["GeoName"].is_unique
+
+ population = population.drop(population.columns[2:8], axis=1)
+ assert population.shape[0] == gdp.shape[0]
+
+ # 243 NAs prior to 1993
+ # na_counts = (population == '(NA)').sum().sum()
+ # print(na_counts)
+
+ population.replace("(NA)", np.nan, inplace=True)
+ population.replace("(NM)", np.nan, inplace=True)
+
+    # removed years prior to 1993 due to missingness that long ago
+ population = population.drop(population.columns[2:26], axis=1)
+
+ assert population.isna().sum().sum() == 0
+ assert population.shape[0] == gdp.shape[0]
+
+ for column in population.columns[2:]:
+ population[column] = population[column].astype(float)
+
+ assert population.shape[0] == gdp.shape[0]
+
+ population_long = pd.melt(
+ population.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Year",
+ value_name="Value",
+ )
+
+ population_std_wide = standardize_and_scale(population)
+ population_std_long = pd.melt(
+ population_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Year",
+ value_name="Value",
+ )
+
+ population.to_csv(f"{root}/data/processed/population_wide.csv", index=False)
+ population_long.to_csv(f"{root}/data/processed/population_long.csv", index=False)
+ population_std_wide.to_csv(
+ f"{root}/data/processed/population_std_wide.csv", index=False
+ )
+ population_std_long.to_csv(
+ f"{root}/data/processed/population_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_population_density.py b/build/cities/utils/cleaning_scripts/clean_population_density.py
new file mode 100644
index 00000000..ce429f8a
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_population_density.py
@@ -0,0 +1,12 @@
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_population_density():
+ cleaner = VariableCleaner(
+ variable_name="population_density",
+ path_to_raw_csv=f"{root}/data/raw/population_density.csv",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_population_ma.py b/build/cities/utils/cleaning_scripts/clean_population_ma.py
new file mode 100644
index 00000000..21d9ee3c
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_population_ma.py
@@ -0,0 +1,13 @@
+from cities.utils.clean_variable import VariableCleanerMSA
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_population_ma():
+ cleaner = VariableCleanerMSA(
+ variable_name="population_ma",
+ path_to_raw_csv=f"{root}/data/raw/population_ma.csv",
+ year_or_category="Year",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_spending_HHS.py b/build/cities/utils/cleaning_scripts/clean_spending_HHS.py
new file mode 100644
index 00000000..6db55e06
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_spending_HHS.py
@@ -0,0 +1,142 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_spending_HHS():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide
+ gdp = gdp.get("gdp")
+
+ spending_HHS = pd.read_csv(f"{root}/data/raw/spending_HHS.csv")
+
+ transportUnwanted = spending_HHS[
+ (
+ pd.isna(spending_HHS["total_obligated_amount"])
+ | (spending_HHS["total_obligated_amount"] == 1)
+ | (spending_HHS["total_obligated_amount"] == 0)
+ )
+ ]
+
+ exclude_mask = spending_HHS["total_obligated_amount"].isin(
+ transportUnwanted["total_obligated_amount"]
+ )
+    spending_HHS = spending_HHS[~exclude_mask]  # 95 observations deleted
+
+ assert spending_HHS.isna().sum().sum() == 0, "Na values detected"
+
+    # loading names and repairing FIPS codes 3 digits long and shorter
+
+ names_HHS = pd.read_csv(f"{root}/data/raw/spending_HHS_names.csv")
+
+ spending_only_fips = np.setdiff1d(spending_HHS["GeoFIPS"], gdp["GeoFIPS"])
+
+ fips4_to_repair = [fip for fip in spending_only_fips if (fip < 10000 and fip > 999)]
+ short4_fips = spending_HHS[spending_HHS["GeoFIPS"].isin(fips4_to_repair)]
+
+ full_geofipsLIST = [fip for fip in spending_only_fips if fip > 9999]
+ full_geofips = spending_HHS[spending_HHS["GeoFIPS"].isin(full_geofipsLIST)]
+
+ cleaningLIST = [full_geofips, short4_fips] # no 3digit FIPS
+
+ # replacing damaged FIPS
+
+ for badFIPS in cleaningLIST:
+ geofips_to_geonamealt = dict(zip(names_HHS["GeoFIPS"], names_HHS["GeoNameALT"]))
+
+ badFIPS["GeoNameALT"] = badFIPS["GeoFIPS"].map(geofips_to_geonamealt)
+ badFIPS = badFIPS.rename(columns={"GeoFIPS": "damagedFIPS"})
+
+ badFIPSmapping_dict = dict(zip(gdp["GeoName"], gdp["GeoFIPS"]))
+
+ badFIPS["repairedFIPS"] = badFIPS["GeoNameALT"].apply(
+ lambda x: badFIPSmapping_dict.get(x)
+ )
+ repaired_geofips = badFIPS[badFIPS["repairedFIPS"].notna()]
+
+ repair_ratio = repaired_geofips.shape[0] / badFIPS.shape[0]
+ print(f"Ratio of repaired FIPS: {round(repair_ratio, 2)}")
+
+ # assert repair_ratio > 0.9, f'Less than 0.9 of FIPS were successfully repaired!'
+
+ spending_HHS["GeoFIPS"] = spending_HHS[
+ "GeoFIPS"
+ ].replace( # no FIPS were repaired actually
+ dict(zip(repaired_geofips["damagedFIPS"], repaired_geofips["repairedFIPS"]))
+ )
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), spending_HHS["GeoFIPS"].unique()
+ )
+
+ all_FIPS_spending_HHS = spending_HHS.copy()
+
+ spending_HHS = spending_HHS[
+ spending_HHS["GeoFIPS"].isin(common_fips)
+ ] # 99 FIPS deleted
+ assert (
+ spending_HHS.shape[0] / all_FIPS_spending_HHS.shape[0] > 0.9
+ ), "Less than 0.9 of FIPS are common!"
+
+ # grouping duplicate fips for years
+ # (they appeared because we have repaired some of them and now they match with number that is already present)
+
+ spending_HHS = (
+ spending_HHS.groupby(["GeoFIPS", "year"])["total_obligated_amount"]
+ .sum()
+ .reset_index()
+ )
+ spending_HHS.reset_index(drop=True, inplace=True)
+
+ # adding GeoNames
+ spending_HHS = spending_HHS.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )[["GeoFIPS", "GeoName", "year", "total_obligated_amount"]]
+
+ unique_gdp = gdp[["GeoFIPS", "GeoName"]].drop_duplicates(
+ subset=["GeoFIPS", "GeoName"], keep="first"
+ )
+ exclude_geofips = set(spending_HHS["GeoFIPS"])
+ unique_gdp = unique_gdp[~unique_gdp["GeoFIPS"].isin(exclude_geofips)]
+
+ unique_gdp["year"] = np.repeat(2018, unique_gdp.shape[0])
+ unique_gdp["total_obligated_amount"] = np.repeat(0, unique_gdp.shape[0])
+ spending_HHS = pd.concat([spending_HHS, unique_gdp], ignore_index=True)
+ spending_HHS = spending_HHS.sort_values(by=["GeoFIPS", "GeoName", "year"])
+
+ assert spending_HHS["GeoFIPS"].nunique() == spending_HHS["GeoName"].nunique()
+ assert spending_HHS["GeoFIPS"].nunique() == gdp["GeoFIPS"].nunique()
+
+    # rename the year column to match the capitalization used elsewhere
+ spending_HHS = spending_HHS.rename(columns={"year": "Year"})
+
+ # standardizing and saving
+ spending_HHS_long = spending_HHS.copy()
+
+ spending_HHS_wide = spending_HHS.pivot_table(
+ index=["GeoFIPS", "GeoName"], columns="Year", values="total_obligated_amount"
+ )
+ spending_HHS_wide.reset_index(inplace=True)
+ spending_HHS_wide.columns.name = None
+ spending_HHS_wide = spending_HHS_wide.fillna(0)
+
+ spending_HHS_std_long = standardize_and_scale(spending_HHS)
+ spending_HHS_std_wide = standardize_and_scale(spending_HHS_wide)
+
+ spending_HHS_wide.to_csv(
+ f"{root}/data/processed/spending_HHS_wide.csv", index=False
+ )
+ spending_HHS_long.to_csv(
+ f"{root}/data/processed/spending_HHS_long.csv", index=False
+ )
+ spending_HHS_std_wide.to_csv(
+ f"{root}/data/processed/spending_HHS_std_wide.csv", index=False
+ )
+ spending_HHS_std_long.to_csv(
+ f"{root}/data/processed/spending_HHS_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_spending_commerce.py b/build/cities/utils/cleaning_scripts/clean_spending_commerce.py
new file mode 100644
index 00000000..2463bffa
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_spending_commerce.py
@@ -0,0 +1,147 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_spending_commerce():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide
+ gdp = gdp.get("gdp")
+
+ spending_commerce = pd.read_csv(f"{root}/data/raw/spending_commerce.csv")
+
+ transportUnwanted = spending_commerce[
+ (
+ pd.isna(spending_commerce["total_obligated_amount"])
+ | (spending_commerce["total_obligated_amount"] == 1)
+ | (spending_commerce["total_obligated_amount"] == 0)
+ )
+ ]
+
+ exclude_mask = spending_commerce["total_obligated_amount"].isin(
+ transportUnwanted["total_obligated_amount"]
+ )
+ spending_commerce = spending_commerce[~exclude_mask] # 24 values lost
+
+ assert spending_commerce.isna().sum().sum() == 0, "Na values detected"
+
+    # loading names and repairing FIPS codes 3 digits long and shorter
+
+ names_commerce = pd.read_csv(f"{root}/data/raw/spending_commerce_names.csv")
+
+ spending_only_fips = np.setdiff1d(spending_commerce["GeoFIPS"], gdp["GeoFIPS"])
+
+ fips4_to_repair = [fip for fip in spending_only_fips if (fip < 10000 and fip > 999)]
+ short4_fips = spending_commerce[spending_commerce["GeoFIPS"].isin(fips4_to_repair)]
+
+ full_geofipsLIST = [fip for fip in spending_only_fips if fip > 9999]
+ full_geofips = spending_commerce[
+ spending_commerce["GeoFIPS"].isin(full_geofipsLIST)
+ ]
+
+ cleaningLIST = [full_geofips, short4_fips] # no small fips
+
+ # replacing damaged FIPS
+
+ for badFIPS in cleaningLIST:
+ geofips_to_geonamealt = dict(
+ zip(names_commerce["GeoFIPS"], names_commerce["GeoNameALT"])
+ )
+
+ badFIPS["GeoNameALT"] = badFIPS["GeoFIPS"].map(geofips_to_geonamealt)
+ badFIPS = badFIPS.rename(columns={"GeoFIPS": "damagedFIPS"})
+
+ badFIPSmapping_dict = dict(zip(gdp["GeoName"], gdp["GeoFIPS"]))
+
+ badFIPS["repairedFIPS"] = badFIPS["GeoNameALT"].apply(
+ lambda x: badFIPSmapping_dict.get(x)
+ )
+ repaired_geofips = badFIPS[badFIPS["repairedFIPS"].notna()]
+
+ repair_ratio = repaired_geofips.shape[0] / badFIPS.shape[0]
+ print(f"Ratio of repaired FIPS: {round(repair_ratio, 2)}")
+
+ # assert repair_ratio > 0.9, f'Less than 0.9 of FIPS were successfully repaired!'
+
+ spending_commerce["GeoFIPS"] = spending_commerce["GeoFIPS"].replace(
+ dict(zip(repaired_geofips["damagedFIPS"], repaired_geofips["repairedFIPS"]))
+ )
+
+ # deleting short FIPS codes
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), spending_commerce["GeoFIPS"].unique()
+ )
+
+ all_FIPS_spending_commerce = spending_commerce.copy()
+
+ spending_commerce = spending_commerce[
+ spending_commerce["GeoFIPS"].isin(common_fips)
+ ] # 67 FIPS deleted
+ assert (
+ spending_commerce.shape[0] / all_FIPS_spending_commerce.shape[0] > 0.9
+ ), "Less than 0.9 of FIPS are common!"
+
+ # grouping duplicate fips for years
+    # (they appeared because we repaired some FIPS and they now match a number that is already present)
+
+ spending_commerce = (
+ spending_commerce.groupby(["GeoFIPS", "year"])["total_obligated_amount"]
+ .sum()
+ .reset_index()
+ )
+ spending_commerce.reset_index(drop=True, inplace=True)
+
+ # adding GeoNames
+ spending_commerce = spending_commerce.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )[["GeoFIPS", "GeoName", "year", "total_obligated_amount"]]
+
+ unique_gdp = gdp[["GeoFIPS", "GeoName"]].drop_duplicates(
+ subset=["GeoFIPS", "GeoName"], keep="first"
+ )
+ exclude_geofips = set(spending_commerce["GeoFIPS"])
+ unique_gdp = unique_gdp[~unique_gdp["GeoFIPS"].isin(exclude_geofips)]
+
+ unique_gdp["year"] = np.repeat(2018, unique_gdp.shape[0])
+ unique_gdp["total_obligated_amount"] = np.repeat(0, unique_gdp.shape[0])
+ spending_commerce = pd.concat([spending_commerce, unique_gdp], ignore_index=True)
+ spending_commerce = spending_commerce.sort_values(by=["GeoFIPS", "GeoName", "year"])
+
+ assert (
+ spending_commerce["GeoFIPS"].nunique() == spending_commerce["GeoName"].nunique()
+ )
+ assert spending_commerce["GeoFIPS"].nunique() == gdp["GeoFIPS"].nunique()
+
+ spending_commerce = spending_commerce.rename(columns={"year": "Year"})
+
+ # standardizing and saving
+ spending_commerce_long = spending_commerce.copy()
+
+ spending_commerce_wide = spending_commerce.pivot_table(
+ index=["GeoFIPS", "GeoName"], columns="Year", values="total_obligated_amount"
+ )
+ spending_commerce_wide.reset_index(inplace=True)
+ spending_commerce_wide.columns.name = None
+ spending_commerce_wide = spending_commerce_wide.fillna(0)
+
+ spending_commerce_std_long = standardize_and_scale(spending_commerce)
+ spending_commerce_std_wide = standardize_and_scale(spending_commerce_wide)
+
+ spending_commerce_wide.to_csv(
+ f"{root}/data/processed/spending_commerce_wide.csv", index=False
+ )
+ spending_commerce_long.to_csv(
+ f"{root}/data/processed/spending_commerce_long.csv", index=False
+ )
+ spending_commerce_std_wide.to_csv(
+ f"{root}/data/processed/spending_commerce_std_wide.csv", index=False
+ )
+ spending_commerce_std_long.to_csv(
+ f"{root}/data/processed/spending_commerce_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_spending_transportation.py b/build/cities/utils/cleaning_scripts/clean_spending_transportation.py
new file mode 100644
index 00000000..0ff49927
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_spending_transportation.py
@@ -0,0 +1,183 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_spending_transportation():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide
+ gdp = gdp.get("gdp")
+
+ spending_transportation = pd.read_csv(
+ f"{root}/data/raw/spending_transportation.csv"
+ )
+
+ transportUnwanted = spending_transportation[
+ (
+ pd.isna(spending_transportation["total_obligated_amount"])
+ | (spending_transportation["total_obligated_amount"] == 1)
+ | (spending_transportation["total_obligated_amount"] == 0)
+ )
+ ]
+
+ exclude_mask = spending_transportation["total_obligated_amount"].isin(
+ transportUnwanted["total_obligated_amount"]
+ )
+ spending_transportation = spending_transportation[
+ ~exclude_mask
+ ] # 66 values removed
+
+ assert spending_transportation.isna().sum().sum() == 0, "Na values detected"
+
+    # loading names and repairing FIPS of value 3 and shorter
+
+ names_transportation = pd.read_csv(
+ f"{root}/data/raw/spending_transportation_names.csv"
+ )
+
+ short_geofips = spending_transportation[
+ spending_transportation["GeoFIPS"].astype(str).str.len().between(1, 3)
+ ]
+
+ spending_only_fips = np.setdiff1d(
+ spending_transportation["GeoFIPS"], gdp["GeoFIPS"]
+ )
+
+ fips4_to_repeair = [
+ fip for fip in spending_only_fips if (fip < 10000 and fip > 999)
+ ]
+ short4_fips = spending_transportation[
+ spending_transportation["GeoFIPS"].isin(fips4_to_repeair)
+ ]
+
+ full_geofipsLIST = [fip for fip in spending_only_fips if fip > 9999]
+ full_geofips = spending_transportation[
+ spending_transportation["GeoFIPS"].isin(full_geofipsLIST)
+ ]
+
+ cleaningLIST = [full_geofips, short4_fips, short_geofips]
+
+ for badFIPS in cleaningLIST:
+ geofips_to_geonamealt = dict(
+ zip(names_transportation["GeoFIPS"], names_transportation["GeoNameALT"])
+ )
+
+ badFIPS["GeoNameALT"] = badFIPS["GeoFIPS"].map(geofips_to_geonamealt)
+ badFIPS = badFIPS.rename(columns={"GeoFIPS": "damagedFIPS"})
+
+ badFIPSmapping_dict = dict(zip(gdp["GeoName"], gdp["GeoFIPS"]))
+
+ badFIPS["repairedFIPS"] = badFIPS["GeoNameALT"].apply(
+ lambda x: badFIPSmapping_dict.get(x)
+ )
+ repaired_geofips = badFIPS[badFIPS["repairedFIPS"].notna()]
+
+ repair_ratio = repaired_geofips.shape[0] / badFIPS.shape[0]
+ print(f"Ratio of repaired FIPS: {round(repair_ratio, 2)}")
+
+ # assert repair_ratio > 0.9, f'Less than 0.9 of FIPS were successfully repaired!'
+
+ spending_transportation["GeoFIPS"] = spending_transportation["GeoFIPS"].replace(
+ dict(zip(repaired_geofips["damagedFIPS"], repaired_geofips["repairedFIPS"]))
+ )
+
+ # deleting short FIPS codes
+ count_short_geofips = spending_transportation[
+ spending_transportation["GeoFIPS"] <= 999
+ ]["GeoFIPS"].count()
+ assert (
+ count_short_geofips / spending_transportation.shape[0] < 0.05
+ ), "More than 0.05 of FIPS are short and will be deleted!"
+
+ spending_transportation = spending_transportation[
+ spending_transportation["GeoFIPS"] > 999
+ ]
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), spending_transportation["GeoFIPS"].unique()
+ )
+
+ all_FIPS_spending_transportation = spending_transportation.copy()
+
+ spending_transportation = spending_transportation[
+ spending_transportation["GeoFIPS"].isin(common_fips)
+ ] # 0.96 of FIPS are common
+ assert (
+ spending_transportation.shape[0] / all_FIPS_spending_transportation.shape[0]
+ > 0.9
+ ), "Less than 0.9 of FIPS are common!"
+
+ # grouping duplicate fips for years
+    # (they appeared because we have repaired some of them and now they match a number that is already present)
+
+ spending_transportation = (
+ spending_transportation.groupby(["GeoFIPS", "year"])["total_obligated_amount"]
+ .sum()
+ .reset_index()
+ )
+ spending_transportation.reset_index(drop=True, inplace=True)
+
+ # adding GeoNames
+ spending_transportation = spending_transportation.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )[["GeoFIPS", "GeoName", "year", "total_obligated_amount"]]
+
+    # adding missing FIPS with 0 values in the total_obligated_amount column, and year 2018 (as a dummy variable)
+
+ unique_gdp = gdp[["GeoFIPS", "GeoName"]].drop_duplicates(
+ subset=["GeoFIPS", "GeoName"], keep="first"
+ )
+ exclude_geofips = set(spending_transportation["GeoFIPS"])
+ unique_gdp = unique_gdp[~unique_gdp["GeoFIPS"].isin(exclude_geofips)]
+
+ unique_gdp["year"] = np.repeat(2018, unique_gdp.shape[0])
+ unique_gdp["total_obligated_amount"] = np.repeat(0, unique_gdp.shape[0])
+ spending_transportation = pd.concat(
+ [spending_transportation, unique_gdp], ignore_index=True
+ )
+ spending_transportation = spending_transportation.sort_values(
+ by=["GeoFIPS", "GeoName", "year"]
+ )
+
+ assert (
+ spending_transportation["GeoFIPS"].nunique()
+ == spending_transportation["GeoName"].nunique()
+ )
+ assert spending_transportation["GeoFIPS"].nunique() == gdp["GeoFIPS"].nunique()
+
+ spending_transportation = spending_transportation.rename(columns={"year": "Year"})
+
+ # standardizing and saving
+ spending_transportation_long = spending_transportation.copy()
+
+ spending_transportation_wide = spending_transportation.pivot_table(
+ index=["GeoFIPS", "GeoName"], columns="Year", values="total_obligated_amount"
+ )
+ spending_transportation_wide.reset_index(inplace=True)
+ spending_transportation_wide.columns.name = None
+ spending_transportation_wide = spending_transportation_wide.fillna(0)
+
+ spending_transportation_std_long = standardize_and_scale(
+ spending_transportation_long
+ )
+ spending_transportation_std_wide = standardize_and_scale(
+ spending_transportation_wide
+ )
+
+ spending_transportation_wide.to_csv(
+ f"{root}/data/processed/spending_transportation_wide.csv", index=False
+ )
+ spending_transportation_long.to_csv(
+ f"{root}/data/processed/spending_transportation_long.csv", index=False
+ )
+ spending_transportation_std_wide.to_csv(
+ f"{root}/data/processed/spending_transportation_std_wide.csv", index=False
+ )
+ spending_transportation_std_long.to_csv(
+ f"{root}/data/processed/spending_transportation_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_transport.py b/build/cities/utils/cleaning_scripts/clean_transport.py
new file mode 100644
index 00000000..df789ecb
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_transport.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_transport():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide
+ gdp = gdp.get("gdp")
+
+ # grabbing gdp for comparison
+
+ transport = pd.read_csv(f"{root}/data/raw/smartLocationSmall.csv")
+
+ # choosing transport variables
+ transport = transport[["GeoFIPS", "D3A", "WeightAvgNatWalkInd"]]
+
+ # list of GeoFips with Na values
+ transportUnwanted = transport[
+ (
+ pd.isna(transport["WeightAvgNatWalkInd"])
+ | (transport["WeightAvgNatWalkInd"] == 1)
+ )
+ | (transport["D3A"] == 0)
+ | (transport["D3A"] == 1)
+ ]
+
+ exclude_mask = transport["GeoFIPS"].isin(transportUnwanted["GeoFIPS"])
+ transport = transport[~exclude_mask]
+
+ # the step above deleted 10 records with NAs,
+ # no loss on a dataset because they were not common with gdp anyway
+
+ assert transport.isna().sum().sum() == 0, "Na values detected"
+ assert transport["GeoFIPS"].is_unique
+
+ # subsetting to common FIPS numbers
+
+ common_fips = np.intersect1d(gdp["GeoFIPS"].unique(), transport["GeoFIPS"].unique())
+ transport = transport[transport["GeoFIPS"].isin(common_fips)]
+
+ assert len(common_fips) == len(transport["GeoFIPS"].unique())
+ assert len(transport) > 2800, "The number of records is lower than 2800"
+
+ # adding geoname column
+ transport = transport.merge(gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left")[
+ ["GeoFIPS", "GeoName", "D3A", "WeightAvgNatWalkInd"]
+ ]
+
+    # renaming D3A to roadDensity
+ transport.rename(columns={"D3A": "roadDensity"}, inplace=True)
+
+ patState = r", [A-Z]{2}(\*{1,2})?$"
+ GeoNameError = "Wrong GeoName value!"
+ assert transport["GeoName"].str.contains(patState, regex=True).all(), GeoNameError
+ assert sum(transport["GeoName"].str.count(", ")) == transport.shape[0], GeoNameError
+
+ # changing values to floats
+
+ for column in transport.columns[2:]:
+ transport[column] = transport[column].astype(float)
+
+ # Standardizing, formatting, saving
+
+ transport_wide = transport.copy()
+ transport_std_wide = standardize_and_scale(transport)
+
+ transport_long = pd.melt(
+ transport,
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+ transport_std_long = pd.melt(
+ transport_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ transport_wide.to_csv(f"{root}/data/processed/transport_wide.csv", index=False)
+ transport_long.to_csv(f"{root}/data/processed/transport_long.csv", index=False)
+ transport_std_wide.to_csv(
+ f"{root}/data/processed/transport_std_wide.csv", index=False
+ )
+ transport_std_long.to_csv(
+ f"{root}/data/processed/transport_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/clean_unemployment.py b/build/cities/utils/cleaning_scripts/clean_unemployment.py
new file mode 100644
index 00000000..4e25369c
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_unemployment.py
@@ -0,0 +1,12 @@
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_unemployment():
+ cleaner = VariableCleaner(
+ variable_name="unemployment_rate",
+ path_to_raw_csv=f"{root}/data/raw/unemployment_rate_wide_withNA.csv",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_urbanicity_ma.py b/build/cities/utils/cleaning_scripts/clean_urbanicity_ma.py
new file mode 100644
index 00000000..710c8533
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_urbanicity_ma.py
@@ -0,0 +1,118 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.clean_variable import VariableCleanerMSA
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+def clean_urbanicity_initially():
+ population_urban = pd.read_csv(
+ f"{root}/data/raw/DECENNIALDHC2020.P2-2023-12-25T165149.csv"
+ )
+
+ population_urban.set_index("Label (Grouping)", inplace=True)
+ transposed_df = population_urban.transpose()
+ transposed_df.reset_index(inplace=True)
+ df_population_urban = transposed_df.copy()
+
+ filtered_df = pd.DataFrame(
+ df_population_urban[df_population_urban["index"].str.endswith("Metro Area")]
+ )
+
+ filtered_df = filtered_df.rename(columns={"index": "MetroName"})
+
+ filtered_df.columns = filtered_df.columns.str.replace("Total:", "total_pop")
+ filtered_df.columns = filtered_df.columns.str.replace("Urban", "urban_pop")
+ filtered_df.columns = filtered_df.columns.str.replace("Rural", "rural_pop")
+ filtered_df = filtered_df.iloc[:, :-1].reset_index(drop=True)
+
+ population_urban = filtered_df.copy()
+
+ housing_urban = pd.read_csv(
+ f"{root}/data/raw/DECENNIALDHC2020.H2-2023-12-25T174403.csv"
+ )
+
+ housing_urban.set_index("Label (Grouping)", inplace=True)
+ transposed_df = housing_urban.transpose()
+ transposed_df.reset_index(inplace=True)
+ housing_urban = transposed_df.copy()
+
+ filtered_df = pd.DataFrame(
+ housing_urban[housing_urban["index"].str.endswith("Metro Area")]
+ )
+
+ filtered_df = filtered_df.rename(columns={"index": "MetroName"})
+
+ filtered_df.columns = filtered_df.columns.str.replace("Total:", "total_housing")
+ filtered_df.columns = filtered_df.columns.str.replace("Urban", "urban_housing")
+ filtered_df.columns = filtered_df.columns.str.replace("Rural", "rural_housing")
+ filtered_df = filtered_df.iloc[:, :-1].reset_index(drop=True)
+ housing_urban = filtered_df.copy()
+
+ metrolist = pd.read_csv(f"{root}/data/raw/metrolist.csv")
+
+ merged_df = housing_urban.merge(population_urban, on="MetroName")
+
+ merged_df["MetroName"] = merged_df["MetroName"].str.replace("Metro Area", "(MA)")
+
+ df1_subset = metrolist[["GeoFIPS", "GeoName"]].drop_duplicates()
+
+ merged_df = pd.merge(
+ merged_df, df1_subset, left_on=["MetroName"], right_on=["GeoName"], how="left"
+ )
+
+ merged_df = merged_df.drop(columns=["GeoName"])
+ merged_df.dropna(inplace=True)
+
+ merged_df.columns = merged_df.columns.str.strip()
+ ordered_columns = [
+ "GeoFIPS",
+ "MetroName",
+ "total_housing",
+ "urban_housing",
+ "rural_housing",
+ "total_pop",
+ "urban_pop",
+ "rural_pop",
+ ]
+ ordered_df = merged_df[ordered_columns]
+
+ ordered_df = ordered_df.rename(columns={"MetroName": "GeoName"})
+
+ numeric_columns = [
+ "total_housing",
+ "urban_housing",
+ "rural_housing",
+ "total_pop",
+ "urban_pop",
+ "rural_pop",
+ ]
+ ordered_df[numeric_columns] = (
+ ordered_df[numeric_columns].replace({",": ""}, regex=True).astype(float)
+ )
+
+ ordered_df["GeoFIPS"] = ordered_df["GeoFIPS"].astype(np.int64)
+
+ ordered_df["rural_pop_prct"] = ordered_df["rural_pop"] / ordered_df["total_pop"]
+ ordered_df["rural_housing_prct"] = (
+ ordered_df["rural_housing"] / ordered_df["total_housing"]
+ )
+
+ ordered_df.drop(["total_pop", "total_housing"], axis=1, inplace=True)
+
+ ordered_df.reset_index(drop=True, inplace=True)
+
+ ordered_df.to_csv(f"{root}/data/raw/urbanicity_ma.csv", index=False)
+
+
+def clean_urbanicity_ma():
+ clean_urbanicity_initially()
+
+ cleaner = VariableCleanerMSA(
+ variable_name="urbanicity_ma",
+ path_to_raw_csv=f"{root}/data/raw/urbanicity_ma.csv",
+ year_or_category="Category",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_scripts/clean_urbanization.py b/build/cities/utils/cleaning_scripts/clean_urbanization.py
new file mode 100644
index 00000000..db199e2b
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/clean_urbanization.py
@@ -0,0 +1,78 @@
+import numpy as np
+import pandas as pd
+
+from cities.utils.cleaning_utils import standardize_and_scale
+from cities.utils.data_grabber import DataGrabber, find_repo_root
+
+root = find_repo_root()
+
+
+def clean_urbanization():
+ data = DataGrabber()
+ data.get_features_wide(["gdp"])
+ gdp = data.wide["gdp"]
+
+ dtype_mapping = {"STATE": str, "COUNTY": str}
+ urbanization = pd.read_csv(
+ f"{root}/data/raw/2020_UA_COUNTY.csv", dtype=dtype_mapping
+ )
+
+ urbanization["GeoFIPS"] = urbanization["STATE"].astype(str) + urbanization[
+ "COUNTY"
+ ].astype(str)
+ urbanization["GeoFIPS"] = urbanization["GeoFIPS"].astype(int)
+
+ common_fips = np.intersect1d(
+ gdp["GeoFIPS"].unique(), urbanization["GeoFIPS"].unique()
+ )
+
+ urbanization = urbanization[urbanization["GeoFIPS"].isin(common_fips)]
+
+ urbanization = urbanization.merge(
+ gdp[["GeoFIPS", "GeoName"]], on="GeoFIPS", how="left"
+ )
+
+ urbanization = urbanization[
+ [
+ "GeoFIPS",
+ "GeoName",
+ "POPDEN_RUR",
+ "POPDEN_URB",
+ "HOUDEN_COU",
+ "HOUDEN_RUR",
+ "ALAND_PCT_RUR",
+ ]
+ ]
+
+ urbanization = urbanization.sort_values(by=["GeoFIPS", "GeoName"])
+
+ urbanization_wide = urbanization.copy()
+
+ urbanization_long = pd.melt(
+ urbanization,
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ urbanization_std_wide = standardize_and_scale(urbanization)
+
+ urbanization_std_long = pd.melt(
+ urbanization_std_wide.copy(),
+ id_vars=["GeoFIPS", "GeoName"],
+ var_name="Category",
+ value_name="Value",
+ )
+
+ urbanization_wide.to_csv(
+ f"{root}/data/processed/urbanization_wide.csv", index=False
+ )
+ urbanization_long.to_csv(
+ f"{root}/data/processed/urbanization_long.csv", index=False
+ )
+ urbanization_std_wide.to_csv(
+ f"{root}/data/processed/urbanization_std_wide.csv", index=False
+ )
+ urbanization_std_long.to_csv(
+ f"{root}/data/processed/urbanization_std_long.csv", index=False
+ )
diff --git a/build/cities/utils/cleaning_scripts/cleaning_pipeline.py b/build/cities/utils/cleaning_scripts/cleaning_pipeline.py
new file mode 100644
index 00000000..542836de
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/cleaning_pipeline.py
@@ -0,0 +1,74 @@
+from cities.utils.clean_age_composition import clean_age_composition
+from cities.utils.clean_burdens import clean_burdens
+from cities.utils.clean_ethnic_composition import clean_ethnic_composition
+from cities.utils.clean_ethnic_composition_ma import clean_ethnic_composition_ma
+from cities.utils.clean_gdp import clean_gdp
+from cities.utils.clean_gdp_ma import clean_gdp_ma
+from cities.utils.clean_hazard import clean_hazard
+from cities.utils.clean_homeownership import clean_homeownership
+from cities.utils.clean_income_distribution import clean_income_distribution
+from cities.utils.clean_industry import clean_industry
+from cities.utils.clean_industry_ma import clean_industry_ma
+from cities.utils.clean_industry_ts import clean_industry_ts
+from cities.utils.clean_population import clean_population
+from cities.utils.clean_population_density import clean_population_density
+from cities.utils.clean_population_ma import clean_population_ma
+from cities.utils.clean_spending_commerce import clean_spending_commerce
+from cities.utils.clean_spending_HHS import clean_spending_HHS
+from cities.utils.clean_spending_transportation import clean_spending_transportation
+from cities.utils.clean_transport import clean_transport
+from cities.utils.clean_unemployment import clean_unemployment
+from cities.utils.clean_urbanicity_ma import clean_urbanicity_ma
+from cities.utils.clean_urbanization import clean_urbanization
+from cities.utils.cleaning_poverty import clean_poverty
+
+# from cities.utils.clean_health import clean_health
+
+
+# clean_health() loses another 15-ish FIPS
+
+clean_population_density()
+
+clean_homeownership()
+
+clean_income_distribution()
+
+clean_hazard()
+
+clean_burdens()
+
+clean_age_composition()
+
+clean_gdp_ma()
+
+clean_industry_ma()
+
+clean_urbanicity_ma()
+
+clean_ethnic_composition_ma()
+
+clean_population_ma()
+
+clean_poverty()
+
+clean_unemployment()
+
+clean_gdp()
+
+clean_population()
+
+clean_transport()
+
+clean_spending_transportation()
+
+clean_spending_commerce()
+
+clean_spending_HHS()
+
+clean_ethnic_composition()
+
+clean_industry()
+
+clean_urbanization()
+
+clean_industry_ts()
diff --git a/build/cities/utils/cleaning_scripts/cleaning_poverty.py b/build/cities/utils/cleaning_scripts/cleaning_poverty.py
new file mode 100644
index 00000000..83d9d7e2
--- /dev/null
+++ b/build/cities/utils/cleaning_scripts/cleaning_poverty.py
@@ -0,0 +1,23 @@
+from cities.utils.clean_variable import VariableCleaner
+from cities.utils.data_grabber import find_repo_root
+
+root = find_repo_root()
+
+
+poverty_variables = [
+ "povertyAll",
+ "povertyAllprct",
+ "povertyUnder18",
+ "povertyUnder18prct",
+ "medianHouseholdIncome",
+]
+
+
+def clean_poverty():
+ for variable_name in poverty_variables:
+ cleaner = VariableCleaner(
+ variable_name,
+ path_to_raw_csv=f"{root}/data/raw/{variable_name}_wide.csv",
+ year_or_category="Year",
+ )
+ cleaner.clean_variable()
diff --git a/build/cities/utils/cleaning_utils.py b/build/cities/utils/cleaning_utils.py
new file mode 100644
index 00000000..fa15818d
--- /dev/null
+++ b/build/cities/utils/cleaning_utils.py
@@ -0,0 +1,83 @@
+from typing import List, Union
+
+import numpy as np
+import pandas as pd
+from sklearn.preprocessing import StandardScaler
+
+from cities.utils.data_grabber import DataGrabber
+
+
+def sigmoid(x, scale=1 / 3):
+ range_0_1 = 1 / (1 + np.exp(-x * scale))
+ range_minus1_1 = 2 * range_0_1 - 1
+ return range_minus1_1
+
+
+def standardize_and_scale(data: pd.DataFrame) -> pd.DataFrame:
+ """
+ Standardizes and scales float columns in a DataFrame to [-1,1], copying other columns. Returns a new DataFrame.
+ """
+ standard_scaler = StandardScaler() # Standardize to mean 0, std 1
+
+ # Copy all columns first
+ new_data = data.copy()
+
+ # Select float columns
+ float_cols = data.select_dtypes(include=["float64"])
+
+ # Standardize float columns to mean 0, std 1
+ standardized_floats = standard_scaler.fit_transform(float_cols)
+
+ # Apply sigmoid transformation, [-3std, 3std] to [-1, 1]
+ new_data[float_cols.columns] = sigmoid(standardized_floats, scale=1 / 3)
+
+ return new_data
+
+
+def revert_standardize_and_scale_scaler(
+ transformed_values: Union[np.ndarray, List, pd.Series, float],
+ year: int,
+ variable_name: str,
+) -> List:
+ if not isinstance(transformed_values, np.ndarray):
+ transformed_values = np.array(transformed_values)
+
+ def inverse_sigmoid(y, scale=1 / 3):
+ return -np.log((2 / (y + 1)) - 1) / scale
+
+ # needed to avoid lint issues
+ dg: DataGrabber
+
+ # normally this will be deployed in a context in which dg already exists
+ # and we want to avoid wasting time by reloading the data
+ try:
+ original_column = dg.wide[variable_name][str(year)].values
+ except NameError:
+ dg = DataGrabber()
+ dg.get_features_wide([variable_name])
+ original_column = dg.wide[variable_name][str(year)].values.reshape(-1, 1)
+
+ # dg = DataGrabber()
+ # dg.get_features_wide([variable_name])
+
+ # original_column = dg.wide[variable_name][str(year)].values.reshape(-1, 1)
+ scaler = StandardScaler()
+ scaler.fit(original_column)
+
+ inverted_values_sigmoid = inverse_sigmoid(transformed_values)
+ inverted_values = scaler.inverse_transform(
+ inverted_values_sigmoid.reshape(-1, 1)
+ ).flatten()
+
+ return inverted_values
+
+
+def revert_prediction_df(df: pd.DataFrame, variable_name: str) -> pd.DataFrame:
+ df_copy = df.copy()
+
+ for i in range(len(df)):
+ df_copy.iloc[i, 1:] = revert_standardize_and_scale_scaler(
+ df.iloc[i, 1:].tolist(), df.iloc[i, 0], variable_name
+ )
+
+ return df_copy
diff --git a/build/cities/utils/data_grabber.py b/build/cities/utils/data_grabber.py
new file mode 100644
index 00000000..ba6ee5e6
--- /dev/null
+++ b/build/cities/utils/data_grabber.py
@@ -0,0 +1,119 @@
+import os
+import re
+import sys
+from pathlib import Path
+from typing import List
+
+import pandas as pd
+
+
+def find_repo_root() -> Path:
+ return Path(__file__).parent.parent.parent
+
+
+def check_if_tensed(df):
+ years_to_check = ["2015", "2018", "2019", "2020"]
+ check = df.columns[2:].isin(years_to_check).any().any()
+ return check
+
+
+class DataGrabber:
+ def __init__(self):
+ self.repo_root = find_repo_root()
+ self.data_path = os.path.join(self.repo_root, "data/processed")
+ self.wide = {}
+ self.std_wide = {}
+ self.long = {}
+ self.std_long = {}
+
+ def get_features_wide(self, features: List[str]) -> None:
+ for feature in features:
+ file_path = os.path.join(self.data_path, f"{feature}_wide.csv")
+ self.wide[feature] = pd.read_csv(file_path)
+
+ def get_features_std_wide(self, features: List[str]) -> None:
+ for feature in features:
+ file_path = os.path.join(self.data_path, f"{feature}_std_wide.csv")
+ self.std_wide[feature] = pd.read_csv(file_path)
+
+ def get_features_long(self, features: List[str]) -> None:
+ for feature in features:
+ file_path = os.path.join(self.data_path, f"{feature}_long.csv")
+ self.long[feature] = pd.read_csv(file_path)
+
+ def get_features_std_long(self, features: List[str]) -> None:
+ for feature in features:
+ file_path = os.path.join(self.data_path, f"{feature}_std_long.csv")
+ self.std_long[feature] = pd.read_csv(file_path)
+
+
+class MSADataGrabber(DataGrabber):
+ def __init__(self):
+ super().__init__()
+ self.repo_root = find_repo_root()
+ self.data_path = os.path.join(self.repo_root, "data/MSA_level")
+ sys.path.insert(0, self.data_path)
+
+
+def list_available_features(level="county"):
+ root = find_repo_root()
+
+ if level == "county":
+ folder_path = f"{root}/data/processed"
+ elif level == "msa":
+ folder_path = f"{root}/data/MSA_level"
+ else:
+ raise ValueError("Invalid level. Please choose 'county' or 'msa'.")
+
+ file_names = [f for f in os.listdir(folder_path) if f != ".gitkeep"]
+ processed_file_names = []
+
+ for file_name in file_names:
+ # Use regular expressions to find the patterns and split accordingly
+ matches = re.split(r"_wide|_long|_std", file_name)
+ if matches:
+ processed_file_names.append(matches[0])
+
+ feature_names = list(set(processed_file_names))
+
+ return sorted(feature_names)
+
+
+def list_tensed_features(level="county"):
+ if level == "county":
+ data = DataGrabber()
+ all_features = list_available_features(level="county")
+
+ elif level == "msa":
+ data = MSADataGrabber()
+ all_features = list_available_features(level="msa")
+
+ else:
+ raise ValueError("Invalid level. Please choose 'county' or 'msa'.")
+
+ data.get_features_wide(all_features)
+
+ tensed_features = []
+ for feature in all_features:
+ if check_if_tensed(data.wide[feature]):
+ tensed_features.append(feature)
+
+ return sorted(tensed_features)
+
+
+# TODO this only will pick up spending-based interventions
+# needs to be modified/expanded when we add other types of interventions
+def list_interventions():
+ interventions = [
+ feature for feature in list_tensed_features() if feature.startswith("spending_")
+ ]
+ return sorted(interventions)
+
+
+def list_outcomes():
+ outcomes = [
+ feature
+ for feature in list_tensed_features()
+ if feature not in list_interventions()
+ ]
+ return sorted(outcomes)
diff --git a/build/cities/utils/data_loader.py b/build/cities/utils/data_loader.py
new file mode 100644
index 00000000..db3a13e3
--- /dev/null
+++ b/build/cities/utils/data_loader.py
@@ -0,0 +1,89 @@
+import os
+from typing import Dict, List
+
+import pandas as pd
+import sqlalchemy
+import torch
+from torch.utils.data import Dataset
+
+
+class ZoningDataset(Dataset):
+ def __init__(
+ self,
+ categorical,
+ continuous,
+ standardization_dictionary=None,
+ ):
+ self.categorical = categorical
+ self.continuous = continuous
+
+ self.standardization_dictionary = standardization_dictionary
+
+ if self.categorical:
+ self.categorical_levels = dict()
+ for name in self.categorical.keys():
+ self.categorical_levels[name] = torch.unique(categorical[name])
+
+ N_categorical = len(categorical.keys())
+ N_continuous = len(continuous.keys())
+
+ if N_categorical > 0:
+ self.n = len(next(iter(categorical.values())))
+ elif N_continuous > 0:
+ self.n = len(next(iter(continuous.values())))
+
+ def __len__(self):
+ return self.n
+
+ def __getitem__(self, idx):
+ cat_data = {key: val[idx] for key, val in self.categorical.items()}
+ cont_data = {key: val[idx] for key, val in self.continuous.items()}
+ return {
+ "categorical": cat_data,
+ "continuous": cont_data,
+ }
+
+
+def select_from_data(data, kwarg_names: Dict[str, List[str]]):
+ _data = {}
+ _data["outcome"] = data["continuous"][kwarg_names["outcome"]]
+ _data["categorical"] = {
+ key: val
+ for key, val in data["categorical"].items()
+ if key in kwarg_names["categorical"]
+ }
+ _data["continuous"] = {
+ key: val
+ for key, val in data["continuous"].items()
+ if key in kwarg_names["continuous"]
+ }
+
+ return _data
+
+
+def db_connection():
+ DB_USERNAME = os.getenv("DB_USERNAME")
+ HOST = os.getenv("HOST")
+ DATABASE = os.getenv("DATABASE")
+ PASSWORD = os.getenv("PASSWORD")
+ DB_SEARCH_PATH = os.getenv("DB_SEARCH_PATH")
+
+ return sqlalchemy.create_engine(
+ f"postgresql://{DB_USERNAME}:{PASSWORD}@{HOST}/{DATABASE}",
+ connect_args={"options": f"-csearch-path={DB_SEARCH_PATH}"},
+ ).connect()
+
+
+def select_from_sql(sql, conn, kwargs, params=None):
+ df = pd.read_sql(sql, conn, params=params)
+ return {
+ "outcome": df[kwargs["outcome"]],
+ "categorical": {
+ key: torch.tensor(df[key].values, dtype=torch.int64)
+ for key in kwargs["categorical"]
+ },
+ "continuous": {
+ key: torch.tensor(df[key], dtype=torch.float32)
+ for key in kwargs["continuous"]
+ },
+ }
diff --git a/build/cities/utils/percentiles.py b/build/cities/utils/percentiles.py
new file mode 100644
index 00000000..c4837a53
--- /dev/null
+++ b/build/cities/utils/percentiles.py
@@ -0,0 +1,64 @@
+import os
+
+import dill as dill
+import numpy as np
+
+from cities.utils.data_grabber import DataGrabber, find_repo_root, list_interventions
+
+
+def export_sorted_interventions():
+ root = find_repo_root()
+
+ interventions = list_interventions()
+ dg = DataGrabber()
+
+ dg.get_features_std_wide(interventions)
+
+ interventions_sorted = {}
+ for intervention in interventions:
+ intervention_frame = dg.std_wide[intervention].copy().iloc[:, 2:]
+ intervention_frame = intervention_frame.apply(
+ lambda col: col.sort_values().values
+ )
+ assert (
+ all(np.diff(intervention_frame[col]) >= 0)
+ for col in intervention_frame.columns
+ ), "A column is not increasing."
+ interventions_sorted[intervention] = intervention_frame
+
+ with open(
+ os.path.join(root, "data/sorted_interventions", "interventions_sorted.pkl"),
+ "wb",
+ ) as f:
+ dill.dump(interventions_sorted, f)
+
+
+def transformed_intervention_from_percentile(intervention, year, percentile):
+ root = find_repo_root()
+
+ with open(
+ os.path.join(root, "data/sorted_interventions", "interventions_sorted.pkl"),
+ "rb",
+ ) as f:
+ interventions_sorted = dill.load(f)
+ intervention_frame = interventions_sorted[intervention]
+
+ if str(year) not in intervention_frame.columns:
+ raise ValueError("Year not in intervention frame.")
+
+ sorted_var = intervention_frame[str(year)]
+ n = len(sorted_var)
+ index = percentile * (n - 1) / 100
+
+ lower_index = int(index)
+ upper_index = lower_index + 1
+
+ if lower_index == n - 1:
+ return sorted_var[lower_index]
+
+ interpolation_factor = index - lower_index
+ interpolated_value = (1 - interpolation_factor) * sorted_var[
+ lower_index
+ ] + interpolation_factor * sorted_var[upper_index]
+
+ return interpolated_value
diff --git a/build/cities/utils/similarity_utils.py b/build/cities/utils/similarity_utils.py
new file mode 100644
index 00000000..1db37327
--- /dev/null
+++ b/build/cities/utils/similarity_utils.py
@@ -0,0 +1,172 @@
+from typing import Dict
+
+import numpy as np
+import pandas as pd
+from plotly import graph_objs as go
+
+from cities.utils.data_grabber import check_if_tensed
+
+
+def slice_with_lag(df: pd.DataFrame, fips: int, lag: int) -> Dict[str, np.ndarray]:
+ """
+ Takes a pandas dataframe, a location FIPS and a lag (years),
+ returns a dictionary with two numpy arrays:
+ - my_array: the array of features for the location with the given FIPS
+ - other_arrays: the array of features for all other locations
+ if lag>0, drops first lag columns from my_array and last lag columns from other_arrays.
+ Meant to be used prior to calculating similarity.
+ """
+ original_length = df.shape[0]
+ original_array_width = df.shape[1] - 2
+
+ # assert error if lag > original array width
+ assert (
+ lag <= original_array_width
+ ), "Lag is greater than the number of years in the dataframe"
+    assert lag >= 0, "Lag must be a non-negative integer"
+
+ # this assumes input df has two columns of metadata, then the rest are features
+ # obey this convention with other datasets!
+
+ my_row = df.loc[df["GeoFIPS"] == fips].copy()
+ my_id = my_row[["GeoFIPS", "GeoName"]]
+ my_values = my_row.iloc[:, 2 + lag :]
+
+ my_df = pd.concat([my_id, my_values], axis=1)
+
+ my_df = pd.DataFrame(
+ {**my_id.to_dict(orient="list"), **my_values.to_dict(orient="list")}
+ )
+
+ assert fips in df["GeoFIPS"].values, "FIPS not found in the dataframe"
+ other_df = df[df["GeoFIPS"] != fips].copy()
+
+ my_array = np.array(my_values)
+
+ if lag > 0:
+ other_df = df[df["GeoFIPS"] != fips].iloc[:, :-lag]
+
+ assert fips not in other_df["GeoFIPS"].values, "FIPS found in the other dataframe"
+ other_arrays = np.array(other_df.iloc[:, 2:])
+
+ assert other_arrays.shape[0] + 1 == original_length, "Dataset sizes don't match"
+ assert other_arrays.shape[1] == my_array.shape[1], "Lengths don't match"
+
+ return {
+ "my_array": my_array,
+ "other_arrays": other_arrays,
+ "my_df": my_df,
+ "other_df": other_df,
+ }
+
+
+def generalized_euclidean_distance(u, v, weights):
+ featurewise_squared_contributions = (
+ abs(weights)
+ * ((weights >= 0) * abs(u - v) + (weights < 0) * (-abs(u - v) + 2)) ** 2
+ )
+
+ featurewise_contributions = featurewise_squared_contributions ** (1 / 2)
+
+ distance = sum(featurewise_squared_contributions) ** (1 / 2)
+ return {
+ "distance": distance,
+ "featurewise_contributions": featurewise_contributions,
+ }
+
+
+def divide_exponentially(group_weight, number_of_features, rate):
+ """
+ Returns a list of `number_of_features` weights that sum to `group_weight` and are distributed
+ exponentially. Intended for time series feature groups.
+ If `rate` is 1, all weights are equal. If `rate` is greater than 1, weights
+ prefer more recent events.
+ """
+ result = []
+ denominator = sum([rate**j for j in range(number_of_features)])
+ for i in range(number_of_features):
+ value = group_weight * (rate**i) / denominator
+ result.append(value)
+ return result
+
+
+def compute_weight_array(query_object, rate=1.08):
+ assert (
+ sum(
+ abs(value)
+ for key, value in query_object.feature_groups_with_weights.items()
+ )
+ != 0
+ ), "At least one weight has to be other than 0"
+
+ max_other_scores = sum(
+ abs(value)
+ for key, value in query_object.feature_groups_with_weights.items()
+ if key != query_object.outcome_var
+ )
+
+ if (
+ query_object.outcome_var
+ and query_object.feature_groups_with_weights[query_object.outcome_var] != 0
+ ):
+ weight_outcome_joint = max_other_scores if max_other_scores > 0 else 1
+ query_object.feature_groups_with_weights[query_object.outcome_var] = (
+ weight_outcome_joint
+ * query_object.feature_groups_with_weights[query_object.outcome_var]
+ )
+
+ tensed_status = {}
+ columns = {}
+ column_counts = {}
+ weight_lists = {}
+ all_columns = []
+ for feature in query_object.feature_groups:
+ tensed_status[feature] = check_if_tensed(query_object.data.std_wide[feature])
+
+ if feature == query_object.outcome_var:
+ columns[feature] = query_object.restricted_outcome_df.columns[2:]
+ else:
+ columns[feature] = query_object.data.std_wide[feature].columns[2:]
+
+ # TODO remove if all tests passed before merging
+ # column_counts[feature] = len(query_object.data.std_wide[feature].columns) - 2
+
+ column_counts[feature] = len(columns[feature])
+
+ if feature == query_object.outcome_var and query_object.lag > 0:
+ column_counts[feature] -= query_object.lag
+
+ all_columns.extend([f"{column}_{feature}" for column in columns[feature]])
+
+ # TODO: remove if tests passed
+ # column_tags.extend([feature] * column_counts[feature])
+ if tensed_status[feature]:
+ weight_lists[feature] = divide_exponentially(
+ query_object.feature_groups_with_weights[feature],
+ column_counts[feature],
+ rate,
+ )
+ else:
+ weight_lists[feature] = [
+ query_object.feature_groups_with_weights[feature]
+ / column_counts[feature]
+ ] * column_counts[feature]
+
+ query_object.all_columns = all_columns[query_object.lag :]
+ query_object.all_weights = np.concatenate(list(weight_lists.values()))
+
+
+def plot_weights(query_object):
+ fig = go.Figure()
+
+ fig.add_trace(go.Bar(x=query_object.all_columns, y=query_object.all_weights))
+
+ fig.update_layout(
+ xaxis_title="columns",
+ yaxis_title="weights",
+ title="Weights of columns",
+ template="plotly_white",
+ )
+
+ query_object.weigth_plot = fig
+ query_object.weigth_plot.show()
diff --git a/build/cities/utils/years_available_pipeline.py b/build/cities/utils/years_available_pipeline.py
new file mode 100644
index 00000000..37ea85fe
--- /dev/null
+++ b/build/cities/utils/years_available_pipeline.py
@@ -0,0 +1,31 @@
+import os
+
+import dill
+
+from cities.modeling.modeling_utils import prep_wide_data_for_inference
+from cities.utils.data_grabber import find_repo_root, list_interventions, list_outcomes
+
+root = find_repo_root()
+interventions = list_interventions()
+outcomes = list_outcomes()
+
+
+for intervention in interventions:
+ for outcome in outcomes:
+ # intervention = "spending_HHS"
+ # outcome = "gdp"
+ data = prep_wide_data_for_inference(
+ outcome_dataset=outcome,
+ intervention_dataset=intervention,
+ forward_shift=3, # shift doesn't matter here, as long as data exists
+ )
+ data_slim = {key: data[key] for key in ["years_available", "outcome_years"]}
+
+ assert len(data_slim["years_available"]) > 2
+ file_path = os.path.join(
+ root, "data/years_available", f"{intervention}_{outcome}.pkl"
+ )
+ print(file_path)
+ if not os.path.exists(file_path):
+ with open(file_path, "wb") as f:
+ dill.dump(data_slim, f)
diff --git a/build/main.py b/build/main.py
new file mode 100644
index 00000000..fbfcea0b
--- /dev/null
+++ b/build/main.py
@@ -0,0 +1,235 @@
+import os
+
+from typing import Annotated
+
+from dotenv import load_dotenv
+from fastapi import FastAPI, Depends, Query
+from fastapi.middleware.gzip import GZipMiddleware
+import uvicorn
+
+import psycopg2
+from psycopg2.pool import ThreadedConnectionPool
+
+load_dotenv()
+
+ENV = os.getenv("ENV")
+USERNAME = os.getenv("DB_USERNAME")
+PASSWORD = os.getenv("PASSWORD")
+HOST = os.getenv("HOST")
+DATABASE = os.getenv("DATABASE")
+DB_SEARCH_PATH = os.getenv("DB_SEARCH_PATH")
+INSTANCE_CONNECTION_NAME = os.getenv("INSTANCE_CONNECTION_NAME")
+
+app = FastAPI()
+
+if ENV == "dev":
+ from fastapi.middleware.cors import CORSMiddleware
+
+ origins = [
+ "http://localhost",
+ "http://localhost:5000",
+ ]
+ app.add_middleware(CORSMiddleware, allow_origins=origins, allow_credentials=True)
+
+app.add_middleware(GZipMiddleware, minimum_size=1000, compresslevel=5)
+
+
+if ENV == "dev":
+ host = HOST
+else:
+ host = f"/cloudsql/{INSTANCE_CONNECTION_NAME}"
+
+pool = ThreadedConnectionPool(
+ 1,
+ 10,
+ user=USERNAME,
+ password=PASSWORD,
+    host=host,
+ database=DATABASE,
+ options=f"-csearch_path={DB_SEARCH_PATH}",
+)
+
+
+def get_db() -> psycopg2.extensions.connection:
+ db = pool.getconn()
+ try:
+ yield db
+ finally:
+ pool.putconn(db)
+
+
+predictor = None
+
+
+def get_predictor(db: psycopg2.extensions.connection = Depends(get_db)):
+ from cities.deployment.tracts_minneapolis.predict import TractsModelPredictor
+
+ global predictor
+ if predictor is None:
+ predictor = TractsModelPredictor(db)
+ return predictor
+
+
+Limit = Annotated[float, Query(ge=0, le=1)]
+Radius = Annotated[float, Query(ge=0)]
+Year = Annotated[int, Query(ge=2000, le=2030)]
+
+
+@app.middleware("http")
+async def add_cache_control_header(request, call_next):
+ response = await call_next(request)
+ response.headers["Cache-Control"] = "public, max-age=300"
+ return response
+
+
+if ENV == "dev":
+
+ @app.middleware("http")
+    async def add_access_control_header(request, call_next):
+ response = await call_next(request)
+ response.headers["Access-Control-Allow-Origin"] = "*"
+ return response
+
+
+@app.get("/demographics")
+async def read_demographics(
+ category: Annotated[str, Query(max_length=100)], db=Depends(get_db)
+):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select tract_id, "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021", "2022"
+ from api__demographics where description = %s
+ """,
+ (category,),
+ )
+ return [[desc[0] for desc in cur.description]] + cur.fetchall()
+
+
+@app.get("/census-tracts")
+async def read_census_tracts(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute("select * from api__census_tracts where year_ = %s", (year,))
+ row = cur.fetchone()
+
+ return row[1] if row is not None else None
+
+
+@app.get("/high-frequency-transit-lines")
+async def read_high_frequency_transit_lines(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select line_geom_json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (year,),
+ )
+ row = cur.fetchone()
+
+ return row[0] if row is not None else None
+
+
+@app.get("/high-frequency-transit-stops")
+async def read_high_frequency_transit_stops(year: Year, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select stop_geom_json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (year,),
+ )
+ row = cur.fetchone()
+
+ return row[0] if row is not None else None
+
+
+@app.get("/yellow-zone")
+async def read_yellow_zone(
+ year: Year, line_radius: Radius, stop_radius: Radius, db=Depends(get_db)
+):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select
+ st_asgeojson(st_transform(st_union(st_buffer(line_geom, %s, 'quad_segs=4'), st_buffer(stop_geom, %s, 'quad_segs=4')), 4269))::json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (line_radius, stop_radius, year),
+ )
+ row = cur.fetchone()
+
+ if row is None:
+ return None
+
+ return {
+ "type": "FeatureCollection",
+ "features": [
+ {"type": "Feature", "properties": {"id": "0"}, "geometry": row[0]}
+ ],
+ }
+
+
+@app.get("/blue-zone")
+async def read_blue_zone(year: Year, radius: Radius, db=Depends(get_db)):
+ with db.cursor() as cur:
+ cur.execute(
+ """
+ select st_asgeojson(st_transform(st_buffer(line_geom, %s, 'quad_segs=4'), 4269))::json
+ from api__high_frequency_transit_lines
+ where '%s-01-01'::date <@ valid
+ """,
+ (radius, year),
+ )
+ row = cur.fetchone()
+
+ if row is None:
+ return None
+
+ return {
+ "type": "FeatureCollection",
+ "features": [
+ {"type": "Feature", "properties": {"id": "0"}, "geometry": row[0]}
+ ],
+ }
+
+
+@app.get("/predict")
+async def read_predict(
+ blue_zone_radius: Radius,
+ yellow_zone_line_radius: Radius,
+ yellow_zone_stop_radius: Radius,
+ blue_zone_limit: Limit,
+ yellow_zone_limit: Limit,
+ year: Year,
+ db=Depends(get_db),
+ predictor=Depends(get_predictor),
+):
+ result = predictor.predict_cumulative(
+ db,
+ intervention=(
+ {
+ "radius_blue": blue_zone_radius,
+ "limit_blue": blue_zone_limit,
+ "radius_yellow_line": yellow_zone_line_radius,
+ "radius_yellow_stop": yellow_zone_stop_radius,
+ "limit_yellow": yellow_zone_limit,
+ "reform_year": year,
+ }
+ ),
+ )
+ return {
+ "census_tracts": [str(t) for t in result["census_tracts"]],
+ "housing_units_factual": [t.item() for t in result["housing_units_factual"]],
+ "housing_units_counterfactual": [
+ t.tolist() for t in result["housing_units_counterfactual"]
+ ],
+ }
+
+
+if __name__ == "__main__":
+ uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 8000)))
diff --git a/build/postgrest.conf b/build/postgrest.conf
new file mode 100644
index 00000000..ddb71965
--- /dev/null
+++ b/build/postgrest.conf
@@ -0,0 +1,107 @@
+## Admin server used for checks. It's disabled by default unless a port is specified.
+# admin-server-port = 3001
+
+## The database role to use when no client authentication is provided
+db-anon-role = "web_anon"
+
+## Notification channel for reloading the schema cache
+db-channel = "pgrst"
+
+## Enable or disable the notification channel
+db-channel-enabled = true
+
+## Enable in-database configuration
+db-config = true
+
+## Function for in-database configuration
+## db-pre-config = "postgrest.pre_config"
+
+## Extra schemas to add to the search_path of every request
+db-extra-search-path = "public"
+
+## Limit rows in response
+# db-max-rows = 1000
+
+## Allow getting the EXPLAIN plan through the `Accept: application/vnd.pgrst.plan` header
+# db-plan-enabled = false
+
+## Number of open connections in the pool
+db-pool = 10
+
+## Time in seconds to wait to acquire a slot from the connection pool
+# db-pool-acquisition-timeout = 10
+
+## Time in seconds after which to recycle pool connections
+# db-pool-max-lifetime = 1800
+
+## Time in seconds after which to recycle unused pool connections
+# db-pool-max-idletime = 30
+
+## Allow automatic database connection retrying
+# db-pool-automatic-recovery = true
+
+## Stored proc to exec immediately after auth
+# db-pre-request = "stored_proc_name"
+
+## Enable or disable prepared statements. disabling is only necessary when behind a connection pooler.
+## When disabled, statements will be parametrized but won't be prepared.
+db-prepared-statements = true
+
+## The name of which database schema to expose to REST clients
+db-schemas = "api"
+
+## How to terminate database transactions
+## Possible values are:
+## commit (default)
+##     Transaction is always committed, this can not be overridden
+##   commit-allow-override
+##     Transaction is committed, but can be overridden with Prefer tx=rollback header
+##   rollback
+##     Transaction is always rolled back, this can not be overridden
+##   rollback-allow-override
+##     Transaction is rolled back, but can be overridden with Prefer tx=commit header
+db-tx-end = "commit"
+
+## The standard connection URI format, documented at
+## https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+db-uri = "postgresql://postgres@34.123.100.76:5432/cities"
+
+# jwt-aud = "your_audience_claim"
+
+## Jspath to the role claim key
+jwt-role-claim-key = ".role"
+
+## Choose a secret, JSON Web Key (or set) to enable JWT auth
+## (use "@filename" to load from separate file)
+# jwt-secret = "secret_with_at_least_32_characters"
+jwt-secret-is-base64 = false
+
+## Enables and set JWT Cache max lifetime, disables caching with 0
+# jwt-cache-max-lifetime = 0
+
+## Logging level, the admitted values are: crit, error, warn, info and debug.
+log-level = "error"
+
+## Determine if the OpenAPI output should follow or ignore role privileges or be disabled entirely.
+## Admitted values: follow-privileges, ignore-privileges, disabled
+openapi-mode = "follow-privileges"
+
+## Base url for the OpenAPI output
+openapi-server-proxy-uri = ""
+
+## Configurable CORS origins
+# server-cors-allowed-origins = ""
+
+server-host = "!4"
+server-port = 3001
+
+## Allow getting the request-response timing information through the `Server-Timing` header
+server-timing-enabled = true
+
+## Unix socket location
+## if specified it takes precedence over server-port
+# server-unix-socket = "/tmp/pgrst.sock"
+
+## Unix socket file mode
+## When none is provided, 660 is applied by default
+# server-unix-socket-mode = "660"
diff --git a/build/requirements.txt b/build/requirements.txt
new file mode 100644
index 00000000..15840bbf
--- /dev/null
+++ b/build/requirements.txt
@@ -0,0 +1,184 @@
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+# pip-compile --extra=api --output-file=api/requirements.txt
+#
+annotated-types==0.7.0
+ # via pydantic
+anyio==4.4.0
+ # via
+ # httpx
+ # starlette
+ # watchfiles
+certifi==2024.8.30
+ # via
+ # httpcore
+ # httpx
+chirho @ git+https://github.com/BasisResearch/chirho.git
+ # via cities (setup.py)
+click==8.1.7
+ # via
+ # typer
+ # uvicorn
+contourpy==1.3.0
+ # via matplotlib
+cycler==0.12.1
+ # via matplotlib
+dill==0.3.8
+ # via cities (setup.py)
+dnspython==2.6.1
+ # via email-validator
+email-validator==2.2.0
+ # via fastapi
+fastapi[standard]==0.114.0
+ # via cities (setup.py)
+fastapi-cli[standard]==0.0.5
+ # via fastapi
+filelock==3.16.0
+ # via torch
+fonttools==4.53.1
+ # via matplotlib
+fsspec==2024.9.0
+ # via torch
+h11==0.14.0
+ # via
+ # httpcore
+ # uvicorn
+httpcore==1.0.5
+ # via httpx
+httptools==0.6.1
+ # via uvicorn
+httpx==0.27.2
+ # via fastapi
+idna==3.8
+ # via
+ # anyio
+ # email-validator
+ # httpx
+jinja2==3.1.4
+ # via
+ # fastapi
+ # torch
+joblib==1.4.2
+ # via scikit-learn
+kiwisolver==1.4.7
+ # via matplotlib
+markdown-it-py==3.0.0
+ # via rich
+markupsafe==2.1.5
+ # via jinja2
+matplotlib==3.9.2
+ # via cities (setup.py)
+mdurl==0.1.2
+ # via markdown-it-py
+mpmath==1.3.0
+ # via sympy
+networkx==3.3
+ # via torch
+numpy==2.1.1
+ # via
+ # cities (setup.py)
+ # contourpy
+ # matplotlib
+ # opt-einsum
+ # pandas
+ # pyro-ppl
+ # scikit-learn
+ # scipy
+opt-einsum==3.3.0
+ # via pyro-ppl
+packaging==24.1
+ # via
+ # matplotlib
+ # plotly
+pandas==2.2.2
+ # via cities (setup.py)
+pillow==10.4.0
+ # via matplotlib
+plotly==5.24.0
+ # via cities (setup.py)
+psycopg2==2.9.9
+ # via cities (setup.py)
+pydantic==2.9.1
+ # via fastapi
+pydantic-core==2.23.3
+ # via pydantic
+pygments==2.18.0
+ # via rich
+pyparsing==3.1.4
+ # via matplotlib
+pyro-api==0.1.2
+ # via pyro-ppl
+pyro-ppl==1.8.6
+ # via
+ # chirho
+ # cities (setup.py)
+python-dateutil==2.9.0.post0
+ # via
+ # matplotlib
+ # pandas
+python-dotenv==1.0.1
+ # via uvicorn
+python-multipart==0.0.9
+ # via fastapi
+pytz==2024.1
+ # via pandas
+pyyaml==6.0.2
+ # via uvicorn
+rich==13.8.0
+ # via typer
+scikit-learn==1.5.1
+ # via cities (setup.py)
+scipy==1.14.1
+ # via scikit-learn
+shellingham==1.5.4
+ # via typer
+six==1.16.0
+ # via python-dateutil
+sniffio==1.3.1
+ # via
+ # anyio
+ # httpx
+sqlalchemy==2.0.34
+ # via cities (setup.py)
+starlette==0.38.5
+ # via fastapi
+sympy==1.13.2
+ # via torch
+tenacity==9.0.0
+ # via plotly
+threadpoolctl==3.5.0
+ # via scikit-learn
+#torch==2.4.1
+torch @ https://download.pytorch.org/whl/cpu-cxx11-abi/torch-2.4.1%2Bcpu.cxx11.abi-cp312-cp312-linux_x86_64.whl
+ # via
+ # cities (setup.py)
+ # pyro-ppl
+tqdm==4.66.5
+ # via pyro-ppl
+typer==0.12.5
+ # via fastapi-cli
+typing-extensions==4.12.2
+ # via
+ # fastapi
+ # pydantic
+ # pydantic-core
+ # sqlalchemy
+ # torch
+ # typer
+tzdata==2024.1
+ # via pandas
+uvicorn[standard]==0.30.6
+ # via
+ # fastapi
+ # fastapi-cli
+uvloop==0.20.0
+ # via uvicorn
+watchfiles==0.24.0
+ # via uvicorn
+websockets==13.0.1
+ # via uvicorn
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/build/schema.sql b/build/schema.sql
new file mode 100644
index 00000000..2285c2b7
--- /dev/null
+++ b/build/schema.sql
@@ -0,0 +1,67 @@
+begin;
+drop schema if exists api cascade;
+
+create schema api;
+
+create view api.demographics as (
+ select * from api__demographics
+);
+
+create view api.census_tracts as (
+ select * from api__census_tracts
+);
+
+create function api.high_frequency_transit_lines() returns setof dev.api__high_frequency_transit_lines as $$
+ select * from dev.api__high_frequency_transit_lines
+$$ language sql;
+
+create function api.high_frequency_transit_lines(
+ blue_zone_radius double precision,
+ yellow_zone_line_radius double precision,
+ yellow_zone_stop_radius double precision
+) returns table (
+ valid daterange,
+ geom geometry(LineString, 4269),
+ blue_zone_geom geometry(LineString, 4269),
+ yellow_zone_geom geometry(Geometry, 4269)
+) as $$
+ with
+ lines as (select * from dev.stg_high_frequency_transit_lines_union),
+ stops as (select * from dev.high_frequency_transit_stops),
+ lines_and_stops as (
+ select
+ lines.valid * stops.valid as valid,
+ lines.geom as line_geom,
+ stops.geom as stop_geom
+ from lines inner join stops on lines.valid && stops.valid
+ )
+ select
+ valid,
+ st_transform(line_geom, 4269) as geom,
+ st_transform(st_buffer(line_geom, blue_zone_radius), 4269) as blue_zone_geom,
+ st_transform(st_union(st_buffer(line_geom, yellow_zone_line_radius), st_buffer(stop_geom, yellow_zone_stop_radius)), 4269) as yellow_zone_geom
+ from lines_and_stops
+$$ language sql;
+
+do $$
+begin
+create role web_anon nologin;
+exception when duplicate_object then raise notice '%, skipping', sqlerrm using errcode = sqlstate;
+end
+$$;
+
+grant all on schema public to web_anon;
+grant all on schema dev to web_anon;
+grant select on table public.spatial_ref_sys TO web_anon;
+grant usage on schema api to web_anon;
+grant all on all tables in schema api to web_anon;
+grant all on all functions in schema api to web_anon;
+grant all on schema api to web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA dev TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA dev TO web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA api TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA api TO web_anon;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO web_anon;
+GRANT ALL PRIVILEGES ON ALL functions IN SCHEMA public TO web_anon;
+grant web_anon to postgres;
+commit;
diff --git a/cities/deployment/tracts_minneapolis/tracts_model_guide.pkl b/cities/deployment/tracts_minneapolis/tracts_model_guide.pkl
new file mode 100644
index 00000000..b99e3a1d
Binary files /dev/null and b/cities/deployment/tracts_minneapolis/tracts_model_guide.pkl differ
diff --git a/cities/deployment/tracts_minneapolis/tracts_model_params.pth b/cities/deployment/tracts_minneapolis/tracts_model_params.pth
new file mode 100644
index 00000000..07942006
Binary files /dev/null and b/cities/deployment/tracts_minneapolis/tracts_model_params.pth differ
diff --git a/cities/utils/data_grabber.py b/cities/utils/data_grabber.py
index bdd48780..3cb3736b 100644
--- a/cities/utils/data_grabber.py
+++ b/cities/utils/data_grabber.py
@@ -294,15 +294,15 @@ def get_features_std_long(self, features: List[str]) -> None:
self._get_features(features, "std_long")
-DataGrabber = DataGrabberDB
-
+# DataGrabber = DataGrabberDB
+# DataGrabber = DataGrabberCSV
-def MSADataGrabberFactory():
- return DataGrabberDB(level="msa")
+# def MSADataGrabberFactory():
+# return DataGrabberDB(level="msa")
-MSADataGrabber = MSADataGrabberFactory
+# MSADataGrabber = MSADataGrabberFactory
# this reverts to csvs
-# DataGrabber = DataGrabberCSV
-# MSADataGrabber = MSADataGrabberCSV
+DataGrabber = DataGrabberCSV
+MSADataGrabber = MSADataGrabberCSV
diff --git a/data/minneapolis/.pgpass b/data/minneapolis/.pgpass
new file mode 100644
index 00000000..ec7b3921
--- /dev/null
+++ b/data/minneapolis/.pgpass
@@ -0,0 +1 @@
+34.123.100.76:5432:cities:postgres:VA.TlSR#Z%mu**Q9
\ No newline at end of file
diff --git a/data/minneapolis/sourced/demographic/ar-two-ts-one-predictor.ipynb b/data/minneapolis/sourced/demographic/ar-two-ts-one-predictor.ipynb
new file mode 100644
index 00000000..0d6c2cb9
--- /dev/null
+++ b/data/minneapolis/sourced/demographic/ar-two-ts-one-predictor.ipynb
@@ -0,0 +1,836 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from typing import Dict, List\n",
+ "import math\n",
+ "import torch\n",
+ "import pyro\n",
+ "import pyro.distributions as dist\n",
+ "from ts_plots import plot_ts\n",
+ "import pyro.optim as optim\n",
+ "import pyro.infer as infer\n",
+ "import seaborn as sns \n",
+ "import matplotlib.pyplot as plt\n",
+ "import pyro\n",
+ "import torch\n",
+ "from chirho.indexed.ops import IndexSet, gather, indices_of\n",
+ "from pyro.infer.autoguide import (AutoMultivariateNormal, init_to_mean, AutoNormal,\n",
+ " AutoLowRankMultivariateNormal, AutoGaussian,)\n",
+ "import copy\n",
+ "\n",
+ "# import condition from chirho\n",
+ "from chirho.observational.handlers import condition\n",
+ "\n",
+ "\n",
+ "from torch.utils.data import DataLoader\n",
+ "\n",
+ "\n",
+ "\n",
+ "smoke_test = 'CI' in os.environ\n",
+ "\n",
+ "n_samples = 10 if smoke_test else 1000\n",
+ "n_steps = 10 if smoke_test else 500\n",
+    "n_series = 2 if smoke_test else 8 #TODO upgrade to 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "census_tracts_data_path = \"pg_census_tracts_dataset.pt\"\n",
+ "\n",
+ "def select_from_data(data, kwarg_names: Dict[str, List[str]]):\n",
+ " _data = {}\n",
+ " _data[\"outcome\"] = data[\"continuous\"][kwarg_names[\"outcome\"]]\n",
+ " _data[\"categorical\"] = {\n",
+ " key: val\n",
+ " for key, val in data[\"categorical\"].items()\n",
+ " if key in kwarg_names[\"categorical\"]\n",
+ " }\n",
+ " _data[\"continuous\"] = {\n",
+ " key: val\n",
+ " for key, val in data[\"continuous\"].items()\n",
+ " if key in kwarg_names[\"continuous\"]\n",
+ " }\n",
+ "\n",
+ " return _data\n",
+ "\n",
+ "ct_dataset_read = torch.load(census_tracts_data_path, weights_only=False)\n",
+ "ct_loader = DataLoader(ct_dataset_read, batch_size=len(ct_dataset_read), shuffle=True)\n",
+ "data = next(iter(ct_loader))\n",
+ "\n",
+ "kwargs = {\n",
+ " \"categorical\": [\"year\", \"census_tract\", 'university_index', 'downtown_index'],\n",
+ " \"continuous\": {\n",
+ " \"housing_units\",\n",
+ " \"housing_units_original\"\n",
+ " \"total_value\",\n",
+ " \"median_value\",\n",
+ " \"mean_limit_original\",\n",
+ " \"median_distance\",\n",
+ " \"income\",\n",
+ " 'limit',\n",
+ " \"segregation_original\",\n",
+ " \"white_original\",\n",
+ " \"parcel_sqm\",\n",
+ " 'downtown_overlap', \n",
+ " 'university_overlap',\n",
+ " },\n",
+ " \"outcome\": \"housing_units\",\n",
+ "}\n",
+ "\n",
+ "subset = select_from_data(data, kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "outcome_obs = copy.deepcopy(subset['outcome'])\n",
+ "\n",
+ "series_idx = copy.deepcopy(subset['categorical']['census_tract'])\n",
+ "time_idx = copy.deepcopy(subset['categorical']['year'])\n",
+ "\n",
+ "\n",
+ "unique_series = torch.unique(series_idx)\n",
+ "unique_times = torch.unique(time_idx)\n",
+ "\n",
+ "num_series = unique_series.size(0)\n",
+ "time_steps = unique_times.size(0)\n",
+ "\n",
+ "reshaped_outcome = torch.empty((num_series, time_steps), dtype=outcome_obs.dtype)\n",
+ "reshaped_outcome[...,:] = torch.nan \n",
+ "\n",
+ "def reshape_into_time_series(variable, series_idx, time_idx):\n",
+ " \n",
+ " # raise value eror if they are not of the same shape\n",
+ " if variable.shape[0] != series_idx.shape[0] or variable.shape[0] != time_idx.shape[0]:\n",
+ " raise ValueError(\"The shapes of variable, series_idx, and time_idx must match.\")\n",
+ " \n",
+ " unique_series = torch.unique(series_idx)\n",
+ " unique_times = torch.unique(time_idx)\n",
+ "\n",
+ " num_series = unique_series.size(0)\n",
+ " time_steps = unique_times.size(0)\n",
+ "\n",
+ " reshaped_variable= torch.empty((num_series, time_steps), dtype=variable.dtype)\n",
+ " reshaped_variable[...,:] = torch.nan\n",
+ "\n",
+ " for i, series in enumerate(unique_series):\n",
+ " for j, time in enumerate(unique_times):\n",
+ " mask = (series_idx == series) & (time_idx == time)\n",
+ " index = torch.where(mask)[0]\n",
+ " if index.numel() > 0:\n",
+ " reshaped_variable[i, j] = variable[index]\n",
+ " \n",
+ " for i, series_id in enumerate(unique_series):\n",
+ " sorted_times, sorted_indices = torch.sort(time_idx[series_idx == series_id])\n",
+ " sorted_outcomes = outcome_obs[series_idx == series_id][sorted_indices]\n",
+ " assert torch.all(reshaped_variable[i,:] == sorted_outcomes)\n",
+ "\n",
+ " return { \"reshaped_variable\": reshaped_variable, \"unique_series\": unique_series, \"unique_times\": unique_times }\n",
+ "\n",
+ "reshaped_outcome_obs = reshape_into_time_series(outcome_obs, series_idx, time_idx) \n",
+ "outcome_obs_ts = reshaped_outcome_obs[\"reshaped_variable\"]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([27053000101, 27053000102, 27053000300, 27053000601, 27053000603,\n",
+ " 27053001100, 27053001700, 27053002200, 27053002400, 27053002700,\n",
+ " 27053003200, 27053003300, 27053003800, 27053005901, 27053005902,\n",
+ " 27053006800, 27053007700, 27053007801, 27053008100, 27053008200,\n",
+ " 27053008300, 27053008400, 27053008500, 27053009500, 27053009600,\n",
+ " 27053010600, 27053010700, 27053011000, 27053011703, 27053011800,\n",
+ " 27053011998, 27053012001, 27053012003, 27053100200, 27053100400,\n",
+ " 27053100500, 27053100700, 27053100800, 27053100900, 27053101200,\n",
+ " 27053101300, 27053101600, 27053101800, 27053101900, 27053102000,\n",
+ " 27053102100, 27053102300, 27053102500, 27053102600, 27053102800,\n",
+ " 27053102900, 27053103000, 27053103100, 27053103400, 27053103600,\n",
+ " 27053103700, 27053103900, 27053104000, 27053104100, 27053104400,\n",
+ " 27053104800, 27053104900, 27053105100, 27053105201, 27053105204,\n",
+ " 27053105400, 27053105500, 27053105600, 27053105700, 27053106000,\n",
+ " 27053106200, 27053106400, 27053106500, 27053106600, 27053106700,\n",
+ " 27053106900, 27053107000, 27053107400, 27053107500, 27053107600,\n",
+ " 27053108000, 27053108600, 27053108700, 27053108800, 27053108900,\n",
+ " 27053109000, 27053109100, 27053109200, 27053109300, 27053109400,\n",
+ " 27053109700, 27053109800, 27053109900, 27053110000, 27053110100,\n",
+ " 27053110200, 27053110400, 27053110500, 27053110800, 27053110900,\n",
+ " 27053111100, 27053111200, 27053111300, 27053111400, 27053111500,\n",
+ " 27053111600, 27053125600, 27053125700, 27053125800, 27053125900,\n",
+ " 27053126000, 27053126100, 27053126200])\n",
+ "torch.Size([1130])\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(unique_series)\n",
+ "print(series_position.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([-0.3007, -0.3340, -0.3229, -0.3007, -0.3340, -0.3007, -0.3229, -0.3451,\n",
+ " -0.3007, -0.3340])\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "tensor([27053102800, 27053105600, 27053102000, ..., 27053108600,\n",
+ " 27053100200, 27053100700])"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "print(outcome_obs[series_idx == 27053102800])\n",
+ "\n",
+ "series_idx"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "torch.Size([8, 1])\n",
+ "predictor tensor([[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n",
+ " 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n",
+ " 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,\n",
+ " 1.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000,\n",
+ " -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000,\n",
+ " -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000, -2.0000,\n",
+ " -2.0000, -2.0000],\n",
+ " [ 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000,\n",
+ " 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000,\n",
+ " 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000, 1.1000,\n",
+ " 1.1000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000,\n",
+ " -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000,\n",
+ " -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000, -1.8000,\n",
+ " -1.8000, -1.8000],\n",
+ " [ 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000,\n",
+ " 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000,\n",
+ " 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000, 1.2000,\n",
+ " 1.2000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000,\n",
+ " -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000,\n",
+ " -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000, -1.6000,\n",
+ " -1.6000, -1.6000],\n",
+ " [ 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000,\n",
+ " 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000,\n",
+ " 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000, 1.3000,\n",
+ " 1.3000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000,\n",
+ " -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000,\n",
+ " -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000, -1.4000,\n",
+ " -1.4000, -1.4000],\n",
+ " [ 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000,\n",
+ " 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000,\n",
+ " 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000, 3.4000,\n",
+ " 3.4000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000,\n",
+ " 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000,\n",
+ " 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000, 4.8000,\n",
+ " 4.8000, 4.8000],\n",
+ " [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000,\n",
+ " 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000,\n",
+ " 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000,\n",
+ " 3.5000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000,\n",
+ " 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000,\n",
+ " 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000,\n",
+ " 5.0000, 5.0000],\n",
+ " [ 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000,\n",
+ " 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000,\n",
+ " 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000, 3.6000,\n",
+ " 3.6000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000,\n",
+ " 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000,\n",
+ " 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000, 5.2000,\n",
+ " 5.2000, 5.2000],\n",
+ " [ 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000,\n",
+ " 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000,\n",
+ " 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000, 3.7000,\n",
+ " 3.7000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000,\n",
+ " 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000,\n",
+ " 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000, 5.4000,\n",
+ " 5.4000, 5.4000]])\n",
+ "init tensor([[ 1.6614],\n",
+ " [ 1.2669],\n",
+ " [ 1.0617],\n",
+ " [ 1.6213],\n",
+ " [ 0.5481],\n",
+ " [ 0.8339],\n",
+ " [-0.5228],\n",
+ " [ 1.3817]]) torch.Size([8, 1])\n",
+ "ytrue tensor([0., 0., 0., 0., 0., 0., 0., 0.]) torch.Size([8])\n",
+ "init tensor([[ 1.6614],\n",
+ " [ 1.2669],\n",
+ " [ 1.0617],\n",
+ " [ 1.6213],\n",
+ " [ 0.5481],\n",
+ " [ 0.8339],\n",
+ " [-0.5228],\n",
+ " [ 1.3817]]) torch.Size([8, 1])\n",
+ "sampling tensor([1.1645, 1.0568, 1.0247, 1.2985, 1.9192, 2.0835, 1.5909, 2.4027]) tensor([0.9590, 0.9442, 0.8462, 1.2869, 1.8801, 1.8904, 1.6754, 2.4561]) 0.2\n",
+ "sampling tensor([0.8836, 0.9277, 0.9385, 1.1648, 2.4521, 2.5062, 2.4702, 2.8325]) tensor([0.7994, 0.8255, 0.6240, 1.1401, 3.1695, 2.1399, 2.7899, 2.5771]) 0.2\n",
+ "sampling tensor([0.8197, 0.8802, 0.8496, 1.1060, 2.9678, 2.6060, 2.9160, 2.8808]) tensor([0.8848, 0.7844, 1.1254, 1.6118, 3.0499, 2.4084, 2.7343, 2.9893]) 0.2\n",
+ "sampling tensor([0.8539, 0.8638, 1.0502, 1.2947, 2.9200, 2.7133, 2.8937, 3.0457]) tensor([0.8760, 0.4120, 1.1715, 1.2670, 3.0862, 2.6638, 2.7331, 3.0930]) 0.2\n",
+ "sampling tensor([0.8504, 0.7148, 1.0686, 1.1568, 2.9345, 2.8155, 2.8933, 3.0872]) tensor([0.9075, 0.8527, 0.9420, 1.3327, 2.7976, 2.9062, 2.9515, 2.9209]) 0.2\n",
+ "sampling tensor([0.8630, 0.8911, 0.9768, 1.1831, 2.8191, 2.9125, 2.9806, 3.0183]) tensor([0.7525, 1.0182, 0.8974, 1.0517, 2.4905, 3.1085, 2.9722, 2.8542]) 0.2\n",
+ "sampling tensor([0.8010, 0.9573, 0.9590, 1.0707, 2.6962, 2.9934, 2.9889, 2.9917]) tensor([0.8637, 0.7302, 1.0344, 1.0142, 2.1829, 2.7073, 3.0891, 3.1004]) 0.2\n",
+ "sampling tensor([0.8455, 0.8421, 1.0138, 1.0557, 2.5731, 2.8329, 3.0356, 3.0902]) tensor([0.7643, 1.0689, 0.7915, 1.1257, 2.4191, 2.8035, 3.1611, 3.3089]) 0.2\n",
+ "sampling tensor([0.8057, 0.9776, 0.9166, 1.1003, 2.6676, 2.8714, 3.0644, 3.1735]) tensor([0.8245, 1.2252, 0.6474, 1.2027, 2.5290, 2.8380, 2.8644, 2.8440]) 0.2\n",
+ "sampling tensor([0.8298, 1.0401, 0.8590, 1.1311, 2.7116, 2.8852, 2.9458, 2.9876]) tensor([0.9918, 1.0512, 1.0858, 1.0245, 2.8434, 2.5659, 2.8704, 2.3672]) 0.2\n",
+ "sampling tensor([0.8967, 0.9705, 1.0343, 1.0598, 2.8374, 2.7764, 2.9482, 2.7969]) tensor([0.8768, 0.8262, 1.2885, 1.0594, 2.6183, 2.8967, 3.0878, 2.6368]) 0.2\n",
+ "sampling tensor([0.8507, 0.8805, 1.1154, 1.0738, 2.7473, 2.9087, 3.0351, 2.9047]) tensor([1.1584, 1.1739, 1.4344, 0.7682, 2.9505, 2.8683, 2.7778, 3.0693]) 0.2\n",
+ "sampling tensor([0.9633, 1.0196, 1.1738, 0.9573, 2.8802, 2.8973, 2.9111, 3.0777]) tensor([0.8413, 0.7604, 0.9851, 1.0909, 3.1127, 2.8327, 3.2868, 2.9644]) 0.2\n",
+ "sampling tensor([0.8365, 0.8542, 0.9940, 1.0864, 2.9451, 2.8831, 3.1147, 3.0358]) tensor([0.9169, 0.8311, 1.0574, 1.1989, 3.1183, 2.8125, 3.1844, 3.2632]) 0.2\n",
+ "sampling tensor([0.8667, 0.8824, 1.0230, 1.1296, 2.9473, 2.8750, 3.0737, 3.1553]) tensor([0.8000, 0.5880, 1.1689, 1.1033, 2.8200, 3.0836, 3.1718, 3.3616]) 0.2\n",
+ "sampling tensor([0.8200, 0.7852, 1.0676, 1.0913, 2.8280, 2.9834, 3.0687, 3.1947]) tensor([0.7002, 1.1055, 0.8529, 0.8479, 2.9574, 2.9752, 3.0332, 3.0946]) 0.2\n",
+ "sampling tensor([0.7801, 0.9922, 0.9411, 0.9891, 2.8830, 2.9401, 3.0133, 3.0879]) tensor([0.9535, 0.9376, 0.8490, 0.9693, 2.9775, 3.1411, 2.9559, 2.8555]) 0.2\n",
+ "sampling tensor([0.8814, 0.9250, 0.9396, 1.0377, 2.8910, 3.0064, 2.9823, 2.9922]) tensor([0.8869, 1.0381, 0.9373, 1.1719, 2.7924, 3.3074, 2.5171, 3.3156]) 0.2\n",
+ "sampling tensor([0.8548, 0.9652, 0.9749, 1.1187, 2.8170, 3.0730, 2.8068, 3.1762]) tensor([0.6742, 1.0000, 0.9904, 0.9320, 2.8353, 3.3518, 2.6693, 3.0751]) 0.2\n",
+ "sampling tensor([0.7697, 0.9500, 0.9961, 1.0228, 2.8341, 3.0907, 2.8677, 3.0800]) tensor([0.8082, 1.0932, 0.8338, 0.7305, 2.8807, 3.1286, 2.8236, 3.1098]) 0.2\n",
+ "sampling tensor([0.8233, 0.9873, 0.9335, 0.9422, 2.8523, 3.0014, 2.9295, 3.0939]) tensor([0.8253, 0.9624, 1.2274, 0.8632, 2.7502, 3.2247, 2.8109, 3.2757]) 0.2\n",
+ "sampling tensor([0.8301, 0.9350, 1.0910, 0.9953, 2.8001, 3.0399, 2.9244, 3.1603]) tensor([0.6143, 0.7999, 1.3726, 0.6261, 2.6870, 2.8580, 2.8266, 3.0368]) 0.2\n",
+ "sampling tensor([0.7457, 0.8700, 1.1491, 0.9005, 2.7748, 2.8932, 2.9306, 3.0647]) tensor([0.8224, 0.9470, 1.3068, 1.1277, 2.7519, 2.8896, 2.9404, 3.1502]) 0.2\n",
+ "sampling tensor([0.8290, 0.9288, 1.1227, 1.1011, 2.8007, 2.9058, 2.9762, 3.1101]) tensor([0.6490, 1.0351, 1.2034, 1.3915, 2.3171, 2.6677, 3.1155, 3.3360]) 0.2\n",
+ "sampling tensor([-0.7404, -0.4860, -0.3186, -0.1434, 3.3268, 3.5671, 3.8462, 4.0344]) tensor([-0.6961, -0.4971, -0.0775, 0.2463, 3.3115, 3.3958, 3.6888, 3.8712]) 0.2\n",
+ "sampling tensor([-1.2784, -1.0989, -0.8310, -0.6015, 3.7246, 3.8583, 4.0755, 4.2485]) tensor([-1.1690, -1.3330, -0.9250, -0.9269, 3.7021, 4.1779, 3.9066, 4.0387]) 0.2\n",
+ "sampling tensor([-1.4676, -1.4332, -1.1700, -1.0708, 3.8808, 4.1712, 4.1626, 4.3155]) tensor([-1.2799, -1.3256, -0.8626, -1.2096, 3.9243, 4.1113, 4.0877, 4.6889]) 0.2\n",
+ "sampling tensor([-1.5120, -1.4303, -1.1450, -1.1838, 3.9697, 4.1445, 4.2351, 4.5756]) tensor([-1.3311, -1.4066, -0.7662, -1.2684, 4.1184, 4.1596, 4.4624, 4.1900]) 0.2\n",
+ "sampling tensor([-1.5324, -1.4627, -1.1065, -1.2074, 4.0473, 4.1639, 4.3850, 4.3760]) tensor([-1.6437, -1.4913, -0.5709, -1.0727, 4.1306, 3.9616, 4.4953, 4.5742]) 0.2\n",
+ "sampling tensor([-1.6575, -1.4965, -1.0283, -1.1291, 4.0522, 4.0847, 4.3981, 4.5297]) tensor([-1.8631, -1.6930, -1.4019, -1.0118, 3.9594, 3.8904, 4.1861, 4.6715]) 0.2\n",
+ "sampling tensor([-1.7452, -1.5772, -1.3608, -1.1047, 3.9838, 4.0561, 4.2744, 4.5686]) tensor([-1.7631, -1.7439, -1.2705, -1.2866, 3.9983, 4.2274, 4.0546, 4.6271]) 0.2\n",
+ "sampling tensor([-1.7053, -1.5976, -1.3082, -1.2146, 3.9993, 4.1910, 4.2219, 4.5508]) tensor([-1.5366, -1.5879, -1.0659, -1.5067, 4.3592, 4.2945, 3.9881, 4.7027]) 0.2\n",
+ "sampling tensor([-1.6146, -1.5351, -1.2264, -1.3027, 4.1437, 4.2178, 4.1953, 4.5811]) tensor([-1.9884, -1.4237, -1.1947, -1.4989, 3.9685, 4.1652, 4.1970, 4.5946]) 0.2\n",
+ "sampling tensor([-1.7954, -1.4695, -1.2779, -1.2996, 3.9874, 4.1661, 4.2788, 4.5379]) tensor([-1.8362, -1.5743, -1.3397, -1.2107, 3.7901, 3.8011, 4.3986, 4.7661]) 0.2\n",
+ "sampling tensor([-1.7345, -1.5297, -1.3359, -1.1843, 3.9161, 4.0204, 4.3594, 4.6065]) tensor([-1.7281, -1.4548, -1.3361, -0.9866, 4.1320, 3.7112, 4.3819, 4.2332]) 0.2\n",
+ "sampling tensor([-1.6912, -1.4819, -1.3344, -1.0946, 4.0528, 3.9845, 4.3528, 4.3933]) tensor([-1.9101, -1.4205, -1.0189, -1.2292, 4.3946, 4.0302, 4.4955, 4.1212]) 0.2\n",
+ "sampling tensor([-1.7641, -1.4682, -1.2076, -1.1917, 4.1578, 4.1121, 4.3982, 4.3485]) tensor([-1.6857, -1.4738, -1.0328, -1.6376, 3.9828, 3.6963, 4.5198, 4.2196]) 0.2\n",
+ "sampling tensor([-1.6743, -1.4895, -1.2131, -1.3550, 3.9931, 3.9785, 4.4079, 4.3878]) tensor([-1.7335, -1.5964, -0.8609, -1.5713, 3.7540, 4.0331, 4.0207, 4.1290]) 0.2\n",
+ "sampling tensor([-1.6934, -1.5386, -1.1444, -1.3285, 3.9016, 4.1132, 4.2083, 4.3516]) tensor([-1.7766, -1.5831, -1.1011, -1.2238, 4.0234, 4.2672, 4.3486, 4.3273]) 0.2\n",
+ "sampling tensor([-1.7106, -1.5333, -1.2404, -1.1895, 4.0094, 4.2069, 4.3395, 4.4309]) tensor([-2.0009, -1.9794, -1.5991, -1.3017, 3.8831, 4.2267, 4.2345, 4.2356]) 0.2\n",
+ "sampling tensor([-1.8004, -1.6917, -1.4397, -1.2207, 3.9532, 4.1907, 4.2938, 4.3942]) tensor([-1.5994, -1.4702, -1.3363, -1.2015, 3.9650, 3.9079, 4.2204, 4.7107]) 0.2\n",
+ "sampling tensor([-1.6397, -1.4881, -1.3345, -1.1806, 3.9860, 4.0632, 4.2882, 4.5843]) tensor([-1.7306, -1.1702, -1.0202, -1.2503, 4.0385, 3.9392, 4.1451, 4.6010]) 0.2\n",
+ "sampling tensor([-1.6923, -1.3681, -1.2081, -1.2001, 4.0154, 4.0757, 4.2580, 4.5404]) tensor([-1.6326, -0.9675, -1.0959, -1.5258, 3.7411, 3.8427, 4.0080, 4.6235]) 0.2\n",
+ "sampling tensor([-1.6531, -1.2870, -1.2384, -1.3103, 3.8964, 4.0371, 4.2032, 4.5494]) tensor([-1.5052, -1.4606, -1.1210, -1.3427, 3.6279, 4.1991, 4.4115, 4.6487]) 0.2\n",
+ "sampling tensor([-1.6021, -1.4842, -1.2484, -1.2371, 3.8512, 4.1796, 4.3646, 4.5595]) tensor([-1.2590, -1.7062, -1.1769, -1.3045, 3.8121, 4.5182, 4.1822, 4.5201]) 0.2\n",
+ "sampling tensor([-1.5036, -1.5825, -1.2708, -1.2218, 3.9249, 4.3073, 4.2729, 4.5080]) tensor([-1.3670, -1.4537, -1.5777, -1.1912, 3.8517, 4.3122, 4.2566, 4.8096]) 0.2\n",
+ "sampling tensor([-1.5468, -1.4815, -1.4311, -1.1765, 3.9407, 4.2249, 4.3026, 4.6238]) tensor([-1.3126, -1.3427, -1.2613, -1.1219, 4.0166, 4.2344, 4.5727, 4.6434]) 0.2\n",
+ "sampling tensor([-1.5250, -1.4371, -1.3045, -1.1488, 4.0066, 4.1938, 4.4291, 4.5574]) tensor([-1.8126, -1.0757, -1.5558, -0.8167, 3.8255, 4.0765, 4.6362, 4.6970]) 0.2\n",
+ "sampling tensor([-1.7250, -1.3303, -1.4223, -1.0267, 3.9302, 4.1306, 4.4545, 4.5788]) tensor([-1.4974, -1.4293, -1.4973, -0.9230, 3.7944, 4.4144, 4.6211, 4.2113]) 0.2\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/tmp/ipykernel_259967/3164560226.py:57: UserWarning: FigureCanvasAgg is non-interactive, and thus cannot be shown\n",
+ " fig.show()\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA0wAAAIjCAYAAAAwSJuMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3hUVfrA8e+dmcyk994TkgAhQCAh9C4gxd7AAra1u8W6/lxXV11dV9e+ltVV1AVBwIZ0BIFQE0IIkN4L6b1Nv78/RkZiCkkIROB8nicPyZ1zzz13kpDz3nPOeyRZlmUEQRAEQRAEQRCEThSD3QBBEARBEARBEITfKhEwCYIgCIIgCIIgdEMETIIgCIIgCIIgCN0QAZMgCIIgCIIgCEI3RMAkCIIgCIIgCILQDREwCYIgCIIgCIIgdEMETIIgCIIgCIIgCN0QAZMgCIIgCIIgCEI3RMAkCIIgCIIgCILQDREwCYIgCBesr776Cnd3d1paWvp87ubNm3F0dKS6uvoctEwQBEG4WIiASRAEYZDl5OSwePFiAgMDsbe3Z9iwYTz//PO0tbWd8dyVK1fy5ptvnvtGnqWNGzciSRL+/v6YzeYuy4SGhiJJkvXDwcGBhIQEPv/88y7Lm0wmnn32WR5++GEcHR2tx7du3cpdd91FTEwMSqWS0NDQLs+//PLLiYiI4OWXXz7r+xMEQRAuXiJgEgRBGEQlJSUkJCRw4MABHnroId58800mTpzIs88+y5IlS854/oUSMK1YsYLQ0FDKy8vZsWNHt+ViY2P54osv+OKLL3juuedobGxk2bJlfPTRR53Krl+/nqysLO65554Ox1euXMnKlStxcXHB39+/x3bde++9fPjhhzQ3N/fvxgRBEISLniTLsjzYjRAEQbhUvfTSSzz99NMcP36cESNGWI8vW7aMzz//nLq6Otzc3Lo9f9GiRRw/fpzCwsIzXkur1aJWq1Eozu+zstbWVnx8fHj55Zf59NNPGT16NJ9++mmncqGhocTExPDDDz9Yj1VXVxMeHk5QUBDp6ekdyl911VXU1dWxZ8+eDsdPnjyJl5cXNjY2Z3x/qqqq8Pf35z//+Q933nnn2d9sN4xGI2azGbVafc6uIQiCIJwbYoRJEARhEDU1NQHg4+PT4bifnx8KhaLHDvaMGTPYsGEDRUVF1mlsp6af/fTTT0iSxKpVq/jLX/5CQEAA9vb2NDU18dxzzyFJUqf6li9fjiRJnYKLTZs2MXXqVBwcHHBycmLhwoWcOHGi1/f4zTff0N7ezg033MDixYv5+uuv0Wq1vTrXy8uLYcOGkZeX1+G4Vqtl8+bNXHbZZZ3O8ff3x8bGplf1e3t7M2rUKL777rszlr399ttxdHQkPz+fefPm4eDggL+/P88//zynP3ssLCxEkiRee+013nzzTYYMGYJGo7EGfDt27LC+n66urlx11VVkZGR0ul5ZWRl33XUX/v7+aDQawsLCuP/++9Hr9dYyDQ0N/PGPfyQoKAiNRkNERASvvPJKp2mPq1atIi4uDicnJ5ydnRk5ciRvvfWW9XWDwcDf/vY3IiMjsbW1xcPDgylTprBt27YO9WRmZnL99dfj7u6Ora0t8fHxfP/99x3K9LYuQRCEC4VqsBsgCIJwKZsxYwavvPIKd911F3/729/w8PBg3759vP/++/z+97/HwcGh23OffvppGhsbKS0t5Y033gDosJYH4IUXXkCtVvPYY4+h0+n6PMLxxRdfsGzZMubNm8crr7xCW1sb77//PlOmTOHIkSPdrg863YoVK5g5cya+vr4sXryYP//5z6xfv54bbrjhjOcajUZKS0s7jbIdPnwYvV7P2LFj+3Q/XYmLi+Pbb7/tVVmTycTll1/OhAkT+Oc//8nmzZt59tlnMRqNPP/88x3Kfvrpp2i1Wu655x40Gg3u7u5s376d+fPnEx4eznPPPUd7ezvvvPMOkydPJiUlxfp+njx5koSE
BBoaGrjnnnsYNmwYZWVlrF27lra2NtRqNW1tbUyfPp2ysjLuvfdegoOD2bdvH0899RTl5eXWqZrbtm1jyZIlzJ49m1deeQWAjIwM9u7dyx/+8AcAnnvuOV5++WXuvvtuEhISaGpqIjk5mZSUFObMmQPAiRMnmDx5MgEBAfz5z3/GwcGBr776iquvvpp169ZxzTXX9LouQRCEC4osCIIgDKoXXnhBtrOzkwHrx9NPP92rcxcuXCiHhIR0Or5z504ZkMPDw+W2trYOrz377LNyV//9f/rppzIgFxQUyLIsy83NzbKrq6v8u9/9rkO5iooK2cXFpdPxrlRWVsoqlUr+6KOPrMcmTZokX3XVVZ3KhoSEyHPnzpWrq6vl6upq+dixY/Jtt90mA/KDDz7YoezHH38sA/KxY8d6vH5378/pXnrpJRmQKysreyy3bNkyGZAffvhh6zGz2SwvXLhQVqvVcnV1tSzLslxQUCADsrOzs1xVVdWhjtjYWNnb21uura21Hjt69KisUCjkpUuXWo8tXbpUVigUclJSUqd2mM1mWZYtPzcODg5ydnZ2h9f//Oc/y0qlUi4uLpZlWZb/8Ic/yM7OzrLRaOz23kaPHi0vXLiwx/ufPXu2PHLkSFmr1XZoy6RJk+TIyMg+1SUIgnAhEVPyBEEQBlloaCjTpk3jP//5D+vWrePOO+/kpZde4t133z3rupctW4adnV2/zt22bRsNDQ0sWbKEmpoa64dSqWT8+PHs3LnzjHWsWrUKhULBddddZz22ZMkSNm3aRH19fafyW7duxcvLCy8vL0aOHMkXX3zBHXfcwauvvtqhXG1tLUCP67t661QdNTU1vSr/0EMPWT+XJImHHnoIvV7P9u3bO5S77rrr8PLysn5dXl5Oamoqt99+O+7u7tbjo0aNYs6cOWzcuBEAs9nMt99+yxVXXEF8fHyn65+aTrlmzRqmTp2Km5tbh+/PZZddhslkYvfu3QC4urrS2tra45Q4V1dXTpw4QU5OTpev19XVsWPHDm688Uaam5ut16qtrWXevHnk5ORQVlbWq7oEQRAuNGJKniAIwiBatWoV99xzD9nZ2QQGBgJw7bXXYjabefLJJ1myZAkeHh79rj8sLKzf557q8M6aNavL152dnc9Yx//+9z8SEhKora21BjljxoxBr9ezZs2aThnuxo8fz4svvojJZOL48eO8+OKL1NfXdzuVUB6AvEWn6uhqXdevKRQKwsPDOxyLiooC6LT269fvfVFREQBDhw7tVO/w4cPZsmULra2ttLS00NTURExMTI9tycnJIS0trUNQdrqqqioAHnjgAb766ivmz59PQEAAc+fO5cYbb+Tyyy+3ln3++ee56qqriIqKIiYmhssvv5zbbruNUaNGAZCbm4ssyzzzzDM888wz3V4vICDgjHUJgiBcaETAJAiCMIjee+89xowZYw2WTrnyyitZvnw5R44c6TKxQW91NbrUXWBgMpk6fH0qccAXX3yBr69vp/IqVc9/QnJyckhKSgIgMjKy0+srVqzoFDB5enpa73fevHkMGzaMRYsW8dZbb/HII49Yy50KIuvr6zu9d311aqTL09PzrOr5tf6O7PWW2Wxmzpw5PPHEE12+fiqQ8/b2JjU1lS1btrBp0yY2bdrEp59+ytKlS/nss88AmDZtGnl5eXz33Xds3bqVjz/+mDfeeIMPPviAu+++2/qz8NhjjzFv3rwurxcREdGrugRBEC40ImASBEEYRJWVlV1OKzMYDIAl6UFPejMq8munrtfQ0ICrq6v1+KkRkFOGDBkCWDrc/QnaVqxYgY2NDV988QVKpbLDa4mJibz99tsUFxcTHBzcbR0LFy5k+vTpvPTSS9x7773WJBjDhg0DoKCggJEjR/a5bacrKCjA09Oz25Ga05nNZvLz863BCEB2djbAGRNghISEAJCVldXptczMTDw9PXFwcMDOzg5nZ2eOHz/eY31DhgyhpaWlV98btVrNFVdcwRVX
XIHZbOaBBx7gww8/5JlnnrEGOu7u7txxxx3ccccdtLS0MG3aNJ577jnuvvtu66iajY1Nr67XU12CIAgXGrGGSRAEYRBFRUVx5MgRa6f7lC+//BKFQnHGaUwODg40Njb26ZqnAqFTa1zAslfSqdGGU+bNm4ezszMvvfSSNYA7XXV1dY/XWbFiBVOnTuWmm27i+uuv7/Dx+OOPA5b7PJMnn3yS2traDpvXxsXFoVarSU5OPuP5Z3L48GEmTpzY6/Knry2TZZl3330XGxsbZs+e3eN5fn5+xMbG8tlnn9HQ0GA9fvz4cbZu3cqCBQsAy7S/q6++mvXr13d5f6emEN54443s37+fLVu2dCrT0NBgDbZPTYU85fSfK51O12UZR0dHIiIirK97e3szY8YMPvzwQ8rLyztd7/SfhTPVJQiCcKERI0yCIAiD6PHHH7fuc/TQQw/h4eHBDz/8wKZNm7j77rvx9/fv8fy4uDhWr17NI488wrhx43B0dOSKK67o8Zy5c+cSHBzMXXfdxeOPP45SqeSTTz7By8uL4uJiazlnZ2fef/99brvtNsaOHcvixYutZTZs2MDkyZO7TUxx8OBBcnNzOyRIOF1AQABjx45lxYoVPPnkkz22d/78+cTExPD666/z4IMPYmNjg62tLXPnzmX79u2d0nmnpaVZ9wbKzc2lsbGRF198EYDRo0d3eH+qqqpIS0vjwQcf7LENp9ja2rJ582aWLVvG+PHj2bRpExs2bOD//u//ejVC9eqrrzJ//nwmTpzIXXfdZU0r7uLiwnPPPWct99JLL7F161amT5/OPffcw/DhwykvL2fNmjUkJibi6urK448/zvfff8+iRYu4/fbbiYuLo7W1lWPHjrF27VoKCwvx9PTk7rvvpq6ujlmzZhEYGEhRURHvvPMOsbGxDB8+HIDo6GhmzJhBXFwc7u7uJCcns3bt2g7fv3//+99MmTKFkSNH8rvf/Y7w8HAqKyvZv38/paWlHD16tNd1CYIgXFAGNUefIAiCIB88eFCeP3++7OvrK9vY2MhRUVHy3//+d9lgMJzx3JaWFvnmm2+WXV1dZcCaQvtUWvE1a9Z0ed7hw4fl8ePHy2q1Wg4ODpZff/31TmnFT9m5c6c8b9482cXFRba1tZWHDBki33777XJycnK37Xr44YdlQM7Ly+u2zHPPPScD8tGjR2VZtqQV7y4d9fLly2VA/vTTT63Hvv76a1mSJGv67FNO3UdXH8uWLetQ9v3335ft7e3lpqambtt5yrJly2QHBwc5Ly9Pnjt3rmxvby/7+PjIzz77rGwymazlTqUVf/XVV7usZ/v27fLkyZNlOzs72dnZWb7iiivk9PT0TuWKiorkpUuXyl5eXrJGo5HDw8PlBx98UNbpdNYyzc3N8lNPPSVHRETIarVa9vT0lCdNmiS/9tprsl6vl2VZlteuXSvPnTtX9vb2tn6/7733Xrm8vNxaz4svvignJCTIrq6usp2dnTxs2DD573//u7WOU/Ly8uSlS5daf1YDAgLkRYsWyWvXru1zXYIgCBcKSZYHIMWQIAiCIJxnJpOJ6OhobrzxRl544YV+1TFmzBhmzJhh3fi3J7fffjtr166lpaWlX9cSBEEQLkxiDZMgCIJwQVIqlTz//PP8+9//7lcQs3nzZnJycnjqqafOQesEQRCEi4UYYRIEQRCEXhAjTIIgCJcmMcIkCIIgCIIgCILQDTHCJAiCIAiCIAiC0A0xwiQIgiAIgiAIgtANETAJgiAIgiAIgiB045IKmGRZpqmpCTELURAEQRAEQRCE3rikAqbm5mZcXFxobm4e7KYIgiAIgiAIgnABuKQCJkEQBEEQBEEQhL4QAZMgCIIgCIIgCEI3RMAkCIIgCIIgCILQDREwCYIgCIIgCIIgdOOCCZiee+45JEnq8DFs2LDBbpYgCIIgCIIgCBcx1WA3oC9GjBjB9u3brV+rVBdU8wVBEARBEARBuMBcUBGHSqXC19d3sJsh
CIIgCIIgCMIl4oKZkgeQk5ODv78/4eHh3HLLLRQXF/dYXqfT0dTU1OFDEARBEARBEAShty6YgGn8+PEsX76czZs38/7771NQUMDUqVN73IT25ZdfxsXFxfoRFBR0HlssCIIgCIIgCMKFTpJlWR7sRvRHQ0MDISEhvP7669x1111dltHpdOh0OuvXTU1NBAUF0djYiLOz8/lqqiAIgiAIgiAIF6gLag3T6VxdXYmKiiI3N7fbMhqNBo1Gcx5bJQiCIAiCIAjCxeSCmZL3ay0tLeTl5eHn5zfYTREEQRAEQRAE4SJ1wQRMjz32GLt27aKwsJB9+/ZxzTXXoFQqWbJkyWA3TRAEQRAEQRCEi9QFMyWvtLSUJUuWUFtbi5eXF1OmTOHAgQN4eXkNdtMEQRAEQRAEQbhIXbBJH/qjqakJFxcXkfRBEARBEARBEIReuWCm5AmCIAiCIAiCIJxvImASBEEQBEEQBEHohgiYBEEQBEEQBEEQuiECJkEQBEEQBEG4QBmNRoqLizGZTIPdlIvWBZMlTxAEQRAEQRCEX5hMJlauXEl+fj5BQUFce+21uLm5DXazLjpihEkQBEEQBEEQLkDbt28nPz8fgJKSEt5//32OHj06yK26+IiASRAEQRAE4Wd6vZ7ExESysrIGuymC0KO0tDT2798PwPz58wkKCkKv1/PNN9+wbt06tFrtILfw4iH2YRIEQRAEQQAqKipYu3YtNTU1KJVK/vSnP+Ho6DjYzRKETk6ePMknn3yC0Whk2rRpzJo1C5PJRGJiIj/99BOyLOPi4sK1115LSEjIYDf3gidGmARBEARBuKTJskxSUhIfffQRNTU1gGVtyOHDhwe5ZYLQWUtLC6tWrcJoNBIVFcWMGTMAUCqVTJ8+nTvvvBNXV1caGxtZvnw5O3bsEAkhzpIImARBEARBuGS1t7fz1VdfsWHDBkwmE5GRkcyfPx+ApKQkjEbjILdQkGWZuro6zGbzYDdl0JlMJtasWUNTUxMeHh5ce+21KBQdu/NBQUHcd999jB49GlmW2b17N59++il1dXWD1OoLn8iSJwiCIAjCb47JZKKoqIjMzExMJhNxcXH4+/sP6DVKSkpYu3YtjY2NKBQK5syZw4QJEzCbzSQmJtLc3MyJEycYPXr0gF5X6JsDBw6wZcsW3NzcmDx5MqNHj8bGxmawmzUotmzZQlFREWq1msWLF2Nra9tlOVtbW6655hoiIiL44YcfKC0t5YMPPmDBggWMHj0aSZL63Qa9Xk9ZWRlKpZLg4OB+13MhEWuYBEEQBGEQ7d+/n5ycHK677jocHBwGuzmDymg0UlBQQHp6OllZWbS1tXV4PTw8nMmTJxMeHn5WHT6z2czevXvZsWMHsizj5ubG9ddfT0BAgLXMnj17+PHHH/H19eXee+89q+sJ/afT6XjzzTdpb2+3HnN0dGTixInExcV1GzBcjFJSUvj+++8BWLJkCUOHDu3VeQ0NDXz99dcUFxcDMGLECBYtWoSdnV2vzm9qaqKkpITi4mJKSkqoqKiwjvbdcccdl8QaKREwCYIgCMIg0el0vPrqqxiNRiZPnsycOXMGu0ndqq+vJzk5mdLSUjw8PPD19cXPzw8fHx/UanW/6zUYDOTm5pKRkUFWVhY6nc76mp2dHcOGDcNoNHL8+HFOdVn8/PyYPHkyw4cPR6lU9ul6LS0tfP3119ZUzDExMSxatKhTx7utrY3XX38do9F4yXQKf4sSExPZvn077u7uJCQksG/fPpqamgDQaDQkJCQwfvz4iz45R0lJCcuXL8dkMjFz5kymT5/ep/NPjZru3LkTWZZxdnbm2muvJTQ0tFO5qqoqa3BUUlJCQ0NDp/pUKhVGo5EhQ4Zw2223ncWdXRhEwCQIgiAIgyQ1NZVvv/0WsHT+/vSnP/2mnpibzWby8vJISkoiOzu723IeHh74+fnh6+trDaR6Gi3T6XTk5OSQnp5OTk4OBoPB+pqjoyPDhw9n+PDhhISE
WAOi+vp69u/fT0pKinVdkZubG5MmTSI2NrZXU7Ty8vL4+uuvaW1tRaVSsWDBAsaMGdPt6NH69es5fPgww4cP56abbjpj/cLA0uv1vPnmm7S1tXH11VcTGxtrDZ4TExOtCTpUKhVjxoxh0qRJg7JpqyzLaLVampqaaGpqor29nZCQEFxcXAak/ubmZj788ENaWloYPnw4N9xwQ6d1S71VWlrKunXrqK+vB2Dq1KmEhYVZA6TS0tIODy0AJEnC29ub4OBggoKCCA4ORpZl3n77bWRZ5ne/+12H0dmLkQiYBEEQBGGQfPHFF+Tl5Vm/vuyyy5gyZcogtsiivb2dI0eOkJyc3GGheHh4ONHR0TQ2NlJeXk5FRQUtLS1d1uHk5GQNnnx9ffHy8qKsrIz09HTy8vI6ZO1ycXFh+PDhREdHExgY2GNnsLW1laSkJA4ePGidpmVvb8/48eMZN24c9vb2nc4xmUzs3LmTxMREALy9vbn++uvx9vbu8X2oqqrivffeQ5Ikfv/73w9KZ/xStm/fPrZu3YqbmxsPPfRQh9FEs9lMVlYWiYmJlJWVAZaO/ciRI5k8eTI+Pj4D0gaz2UxbW5s1GDr9o7m52fr56UE/WDLWTZgwgalTp57VQxCj0cjy5cspLS3Fy8uLu+++G41Gc1b3pNPp2LRpE6mpqV2+rlarCQwMtAZHAQEB1nuQzTKGsha0OfVsOraTjPoChngEc0385SjsVCjsVEg//6uw/flr1YWfY04ETIIgCIIwCJqbm3n99deRZZlp06axe/duHBwc+OMf/zhoC9pPnjxJUlISx44ds47iaDQaYmNjGTduHJ6enp3OaWlpsQZPFRUVlJeX9yobl7u7O9HR0QwfPhx/f/8+rxHS6/UcOXKE/fv3W6cM2djYEBcXx8SJE61P9xsaGli7di2lpaUAxMfHM2/evF6/x6eC2okTJzJv3rw+tVHoP4PBwJtvvklraytXXnklY8eO7bKcLMsUFBSQmJhonWYJEBUVxZQpU7pNSmAymWhra6O5uZmWlhZaWlqsn5/+b3Nzc6+z89nZ2Vn7l5WVldZj06dPJz4+HpWqb7nWZFlm/fr1pKSkYGtry+9+9zs8PDz6VEdPTpw4wdatWwGswVFQUBDe3t4dglNjvRZdTgPanHq0uQ3I7Zb/GxqkVtaqD4AE1+rG4y53PS1SslF0CqJsh7nhOGFgk7icSyJgEgRBEIRBsH//frZs2UJgYCC33347b7/9Nk1NTSxatIj4+Pjz1g6j0ciJEydISkqyBhUAPj4+jBs3jlGjRvV5jZJOp6OysrJDIFVdXY27u7t1JMnb23tAEimYTCZOnDjB3r17rZ1UhULByJEjCQoKYvv27Wi1WjQaDVdeeSUjRozoU/05OTmsWLECjUbDI488ctZP94XeOXDgAJs3b8bFxYWHH364V8FGWVkZe/fuJT093XosJCSE4ODgToFRW1sbfekCOzk54eTkhLOzc6ePU8dPBeGyLJOdnc22bdus0wbd3d257LLLGD58eK9/7pOSktiwYQOSJHHLLbcQERHR6/aeDbPOiC6vEW1OPbqcBow17R1elzRKNBGuqP0cWH9iBzn1RUQ5BjPHOR6z1oi53fIha7vf+8lhgh9uV5+f+xkIImASBEEQhEHw4YcfUl5ezoIFC0hISLB2EN3d3XnooYf6vUahtxoaGkhOTiYlJcWajU6hUBAdHU1CQgJBQUEXVGY4WZbJy8sjMTGRwsLCDq8FBARw/fXX92tKndls5t///je1tbXMnz+f8ePHD1CLhe4YDAbefvttmpubWbhwIePGjevT+TU1Nezdu5ejR4/2ODokSRIODg44Ojri5OTU4d9Tnzs7O+Po6Njn5CJgCeaPHDnCzp07aW1tBSwjOXPnziUoKKjHc4uKivjss88wm83nfKqubJbRlzZbR5H0xc1gPi08UIA6yBnbSFc0kW6oA52QlJb/G8rLy/nwww+RJImHHnqowwiYbJaR
tUbMWtPPQZQBc7sJud2IyssOTdjArPE6H8Q+TIIgCIJwnlVXV1NeXo5CobCOeIwdO5Zdu3ZRV1dHRkZGn0dCestsNrN+/XpSU1OtT9idnZ2Jj49n7NixF2y2MUmSiIiIICIiwjrSkJOTQ0JCArNmzepXhxcsQeSECRPYsGEDBw4cYNy4cec8mL3Upaam0tzcjJOTE2PGjOnz+Z6enlx11VXMnDmT5ORk2tvbuwyIHBwczun3UqlUEh8fz8iRI9m7dy/79++npKSE//73v0RHRzN79uwup9g1Njby1VdfYTabGTFiBJMnTz4n7TPrTDSsz6P9RK11mp217R622Ea6WYKkIa4obLsOGfz8/IiMjCQnJ4e9e/dy5ZVXWl+TFBKSvQ0K+wt/zywRMAmCIAjCeZaWlgZARESENZucWq0mISGBXbt2kZiYSHR09DkZ4UlJSeHIkSMAhIWFkZCQQFRUVL8Dit+igIAAbrzxRmRZHpD3cPTo0fz444/U19eTk5PT6/1vhL4zGo3W5BxTpkzp87qf0zk7OzNr1qyBalq/aTQaZs2aRXx8PDt37iQ1NZX09HQyMzMZN24c06ZNs/4/YDAYWLVqFa2trfj4+HDVVVeds5Hehu9yaUupAkCyVWI7xBVNlBu2Ea6oPHq3RxNYMu3l5OSQmprK9OnTByw74G+JeEQiCIIgCOeR2Wzm2LFjAIwaNarDawkJCdjY2FBeXt5hAftAaW1tZfv27QDMmzePZcuW9WsvowvFQHU01Wo1cXFxgGVtjXDuHD16lMbGRhwdHbtN9HChcnZ25qqrruK+++4jIiICs9nMwYMHefvtt0lMTMRgMLB+/XrKy8uxs7Nj8eLFZ7XHWU9aUyotwZIEHrcOx/+ZiXjcFo3jeL8+BUsAwcHBhIaGYjab2bdv3zlp72ATAZMgCIIgnEenNoJUq9VERUV1eM3BwcHaSTz1lH0gnUqA4OPjQ0JCwoDXfzEbN24ckiRRUFBARUXFoLVDNsuYWvToy1poz6yj5VA5usLGQWvPQDKZTOzZsweAyZMnD1q2yHPNx8eHW2+9laVLl+Lr64tOp2P79u3867XXSEtLQ5IkbrjhhnOWxt5Q3UbDt7kAOM8Oxi7G07omqb+mTp0KwOHDh7vdauBCJqbkCYIgCMJ5dGo63vDhw7t8ejxx4kSSkpIoKCigrKxswDaELC4utk7FW7hw4UU7qnSuuLq6Eh0dzYkTJzh48CBXXXXVgF/DrDNhatJhatJjatJjbtJhatR3OGZq1oPpV/m6FBK+j8Wjcv/tbHrcH8eOHaOhoQEHBwfriN7FLDw8nHvuuYe0tDS2b9tGy8+JIWJCgwkPDz8n15QNZupWZiLrzWjCXXCa1XXa9b4KDw8nICCAsrIyDhw4wGWXXTYg9f5WiBEmQRAEQThPTqXwhs7T8U5xdXUlJiYGOPtRJrPehKGildZjVaz/6lsAhtuHYrOygppPjyMbere/jGAxYcIEwBL0DtRTdFOrgcYthZx84QAnn91H5b8OU/PRMepXZ9G4qZCWfSdpP16LvrgZU4POEixJoHC0wSbAEaWrBswyzXtKz3yx3zCz2czu3bsBy0ODczUV7bdGoVAQEx2NZ1URmopiNBVFFGxcR+Kqz/uU9ry3GjbmYyhvReGgwn3xUCTFwExblSTJOsp06NAh66bSFwsxwiQIgiAI50lubi5arRZHR0fCwsK6LTd58mTS0tLIyMigpqamyw1jTzHrTBhr2zHWai3/1lg+N9W2Y2rSA3BcWUy1TR0aWcXYuiBM6DA16GjYkH9B7YUy2AIDA61P0Q8fPsz06dP7XZepWU/znlJaD5Qj638JXCWNEqWz+ucPDUoXNUonNUoXDYpTx5xskJSWZ97avAZqPjpGa1IlzrODUTpemIHG8ePHqaurw87Ors9pxC90u/73CbXFhbg4uzBi+myS13/NwW++ormmmrn3/R6lamCmJrYfr6F1fzkAbjcORek8sHuKRUVF
4e3tTVVVFYcOHTqr34/fGhEwCYIgCMJ5cmo63siRI3tMZ+zj40NUVBTZ2dns27evQ6reU/SlzdStyuq0qeSvtdsaOSwVgAxThozDf0QMstFMwze5tB4oRxPmgv1or7O7sUuEJElMmDCBdevWkZSUxOTJk/ucxc3YqKNlVykthyrAaAmUbAIccZ4ZhCbSFYWmb/Vpwl2wCXTEUNpCy96TuMwL7dP5vwW/Hl26lDYHzk06QOqWHwCY/+AjhMXG4e4fyLaP3iV9z05aGuq58pH/Q2Nvf1bXMdZrqVubA4DjtADshrqfddt/TaFQMHXqVNatW8eBAweYMGHCRfO9FFPyBEEQBOE8aG9vJysrC+h+Ot7pTm1UefToUZqamjq8JhvN1H2VbQ2WFPYq1EFO2I/xxvmyYNxvGorXA6Pxe2YCaSPrMMhGAgICmHzrHBzifHAc74fTDMvGmfVf52A4Q9Al/CI6OhonJydaWlqs0yt7w1inpf7rHCr+mUTLvpNgNKMOdsLj9hF4PxSLXYxnn4MlsARxzj9/L1v2l2PWGc9wxm9Peno6NTU12NraXlLJSJpra9jywVsAxC26hrBYy7qtkbPmcs0Tf8VGY0vxsVRWP/sEzXU1/b6ObDJT92UmstaITZATLnNDB6L5XRoxYgTu7u60t7dz+PDhc3ad800ETIIgCIJwHmRkZGAymfDy8sLX1/eM5YODgwkODsZkMnVKZd28pxRjVRsKBxv8nkrA/68T8X4wFvebhuJ8WQj2Y7zRBDtTVFliTWG+cOHCDqNaznNCUIc5I+tM1K3IQDaYBvaGL1JKpdLaqd+/f/8Z15kYatqpW5NNxWtJtB6qAJOMOswFz7tj8Lp/NHbD3M86/blttAcqLztkrZHWg4OXwa8/Th9dmjBhAra2F3biit4ym0xsePtVtC3N+IRHMnXJ0g6vh42J56bn/oG9iyvVxYV8+ZfHqSkp6te1mrYVoS9uRrJV4rFkGJLq3HX/FQqF9WHPvn37MBgMnco0VF5YP6MgAiZBEARBOC9OTccbNWpUrzvIpzoeycnJ1kXUxtp2mn4sAcBlUThKl66nvBiNRjZu3AhYUmL7+/t3eF1SSngsGYbCwQZDeSsN6wd+36eLVVxcHCqVioqKCoqLi7ssY6hspfbLTCr/lUzb4UowgybSFa97R+F97yhsI9wGbJ8oSSHhNC0QgObEMmTjhZPMIysri6qqKtRqNePHjx/s5pw1U5OOutVZtB3reUTowNerKMs8gY2tHQv/8HiX65R8wiO4+cXXcPMPpLm2mlV/fYKSE2l9ao82u57mnywJQdyuizwvmRRHjRqFs7MzLS0tpKamdngtN+kAn/zxHvZ+teKct2MgiYBJEARBEM6xxsZGCgsLAcv6pd6KjIzE29sbvV5PcnIysixT/10eGM1oIlyxj+1+7dGBAweoqanBwcGBWbNmdVlG6azBffFQkKD1UAVtqVV9uq+LkaGilcq3Uij7234qXk2i8t+pVH9ynNpVmTR8n0fjtiJMR+qJDrDsobVvxx4MNe2YWg3IZhl9WQu1/0un8o0U2o9Wgwy2w93xfjAWr7tGoglz6XTNppp2tn16grLs+n63236MNwpnNeYmPW1HLozvoyzL7Nq1C4Dx48djZ9e3DVN/ixq+y6PtSBV1KzNoTep6JKUk/RgH1q0GYM7dD+Dm699lOQAXb1+WPP9P/KOGo2trZd1LfyVz765etcXUrKfuK8s0YIfxvtiPPD9rFVUqFZMnTwZg7969mEyW0eu2xga2/ucdZLMZo153XtoyUETSB0EQBEE4x05NiwsJCcHV1bXX50mSxOTJk/nmm284cOAAoxyGoMuuB6WE61VDuh2haGhosHZE58yZ02NH1DbSDaeZQTTvKKH+6xxs/B2x8T67BeYXKm12PbUrMpB1lg6esd0Itdouy0ZKTqRpILswl9x/7cFJtgMJOG2Gnl2MB06zglH7O3Z7zfZmPd+/nUpjVTtlWQ3c
8rcJ2Gj6vkeWpFLgNCWAxo0FNO8uxT7OZ8BSRp8r2dnZVFRUYGNjw8SJEwe7OWdNm9tA+4layxcy1K/LQTbLOI73s5Zpb25i4zuvIctmRkyfzfCpM89Yr52TM9c/8yKb3vkXOYf2seHtV2mpqyVu0TXd/h8gm2XqVmdhbjFg4+uA66Jzs69Td8aOHcvu3btpaGjg2LFjjB49mq3/eZf2pkY8g0KYfNNt57U9Z0uMMAmCIAjCOXb6dLy+iomJwcXFhdbWVpJ+sOzL5DwzCBuv7oOazZs3YzAYCA4OZvTo0We8hvNlIWjCXZD1ZmpXZGDW/3bWM5Vk1pG5v/yc7ElzupZD5dQsP46sM6EJd8H792Pwum8UHkujcbshCpeFYTjNDMJhgh92ozzxHRJIkI0XsgTp6jJLJTIggV2sFz5/GovHrdE9BksGvYkN76XRWGWZbtnaoOPItq6n+PWGQ4Ivkq0KY3U72vTaftdzPpw+upSQkID9WWaBG2yySabxhzwAHCb64TjZMmrU8E0uLftPWsrIMls+eIuWulrc/AKYded9va7fRq1h0Z+eZMz8KwBLKvKdn/0Hs7nr39XmXSXochuQbBS43zwMyeb8blR9ehCcmJjI8Z+2k5d8AIVSxfyHHkVlMzCp0s8XMcIkCIIgCOdQRUUFVVVVKJVKoqOj+3y+Uqlk0qRJbNq0iaPGAqI9Q60Z7rqSnZ1NZmYmkiSxcOHCXq2TkRQS7kuGUflWCsbKNhq+z8P9+qg+t3Wg1Ve08sO7RzEbZVRqJRFx3gN+Ddks07S10LrOw36MN27XRfZqYfy0HFtWrFhBtm0FC59cjI1ZiaSQUNifuTNoNpnZ+vEJKgua0DioiJ0dxMHvCziytYjoyf44uvU9HbPCVoXjRD+ad5bQtKsU2xEeA7ZO6hRjg466VZmoXDU4zQzCxsehX/Xk5eVx8uRJVCrVRTG61JpUjqGiDYW9Cpc5IUh2KlBItOwpo+G7PGSzTG5zCnnJB1GqVCz8wxOobfs2BVGhUDJz2T04e3qz64v/cmTTelpqa5n/8KPYqH/5edEVNtK0zZIgwvWqIYM2YhwfH09iYiI1NTVsWXMECZh04y14h57f0a6BIEaYBEEQBOEcOjW6FBkZ2e81GiO8IrCVbWhWtFMRK3fbmTcYDGzatAmwZBzz8fHp9TWUTmrcFw8DCdqSK2k9XNmvtg4U2Syz84tMzEbLyNLetTkYdAM78iUbzNStyrQGS06zg3G7MarXWcSGDBmCh4cHOp2Oo8fSUDqqexUsybLM7lXZFKbVoLRRsPD+UcTND8VviAtGvZkD3+X1+54cJ/uDSoGhpBldfmO/6+mKbJapXHWcQyVpHE1Lo+yNJGr/l47+ZEvf6jltdGncuHE4OnY/CnchMLcZaNpqCVCc54SgsLdBkiRcFoThNN2SjKNxfT4nv00FYNotd+ATNqRf15IkifhF17DwD0+gVKnIObSPtS/8hfbmJmtb6r7MAjPYx3phH9f7/wMGmq2tLQk/b0Lc5uyOX9Rwxl157aC152yIgEkQBEEQzhGz2Wxdv9Sf6Xhg2UOl9fsioo2Wjteh3CPdTk9LTEykvr4eJycnZsyY0edr2Ua44jw7GICGb3MxVLb2q80D4fjuMsrzGrHRKHF009BSryN5U+GA1W9qNVD98THa02pAKVmm3c0J6dOIjEKhYMKECYAlyYbZ3LvsdIc3FXFiz0mQYO6dI/CLcLWsV7shEoCsAxVUFTWdoZauKR3VOMRbOsnNu0r7VUd3yrdm883Jn0i2yWOXOp2VmkS2Z+4l453d1Hx2An1Jc6/qKSgooKSkxDp6eqFr2l6Muc2Iysceh4Rf1itJkoTz5aE4TLMcG+06g0nR1zFmfueNqPtq2KRpXP/0i2gcHDiZncGXzzxO5t7d1KzOxNSoQ+Vph+s1EQM+wthXts11YDZhtnVg+KLrUSjO79TAgSICJkEQBEE4R4qKimhubsbW1pbIyMh+1dGy
9ySGilZi1KHY2NhQUVFBXl7nEYja2loSEy1rnC6//HI0mr5P6QJwmhWMJsIV2WCmdkXmoKxnaq7Tsv+bPMymRgIjSwmMKkOWDaRuK6ahsu2s6zfUtFP9Xir6oiYkWyWed8bg0M8n8aNHj8bW1pb6+npycnLOWD5zfzkHv7ekcJ96YxThY37JXOYT6kzUeEs7Etfk9HvdltPUAJBAl12Pvqxvoz/dKUnNY8X+r6lRNGNnY4urqyt6yUiGqoxvNIdYlbeZXR/8QNnHR9CdIdg7NboUFxeHk5PTgLRvsBgqW2k5YFmj5HpFOJKyY4AiSRKHijdwvN7yuxnUHkHzzpIBuXZgdAyL//ZPnDy8qC8vI3v5j+izGjBjpmlEG0a58x5I51NNSRGH1q7Epr4agJTjx8/5WsRzRaxhEgRBEIRz5NR0vOjoaGz6scjZWK+1rkXwWTCUuNp2Dhw4QGJiIhEREdZysiyzadMmTCYT4eHh/VordYqkkHBfPJTKt45grGqj4dtc3G6IOi9Pqo0GA6UZx/nx0820VGUgm+vI2GN5Tal2wqSaxO5VLlzx+zH9bo+usJHaz9MxtxlRumnwvCPmrNZ4qNVq4uLi2Lt3LwcOHGDo0KHdli0+UcvOLzIBGDsvmFEzAzuVmXj1EPJTqinPbSQvpbpf67ZUHnbYjfKi/Wg1zbtL8VgyrM91nC7zRCbrvl2DQTLhauPEbffdjpubGwUFBaSkpJCRkUENzdQosjhYkkP4Rz6M9I0icn4stkPcOtRVWFhIUVERSqXSmnr6QiXLMg0/5IPZsnmwbYRbpzKZe3dxfOc2kCSiZ8xGcVRnmb5nlnG+LOSs2+AZFMItL73O8bWb8MuxJJpIrf2RnOUpqFZqCBsTR9SEKYSPHdfnNVNnw2Q0sOnd1zEZDET4+5JtVFJSUkJhYSFhYWHnrR0DRQRMgiAIgnAOGAwG0tPTgf5Nx5Nl2bJY3GBGHeqMfbwPE5smcujQIQoLCyktLSUw0NLhzsjIIDc3F6VSyYIFC846uFE6qvFYMpTqj47RllKFJswFh3G+Z1Vnd5pqqig4cpiC1GSKjx3FoPsljbekUBAwNJqmmmqaqitBv4Xcg4c5+M3tTLh2Rp+v1Xa0irqvssEkYxPkhOfSaJRO6rO+h3HjxrFv3z4KCgqoqKjA17fze1Vd3Mym/xzHbJaJSvBhwlVdr2FxdLMldm4wyRsK2f9NLqGjPFD1I8OZ0/RA2o9W055WjXFuCCqPvneWZVnm4MGDbNm8BRkZf8mDRdddzta3XkGSJDyDQogMDiX2ioWcbGgiLe0YtfV1ZKvKya4px+2zZKJdwomfNwnXEb5IksTu3bsBGDNmDC4unfekupBoM+rQ5TRY0vwv7BwENFRWsO2jdwGYcM2NBN+UQJNfCU2bC2naXoxslnHu4zTQrtjZOhFSH4lR0iKF2uI2MhzXg+U0VJaTc3AfOQf3oVKf3+Bp/9pVVBXmYevkzML7fo/D3n0kJyezZ88eETAJgiAIgmCRnZ2NTqfD2dmZ4ODgPp+vPVGLNrPOsr7m57UILi4ujBo1itTUVBITE1m8eDE6nY7NmzcDMHnyZDw9PQek/ZpwV5znhtC0pYj67/JQBzlh49u/jGinMxkNlGVmUJCaTMGRZGpLO6bRlhQOKFShDJ04gZnL5mDr4IjRYCB183oSV3+JyVDD3tWvUZqxgxm33YlncOgZrynLMs0/ldK0pRAA2xEeuN80FIV6YNZTuLq6Eh0dzYkTJzh48CBXXXVVh9ebatpZ/+5RjDoTgcPcmLV0eI97JI2dG0JG4kmaarSk7Shl7Ly+j0So/R3RRLmhy66neU8ZbldHnPmk05hMJjZv3kxSUhIAUSZ/Zi+aybdvvUhLnSVleVnmiQ7nOLh7EB4QQrvGmarWNuoVrextPsaBNScIX+9PaMwQ8vPzUSgUTJkypc/39FsiG800brBMrXSaGtApIDUZjWx4+5/o29vxHxrNxOtvBsB5
RhCSQrLsl7WjxDLSNC+0z0GTWWdEm1lP+4katJn1yHoTShcNPktjCbAfx5Sbl1FVmE/2/j1kH9h7XoOnk9kZHPp2DWDZmNfRzZ3Jkydz+PBh8vPzOzzsuVCIgEkQBEEQzoHT915SKPq2ZNisNdLwvWWdktO0wA6pmydPnkxqaiqZmZlUV1eTmppKU1MTrq6uA94JdZoehK6gCd3PG7p6PxSLQtP3roMsy2Tt2032gb0UHTuCvr3d+pokKfCLGkZYbByVRW6UZCrxDHRi7r3xKJWW901lY0P8FdcSNXEmXzz1NtqmwxSlpfD5E6nEzLyMSTfeiqObe9fXNpmp/yaXtmRL1j/HKQG4LAgb8E1dJ0yYwIkTJ0hLS2P48OFERkYiSRLtLXrWv3OU9iY9HgGOXH7vSJRnyMJno1Ey4eoh/PhZBsmbChk20Q97576PhDnPCKQ6u57W5AqcZwf3ejRNq9Wydu1acnNzAUgwRDA2dhTfLrcESx6BwYy78jpqy0qoKS6kpqSI5ppqWutqaf05mLJTKMHVD9k9gFYbEzm6UnIOW5JQDFUHYfi6lFrbchR2KiRbFQpbFQpb5WlfK1HYqpDsLJ9LauWgJzA4XcvekxhrtSicbHCa2TnN/97VX1CRm43GwYGFDz+GQvlLcO40LRAUEo0/5NP8UymyWcZlftgZ78/UakCbXkv7iVq0ufVg/GU9kNJVg8ctw61ZGiVJwidsCD5hQ5iyZBlVBXlkH0jsMniKmTmHqTcvG5DAyaDVsunfryPLZoZPmUHUBMv/SW5ubowaNYqjR4+yZ88elixZctbXOp9EwCQIgiAIA6ytrc2aAKA/0/GathZhatKj9LDFeVbHzpiXlxdDhw4lKyuLjRs3UlRkWeM0f/581Oqzn152Okkh4X7TUKreSsFY3U79N7m43zS0zx3XlI3f8dPnH1u/tnN2ISw2jrAx8YSMGoOdoxOFaTUc3pqGQgGzlg6zBkunc/Z04/L772fj+3swafdi0mdzbMdWMvfuJv6Kaxl3xbXY2Npay5u1RmpXZFimTUngeuUQHCf69/v96ElgYCDBwcEUFxezcuVKfH19mTRxMtmbtTRUtuHoruGKh0ejsetd12voeF/SdpZSXdzMofX5zLil4zokg8HAsWPHSElJwWw2M2rUKEaNGtVhA1h1mAvqICf0Jc207D2Jy+WhZ7xuQ0MDK1eupKqqCpWkZIYumnDPIL7f+aY1WLrxry9h7+La4TxdWys1JcXUlhRRXVxo+bekiPbcUlztPbD1iqTWTkZCYmRjALqG+l69D1ZKCU24C3YxnthFewzIVMr+MjXradphGRl1uTys00OEwqMpJH2/DoB59/4BZ6/O69CcpgQgKSUavsujZXcZmGRcFoV3+t0yNujQnqih/UQtuoJGy+bIP1N52mEX44HdCE9sAh27/b2UJAmf8Ah8wiO6DJ5St/xAQWoyl9/3RwKjY87mrWHX/z6hoaIcRw/PThvzTp06laNHj5KVlUVlZWWftj0YbJJ8oaar6IempiZcXFxobGzE2dl5sJsjCIIgXKSSkpLYsGEDvr6+3HfffWc+4TT60maq/p0KMnjeGYNtVOeF5CUlJfz3v/+1fj106NBz+sRWV9hI9X/SwAyu10TgON7vzCf9rKakiP899UdMBgOx8xYyYvpl+IQNQTpt1E3fbmTl3w7S2qAjdk4wk6/rfvqYLMusfzuVkox6vINbMLTtojwnCwAHN3cm33grI2bMxtxkpObT4xgr25DUCtyXDMNuuEf/34ReaGtrY8+ePSQnJ2MwWDKUKY12OBtCufnh+XgF9q3vcTKngW/+lYIkwU1/ScAjwJH6+nqSkpI4cuQI7aeN1IFlk+Phw4czZswYwsLCUCgUtJ+oofaLDCRbJX5/TkBh233AVlpaypdffklraysOansuax6Bl9KFnxrWUFmdh0dgMDc883ccXDv/THZFlmXaGhusAVTRkaM051bhZOvMtMV3olHZ
I2uNmLUmzO3Gnz//+Wvtz1+3m8D8q66qBOpQZ+xGeGIX44nKtX8ZIfurbm02bcmV2AQ64v1AbIfRytaGej5/4mHaGhsYPWc+l939YI91tRwop+Fby0iew0Q/XK8cgrGmnfYTtbQfr8FQ2jHLoY2/w8/37YHK2/6sRt1kWaYo7Qhb//MOzTXVIEnELbiSyYuXdtgIt7cKUg/z9cvPAnD90y8SMiq2U5mvvvqK9PR0YmJiuP766/vd9vNNBEyCIAiCMAAqcrMxm834RkTy6afLKSkpYe7cuX3aZ0Y2yVS9l4qhrAW70V49Zjf79NNPKSoqQqVS8eCDD+Lm1rtObH817yqhcVMhqCTcro7AfqzPGae1mYwGVjz9KNWF+YSNieeaJ5/tsoP308osTuwuw9nLjsXPJGBzhrVF9RWtrHrhEGaTzIL7R6Jvy2LPl8tprKwAICRoFOOdFiC1yyic1HjePgJ1wPnbHLW1tZWvPtlIcXUWssIIWNY5TZ48mdjY2D5lTNz84TFyj1ThGmFE4V9Ldna29TVXV1fGjRuHSqXiyJEjVFRUdHhtzJgxjB41Gu0nuRir23GZ/8tGqr92/Phxvv32W4xGI94eXsyqGIqjScOx9kTSK/biHhDEjX99qdfBUldMRgOr/voEFXk5BAyL5sa/vtxhqlpXZFlGNpgxNehoT+8miAhywm6EB/Yxnqg8z20yg9MfaHjdPxpNSMf+5Pevv0TOwX14BoVw80uv9yrwaD1UQf03OSCDwlmNuUn/y4sSqEN+Dg5HeKByt+2+on7StbXx0+cfc3znVgDc/AOZ/8Cf8IvsPuPjr7W3NPPZYw/SWl/HmMuvYNYd93ZZrry8nPXr1zNt2jSGDTu77I3nkwiYBEEQBOEs1ZQU8fnjDyPLZmxc3KnzDwfg/t/djU9A7xc3NyeW0fhDPpKtEt9H43ucdlSQl8u6r79hxowZxI8bd9b3cCayWab2i3S0GXWA5Um3y8JwbIe4dntO4qrPOfjNV9g6ObPs1Xe7XGd0Mqeeb/51BICr/jSGwKG965Dv+zqXI1uLcfa0Zcmz4wEzR7duIOf73SQ4z0ettKVN0YLPPbG4hPZ+RGwgHN5cyIFv8zErjARNM5NdnEZbm2X/KEdHRyZNmkR8fPwZp1BqtVr2JyaRuGsfJuUvo0lDhgwhISGByMjIDuvjTp48SUpKCseOHUOn0wGW6VihnoGEl7kSau9P4J/HI522hkqWZfbs2cOOHTsAiIyIZGrlEBTVBioMRewqXYW7fyA3PvvyWQVLpzRUlPPFn3+Pvr2dCdctYfKNt/S5DmODlvbjluBJX9TUYZqaja+DZZpajCcqn7Mbgfk1WZap/iANfVET9mO8cb+pY0CRfSCR9W/8A4VSyS0vvYF3aHiv625NrqR+XbblXhQSmghX7EZ4nNfph/lHktj64Tu01tchSQrGXXUdE6+/GVUvAvwf3nyFrP17cPMP5LZ/vImNpvvATpbl39R6tN4QAZMgCIIgnKX9675k31crANB5+KH3DkDZ2oR9SQ4+YRGEjYkjLDYO34iobne6NzbqqPzXYWS9CderI3Cc8Esn32Q0UF1USEVuNhV5OVTkZVNXVoosm/GLGsbVjz+DvfO5T9EsG8207DtJ04/FyDrLhra20R64zA/FxqvjXkZlWRmsfvZJZNnMFY88RdT4znvuGPUmVr14iMaqdqKn+DPz1t4/cdZrjax8zjKNL+GKMMYtDKMtrZq61VlgkqnWlrKnci0+wyK58ZmXOkwBPJeyDpSzfXkGAFNujGT0rCD0ej0pKSns27ePpibLpq52dnZMmDCBhIQE7Ow6jopUVlaSlJTE0aNHrVP7JLMSFymQmx9chLe3Fz3R6/VkZGSQkpJiXeMGYCvbEBMezfgFU/Dy8sJoNLJ+/XqOHj0KWBJXjGsPp/1gJVpzG5tKPsbB14Mb//pSt0k1+iNj7y42vv0qkqTghr/+naDokf2uy9Sst05f0+U3gPmX
106t8bEf63NWe22d0pZaRd2qLCQbBb6PxaN0+WX0qL25ieWPPkBbYwMTrr2JyTfd1uf6dfkNmJr12Ea5o+jlWreB1t7SzM5PPyQj8ScAPINDufyBP+ET1nUafDjt+6lQcPMLr+EbEXWeWnv+iIBJEARBEM7Siv/7ExV5Ocy+6352Hs+ksakZL30r2ryMDuVsHZ0IHT2WsNg4QkeP7bBwvvaLdNpP1GIT5IjiCjcq83OpyMuhMi+b6qICTEZjp+tKkgJZNuPi7cM1Tz6LR6AlfblsljHoTOi1RvRaEwat5fNT/+q1JjwDHfGPdO1UZ2+YWvQ0bS+m9VC5pYOqkHCc6Ifz7GAU9jbote188cTvaagsJ3rqTOY/9GiX9ez/Jo+ULUXYu6i5+dnxaOz7trlvTnIlWz8+gdJGwfVXhKLdWQKyJW04U+1Z8ddHMep0zLnnYUbNnteve+2Lkow6fnjnKGaz3OVaLKPRyNGjR0lMTKS+3pL0QKPRMG7cOMaPH09JSYl1n61TvLy8GDsmnhPftKFrlpl6UySjusjK1p3a2lqOHDnCkUMptOrbrMdPpXUuLS1FkiTmz5/PSKch1H5m2Tvsp/LV6N2NAx4snbL5/Tc58dN2HN09uO2Vtwck4De1GtBm1NJ+vBZtTj2Yfu7iKsBxgj/OlwVbs8j1lVlvovJfyZga9TjPDcF5VsetAja++y8y9uzEIzCYW//xVq9GZX7Lcg7uY9vH/6a9qRGFUsmEaxeTcPUNKFUdA7nmuho+e+xBdK2tTLx+CZNu6PuI4YVABEyCIAiCcBZaG+r54F7L0+Sr/vYa/1u1CpVKxWOPPYaxvY3CoykUHEmmKO0IurbWX06UJOvok5ciEPtkJTJmfqz+ktqW0k7XsXV0wndIJL5DIvEZEoWdUwApW3PIT/oUo64OSaHB0etqkIIx/Dz6cyYx0wOYfH1EvzZGBTBUttK4sQBtlqXzL9mpcJ4dzIH0b0j7cTNOHl4se+1dNPad92+qLm5mzT+Skc0y8+8bSXhsz6MmXZFlme/eOIJzcRNRtpZ7cJhgWTgvKSQOb/iWnz7/GI29A7e//v456fif0tak58vnD6JtMRAZ782cO0d0u8bLZDJx4sQJ9uzZQ3V1dafXJUli2LBhJCQkEBpq2aPn+O4ydq3MQuOg4tbnJ2Lr0LcOubHdwKFXNpJlKqFYVcup7p9areaGG24g3DeEiteTkdtNZDUeokiTzY3PvnzO3jODVssXT/2R+pOlhMclcPXjzwzoNC2z1og2q462lCrrz6fCQYXLvDDs48+8/u7XGrcV0fxjMUo3Db6PxCGd9juTn5LEN6/8DUlSsOSFV/u09ue3rK2xge0fv0fOoX0A+IRHcPkDf8IzyLIvmCzLrHvprxSlHcEnPJIlL7zaKaC6WIiASRAEQRDOwrGdW9n6wdv4hEfiOX0eBw4cYMSIEdxwww0dyplNJk7mZFKYepiCI4epKrTss6SUbJgfeBcOKhcyGg6SVv8TNhpbvMOGWAMk3yFRuPj4WjuUxem1bPnoBPp2I7K5HX3r98jGMkBCZT8LlWY0AAqFhI2dErVGhdpOic3P/4JE8QnLfjkeAY7M+90I3M5iU1ptdj0NG/IxVlpGMJr0tRyt/4nJj9xO8MjYTuXNJjNr/pFMTUkLQ8Z6c/k9/UtlLJvMVHyRgSnTsq7KHOtF0Glpz81mE1/+5TEq8nKIHD+JKx/5v/7d4JnaIcts+uAYBUdr8Ahw5IY/x6O0OfMUQLPZTFZWFnv27OHkyZPY29sTFxdHfHw8Li4dR1zMJjOr/55E3clWRs8KYsqNkX1uZ+PWQpp3lKD3V1E2xkh5eTnTp0/H28ubiv+kYCpso0FXRZK8nRv++iKO7uc2q2BVYT4r//IoJoOBmbffw9j5V56T62hz6mlYn4exyrIOzCbAEdcrh3RK2NAdY4OWitcOg9GM+y3DsB/5S3Cva2tl+aMP0FJXS9zCq5mx9O5zcg+DRZZl
MvfuYscnH6BtbUGpUjH5ptuIW3Q1R7dtYscnH6CyUXPrK2/hEdD7kc8LjQiYBEEQBOEsfPfa38lN2s/465awv+gkra2tLFmyhKFDe37K3FJfR+GRwxj3NODZ7otBpad1uhnfoZG4BwZ1udZJlmWO/VRK4ppcZLOM3xAXYqYHoFTJpG5ZTmHqXgBGXXYF05fegY3aptun9sUnatm+PJ32ZgMqtYJpi6MYNtGv30/5ZZNMfWIB9Rty0Cgs60U0Q1xwWRiO2r9jhrqULUXs/yYPjb2Km5+b0K9NWc06k2WPpex6ZCC1zUijiy1L/jq+Q7BSVZjPiv/7E2aTiSsf/T8iE3qftbC3Mg+U8+PyDBRKiRueiscz0KlP58uyTGNjI46Ojqh6eEJfnF7L+rePolBILHl2PK4+fVuXY2rRU/FKErLBjOfdI7GNcAWgeksWup1VGM0GDho2sfCZP5/zYOmUI5vXs+PTD1GqVCx58V89rpU5G7LJTMu+cpq2F1nX39mP9cZlftgZkyrUrsygPa0GdZgLXveM7PA7su0/75L242ZcffxY+uo7PSY7uJC11NWy9T/vUHAkGQC/iKFUFxdi1OvOabD7W3F+VkAKgiAIwkXIqNdTlGbJ8GbrH0Rrayt2dnYMGXLmTp+djSM++b54tvsC4HvrGGLmzMEzOLTLYMlkMrNrZRZ7Vucgm2WGTfDlqj+OISrBlyFj/bj2z39m8o23ApC2fT0b3voHBp222+sHj/Dgpr8kEDjMDaPezI7PM9n2STr69s5rpXpFAXsPr+aH4g8pMJ0AlYQur5Gqd45QtzYb08+pkhsq2zj0QwEAU26I7FewZGrWU/2fNHTZ9Ug2ClxvHkaNrQ2N1e0c2V7coax3aDjjrrwOgB9/fko+kJrrtOxZZUn1PW5RWJ+DJbBMwXN1de0xWAIIjvYgJMYDs1lm77rcPl9H6ajGPt6yWWjzrhIA6tNLaNtRDkCOKYUFzzx53oIlgNh5ixgSPwGT0ciGt/6JXtt+5pP6QVIqcJoagO9j8djHWd6DtpQqKl5Lpnl3KbLR3OV5uoJG2tNqLBsfX9FxY9ni40dJ+3EzAHPv+/1FGywBOLp7cM2TzzL3vt+jtrOjPDcLo15HcMxoxsxbNNjNO+dEwCQIgiAI/VSafgyDToujmzvFVTUAxMTEnLHjqytuouqdVHS5DUg2CtwXD8VuWPdrRbQtBta/ncqJPSdBgonXDmHWsuEdRlIkSWLCdYtZ9McnUdrYkH/4EKuefZLm2ppu63Vw0XDl72OZcHU4kkIiJ6mS1S8lUVXU1Md3AjISfyL74F7MChNR91+G76Px2I32AhnakiupeC2J+q9zSPnvcTCYCYp2Z+gE3z5fx1jTTtUHRzGUtaBwUOF1zyicRnkx6ecEC4c3FtJc1zFQnHDtYtz8Amitr2PPyuV9vmZ3ZFlmx+cZ6LUmfMKcGTs3+MwnnaVJ10UgKSQK02oo/XkqYl84TQ0EBehyGqg7WkTF8iMoJSXVplLG/98ynNw9z0GruydJEvPu+z2OHp7Ul5ex45MPz+n1lE5q3G+IwvvBWGwCHZF1Jho3FlD5Vgra7PoOZWWzTMN6y9RZhwTfDiOlBq2WrR++DcDoOQvOKtPfhUKSJEbOnMuyV/9N+NhxeAWHMu/+P563DJSD6YK9w3/84x9IksQf//jHwW6KIAiCcInKS0kCwG/UWDIyLBnxRo0a1W15WZZp2X+S6g/TMDXqUHna4f1gLPax3t2eU1feyppXkinLasBGo2TB/aMYOzek26lzQydO5ca/voy9iyvVhfmsePoRKvO7H42QFBJxl4dyzaNjcXTX0FTdzrp/HiZ1ezG9nbXfVFPFj/99H4CJ19+MT3gEKjdbPJYMw+v+0aiDnZD1ZloPVRDVpGO+i4pxNtCSWIahqq3X19GXNlP1/lFMtVqU7rZ43R+LOsgyohOV4INfhAtGg5m9a3M6nKdSq5lzz0MApG3fTGn68V5d70yO
7yqjNLMelY2Cy26PRqHsf7eqprgQk9FwxnLufg7ETPUHIHFNLmZz31ZWqNxtsR9lWYPTtDIXJ4UbOrmd8Idn4uzR98QbA8HOyZmFDz2GJCk4sWs7GXt2nnWd2hYDGfvK2fyf4yRvLOj0M6YOcsL7gVjcrotE4WCDsbqdmk+OU/N5OsZayyhXW3IlhpOtSLZKnOeEdDg/cdXnNFZV4uThxdSbbz/r9l5InL28uebJZ1n66rs4ew7Oz8z5dkEGTElJSXz44Yc9/lESBEEQhHNJlmXyUw5hcHLjWF0LBoMBT09Pa7rmXzPrTdR/lU3Dd3lgkrEb4YH3Q7HY9JBsofhELeteSaapuh0nD1uueyKOsFFnHgHwjxrGzS/+C8+gEFrr61j17JPWTFfd8Rviwk1PJxA+xguzSWbv2lw2vJdGe4u+5/fBbGbze2+ib2/DL3IoCVdd3+F1TYgzXvePxuGGKIqMMq0mGaUkYSpqpnFDAZWvH6bilSTqv8mhPb0WczcZ/rRZdVT/Jw1zqwEbfwe87x+Njecv+xdJksS0xUORFBJ5KdWUZHQcfQmKHsmo2ZcDsPU/72DU93xfZ9JQ1ca+ry2B6IRrhvR5PdHpkn/4hs8ef4j1b7zSq+Bx3BVhqO1U1Ja1kLmvvM/Xs59i2eNLJVmmQ7pdH4VLcO9G+1rqtfzw7lES1+ag6+/0zS4ERscw4brFAGz7+D3qK072uY7mOi1Hd5Tw7espfPJEIjs+zyAvpYqD3xdYRmd/RVJIOIzzxfexeBynBIACtOm1VLxxmMbNhTRuKQTA+bIQlI6/TB0ty8ogZfN6AObc8xAa+7Pf42kwyGaZgrQaGqrazlz4EnfBBUwtLS3ccsstfPTRR7i5nf2O04IgCILQH+X5uVSq7NEGDsFgNBIYGMgtt9zS5ciPoaad6vdSaTtSBQpwWRCG+63DUdh2PXVPlmWO/ljCD+8eRa814Rfhwg1/jscjwLHL8l1x8fZh8fOvEhobh1Gv4/vXX+bQd2t77JDbOthw+T0xTF8ShVKloOhYLatfOERZVn2356RsWk/JiTRUGg3zH3oUhbLj+itZlmlr1LP3QAWpLUaOedjh/aexuCwKRxPpCioJU4OO1oMV1H6ezsnn91P98TGad5diqGxFlmVaD1dS81k6st6MJtIVr3tHdblQ3zPQkZHTAwDYvSob06/WpUy95XYc3NypLy/jwNere/1e/prZLPPj8gyMejMBQ10ZNaPrILk3KnKzrdME85IPcOzHLWc8x85RzbiFoQAc+D4fvbZvgUvK/g2UtVpG4WxiXXEfF9qr8/TtRn54N42i47Uc3V7CimcPkHWgvNcjhGcy4bqbCBweg0Hbzoa3Xj3jiJssy9SebCF5YwFfvZTE5/+3j8SvcijLbkA2y3gEOjJkjGUEZM/qbCoKGrusR2GnwnVROD5/GIsmwhWMMs0/lWBuNaDyssNx4i+bSBv1erZ88BbIMiOmzyYsNm5A7v18a6nX8f3bqWx8L43VLx4iL6VqsJv0m3bBZclbtmwZ7u7uvPHGG8yYMYPY2FjefPPNLsvqdDp0Op3166amJoKCgkSWPEEQBOGslJWVseKzz2j7eZRi2rRpTJ8+HaWyc7KG9hO11H2VhawzoXC0wePmYWjCXbut22Q0s3tVNumJlifiwyb5MWPJ0F6lqe6K2WRi52cfkbrlBwBiZs7lsrvvR6nqeR+fmtIWtn58nPqKNpAgfkEo4xaEdph2VltazBd//gMmg4HZdz1A5PjZ1J1stXxUtFJ/spW68lZ0bZYOvUIpcePT4/A4bS2IWW9Cl9+INqsObVY9pl+tP1I6q60JI+xjvXC7PgpJ1f17oWszsOLZA7Q3G5h4zRDGzus4lSrn0D6+/9dLKJRKbn35TbxCwnrxLnZ0Ksufja2Sxc8k4Oxhd+aTumxrG1/8+fc0Vlbg7OVNU3UVKo2Gpa+8jZtfQI/nmoxmvvzbQRqr2xl7eQgTr+5ddrny3Cy+
fOZxbFBz+bUPE3HtNCTlmTMjmkxmNv47jeL0Ouyc1WjsVDT8nEbeL8KF6UuG9img705zbQ2fP/Ew2pZm4hZdw4zb7urwumyWqSxsIv9INfmp1TRW/5IkQpLAL8KVsNGehMd64exphyzLbP7wOPmp1Ti6abjhqXE9JhqRZRntiVoafsjH1KTH844R2Eb+8oB+z8rlHPpuLQ6ubiz713vYOfY9ycdgy0upYueKTHStHQPtcQtDGbcwrM97VF0KLqiAadWqVfz9738nKSkJW1vbMwZMzz33HH/72986HRcBkyAIgtAfZrOZffv2sWPHDsxmM5JBz5Qxo5h9/eJOZWWTTNPWQpp3WTahVYc443HLMJTOmm7r17YY2PThMU7mNIAEk66NIPayoAHZ0DNl03p++uwjZNlM0IhRXPnI/2Hr2HMH16AzsWd1Nhk/T/vyi3Bhzp0jUCglqksa2fr+szTXlGDnHImN49Xo27ueTidJ4OJtz7iFoUQldD/1S5ZljDXtaLPq0WbXo8tvAKOlm+I4LRCXy0N71ZnL3F/Oj59loNIoufnZ8Ti5d8xe9v2/XiLn0D58h0Sy5MXXusxK2J3asha+ejkJs1Fm5m3DiJ7s3+tzTyfLMhvfeY3Mvbtw9vLm1n+8xfrXX6bkRBp+EUNZ/Pw/O43W/Vp+ajWbPjiGQikx73cxZ9z816DT8sWTf6C+vIyhE6ey6I9P9rqtP63IIj3xJCq1gmseHYtHgCNHfywhaUMBRr0ZSSExakYg464IQ2N3dpuX5iYf5LtXXwDg2j8/R/CosZRl1pOfWk3B0Rramn6ZTqlUKQgc7kZ4rBehIz27DIb07UbW/COZhso2Aoa6cuXvY8+43kw2mjFrjR2m4lXm57Li6UeQzWaufOxpIsdNPKv7PN/0WiN7vsqxTuP0CnZi9u3DydhbztEfLVkTw8d4MXvZcNTdjH5fqi6YgKmkpIT4+Hi2bdtmXbskRpgEQRCE86WpqYlvvvmGggJLSmxVUz225YXc887HnRY+m1r01H2ZiS7PMgXIcbI/LgvCkHropNWdbGXDe0dpqtFiY6tk7l0jCB05sBnL8o8k8cOb/8SgbcfOyZkh8eMZEj+BkJGje0yJnJ1UwU8rsjBoTSABMhja92LSHgTJFo3zUiSFozUwcvO1x93fwfLh54Crjz0qm94HJaeY9SZ0BY1IgO3Q7rMI/ppslvn6tRQq8htxdNMw754YfMN+2Qi2pa6W5Y8+gK6tlRlLf0fcwqt6Va/JaGbtK5YNd0NHerDggVH9DmaP79zGlg/eQlIoWPy3V/CPGk5TTRWfP/4wurZWJt1wCxOvX9Lzfcoy2z5JJyepEoVCYs5dI4iI6z6ByI+ffEDqlh9wdHNn6Wv/7vXoyKkRNSRYcN9Iwkb/8vPeXKdl79oc8lKqAbB3VjPpugiiEnzOKtA/1VaV2hF7j2Xotb88aFDbKgkZaRlFCh7h3qvOfd1JS/IUo87EmLnBTLo2ok/tMRkNrHjqT1QXFxI1cSpX9DLY/K2oyG9k26fpNFW3gwRj54WQsCgM5c+jtRn7TvLTyizMRhmPAEcW3D8SZ8/+jZxejC6YgOnbb7/lmmuu6TDdwWQyIUkSCoUCnU7X5VSI04mNawVBEIT+yMjI4Pvvv6e9vR0bGxtGDwkj85uVeIeEsfSf73Qoqytqom5FBqYmPZJagdt1UdiP7vnJf9HxWrZ+fBy91oSzpy0LHhjVYdraQKouLuS7V1+gsarSekxloyZ4VCxD4sYzJC4BB9fOa4Qbq9vY+vEJqoqakU0n0TWtBmSGTrmdqPFTcPfvf2B0LjRUtfHDu0dprGpHoZSYfH0kI2cEWDvxaT9uZtt/3kWl0XD7a+/h4u1zxjoPfp9P8sZCNA4qlvx1PA4u3Y8W9qS2rIT/PfVHjDodUxYvZfw1N1pfy9izk43v/gtJoWDJC6/iF9HzBshmk5ntyzPISapEUkjMuTOayPjO91J4NIV1L/0VgOv+
73lCR4/tVVtzkivZ+vEJAKbeFMmomUFdlitOr2X3qmwaqyxT5PwjXZm2OKpP0/RkWaa2rIWcpEqyDpZRV7wc2VSNQhWMs99ihozxJjzWi4ChbtaO/q9pW1torKygobKCxirLh7OXDzEzLqM8z8CWjywZEi+/J4YhY7sPLn9t/7ov2ffVCmydnLnjX+9h7+La63MHk9lkJnlTEckbC5HNMo7uGubcEY1/ZOff8fK8RjZ9eIz2Jj22jjbMvzemy3KXogsmYGpubqaoqKjDsTvuuINhw4bx5JNPEhMTc8Y6RMAkCIIg9IVer2fr1q0kJ/+8u72fH9dddx37v/iY7AOJTLj2JibfdBtg6ey17i+nYUM+mGRUXnZ43DocG5/us+ABpO0sJfGrbGTZMuVt/r0jsesiocFAMhkNlGacIC/5IHmHD9JUfdqCb0nCLyLKEjzFj8cjMNgaZMhmmbryBr555QkaK8sZPmUGCx5+7Jy29Wzo2o3s/DyDvCOW0Y+IeG9m3joMta0K2Wzmqxf+j9L044SOHsu1T/2txxGRysIm1v3zMLJZZu7dI7oMSnrDqNez8i+PUl1UQHDMaK5/+oUO+9jIssyGt/5J1v49uPn5c9s/3sbGtucNUc1my35QWQcqkCSYfXs0Q8f/MvWxvaWZzx97kJb6OmLnLWT2nff3qq3luQ1892YqJqOZUbMCmXpjVI/lTQYzR7YXc3hjIUbDz9P0ZgWSsNCS1a87jdXt5CRVkp1USX15q/W4UtVIe90XmE16Jt90GxOuvQmz2URzTQ2NVT8HRZXlNFRV0lhZTmNlRbcbEyuUKiLHT0KpHkVeqg1qWxXX/zked7+efz/BkvL9iz//EbPJyIKHH2P4lBlnPOe3oLG6ne2fnqAi37KvWuQ4H6YviUJj3/36xeY6LZs+OEZ1cTMKhcTUxVHETOt5Pd2l4IIJmLpypil5vyYCJkEQBKG3KioqWLt2LTU1lo1fJ02axKxZs5CQee/uW9C3t3Hzi//CL3IossFM/bps2lItHXO7kZ64XR+JQtN9J1GWZQ5vKuLg9/kADJ/kx/Sbh3b75PxckWWZmuJCa/BUkddxDyNXHz+GxCcwJG48AcNGsOPTDzm6bSOOHp4se/VdbB3OzUjYQJFlmbQdpexbZ9mzyNXHnsvvicEjwJG6k2V8/sRDmAwG5j/0KNFTZ3ZZh1Fv4quXkqivaCMy3pu5d5/5IW13dnz6IUc2r8fO2YWl/3wHR7fOUw21LS189viDtNTVMnrOAi67+4Ez1ms2y/z0v0zLejMJZi8dzrCfs7v98NY/ydq3Gze/AG575a0ep1+e0lDVxrpXDqNtNRA22pPL7x2JopfJAJpq29m7Jpf8n38f7F3UTL4+gsj4X6bptTXpyT1cSfahSioLftkoWalSEDLSg6hxPoTEeJC5bydb3n8TSaHAxduHpuoqzKau18qdYu/iiquPHy4+vjh7elF8Io3y7Ezr62o7b8zE4BEUx41PT+5xSp/ZZOLLZx6jIi+H8LgErn78mQFZU3guybJM5v4K9qzOxqAzobZVMv3moT2uHzydQW9i5+cZ5CRbHqTETA9gyo2RKM9in7ELnQiYBEEQBOE0sixz8OBBtm3bhslkwtHRkWuuuYYhQyxZyIqPH2XNC09j7+LKfR98jmwwU/t5umW9kgJc5ofjOMW/x06VLMsc+DaPlC3FAIxbFMa4haG/iY5Yc10N+YeTyDt8kOLjRzEZfkntrHFwQNdqGQG4/i8vEjIydpBa2XcV+Y1s+eg4LfU6VDYKZtwylKET/Dj4zVckrvrcMtXq9fexd3bpdG7i2hyObi/B3lnNkr+Ox9ax5wyD3fl1MoOwMfHdli06lsraF/8CwDV/fpbwMePOWL9slvnpyyzS95wECWbeOgwFOWx4+1UkhYKbX3gN34ieR4kA2lv0rHvlMI3V7XiHOHH1I2Ox0fR9qmXR8Vp2r862rJsBAoa6EhHnQ35qNaUZdZzqgUoS
BAx1IyrBh/BYrw4jILIss+ndf5GR+JP1mFKlwtnbF1dvH1x8/HD18cXF29f6b1cjcpUFeRzdtpGMxJ8wWte32+AWMJZFf7gV726yJSat/5rd//sEtZ09t7/+Hk7uA7uucKBpWw38tCLTuqbML8KFy+6I7nMmR1mWSdlSxIHv8kGGgChX5t0Tg53juR39/q26oAOmvhIBkyAIgtCTtrY2vv76a3JzLRuSRkVFcdVVV+Hg8Mu0nZ2ffUTKxu8YMeMy5i57kOpPT2AoaUZSK/FYOhzbiJ7n/MtmmT1f5XDsJ0v2vEnXRTBmTvC5u6mzoNe2U3T0CHmHD5KXkoS22TISMHb+lcy8/Z5Bbl3ftTfr2fZpOiXplk1to6f6M+naMFb/9VGqiwu7nGJYll3Pt28cARkWPjiq34k4OqTLXng1M5befcZzTv2s2bu4suy1f3cZzP2abJbZvTqb47vKkM3NmLQrMOramHj9EibdcMsZzzcaTHz/ZirleY04udty3ZNx/V6rdaq+1G3FJG8qwmTouC+WT5gzkfE+RMR793gNo8FAQUoStk5OuHj74uTu0WEaY19oW1tI372DwxvW01T9y6a/AcNGEDt3gWXa3s8p9+vLy/j88YcxGvTMvff3jJw1t1/XPF9KM+vYvjyD1gYdCoVEwpVhjJkb0uuRwa4UpNWw7b8nMOhMOHnYsvCBUQOSPv5CIwImQRAEQfjZN998w9GjR1GpVMydO5dx48Z1GPWRZZlP/nAPDZXlXHn/UzgdscVY2YbCXoXnHTGog3rOOmY2y+z8IoPM/RUgwfQlQy+Y9QFms4mT2Zk0VlYwbPJ0lKoLM+2w2SyTvLGQpA0FIINnkCNx8+z4/rWnkWVzh5EfvdbIqhcO0VyrZfhkP2bdNryf1zSx5oWnKU0/jk94BEteePWM+2ABGPQ6Vjz1J2pLi4kYN4ErH326V6OQsiyz56ssDn//DmZjEc5eIdz55ltn/J7JZpltn5wgJ7kKtZ2K6x6Pw93/zGt8eqOppp0D3+bRWKMlbJQHEfE+uHrbD0jd/SHLMrtX/kjKpg2YDbmApTts7+LKyFnzGDV7Hhvf/RdlmScIHhlrWWv2GxgB/jVZlmlt0HF0Rymp2ywj1q4+9sy5MxrvkIHp69aebGHj+8doqm5HpVEy547oM6awv9iIgEkQBEEQsCQXeuONNzCbzdx+++2EhoZ2KlN3spRP/3QfzhoPFo54AHO9HoWTGq+7Y86Y3MFkMrP9k3RyD1dZFuYvG87QCX7n6G6EMylOr2XbJ+loWwyo7VR4B6aSe2grTp5e3P6v91Db2rFzRSbpe07i5G7L4mcSekxc0JP9a79k35oV2Njacdsrb+Hm2/u9m6oK81nxf49gNhmZd98fiJk5p1fnpWxaz87lHwIq1M63Mm3xeEbP7jrDnbWd3+aRsrkIhULiit+PJnBY71O5X4hkWWbHZxlk7MtBIh0FGbQ11nUoY6OxZdlr7+Li3bv1P+eKLMu0NxuoPdnyy+bQP28MrW//ZQPaEVP9mXx9ZL+mUPZE22rJMFiaWQ/A+CvDiJv/25hGfD5cmI+HBEEQBGGAJScnYzabCQwM7DJYAsg/fAhnG09mB9yCuV6P0t0Wr7tiUJ1hfYDRYGLLRycoTKtBoZSYe9eIPqU0FgZecLQHNz09ji0fHaciv4mS7KFoHJJprqlm76ovCI+/zrIWCEtw299gqTT9OPvXfgnAZXc/0KdgCcA7NJzJN93KnpXL2bH8PwRGj8TVp+fOe21ZCXtWLgcgLO4qyvPdSVyTg8lkZuzckC7PObGnjJTNlmzEM28bdtEHSwCSJDH95qHUlLVQU+KEZ+gsRk41cGzHRoqPpwEwZcmy8x4saVsM1JW3UFtmCYhOBUfaVkOX5SWFhLufA+OvDOuwR9ZAsnWw4YqHR7N3bS5pO0s5+H0B5bmNBAx1w9nTDmdPW1y87HrMwHchEwGTIAiCcMkzGAzW1OETJkzotlxlchaz
/G5GLdui8rHH666RKJ17XgSt1xrZ9MExSjPrUdoomH/vSEJiPAa0/UL/OLrZcvWjY9n/dR5HfyzBJM0AviZl83pyU10BT0bNCiRgaP/2omlvbmLDu68hy2aip83qNgvfmcRfcQ35KUmUZZ5g079f56bnXkah6HoEwWQ0sund1zHqdYSMGsPVjy4laYNlH579X+dhNsnEzw/tcE5xei27vsy2XGthqDW73qVApVYy/96RfPVSElWFrVQEB3DDMy9RW1ZCc001IaPGnJd26LVG9q7JofBYLW1N+q4LSeDiaWfdFNrD39Gy/5m3PUqbc5/BTqFUMPUmy95au77Moji9juL0jiNyGnvVzwGUHS5ettbPnT3tcHLXoLhAM+2JgEkQBEG45B0/fpzW1lacnZ0ZPrzrdSpNx8uJbk/ARqlG4WuL9z2jUJzhaaqu3cgP7xylIr8RlUbJogdG9bvzLZwbSqWCKTdE4jfEhR8/V2LSD8esz6CpYjX2LiMZEnvmRAldkWWZLR+8TUttDW5+/sy+q3d7H3VFoVAy/8FH+PyJhziZlU7Sd+s6bHZ7uoPfrKYyPweNgwPz7v8DCqWS8VeGo1BKHFpfwMHv8pHNMuMWWrLC1ZS2sPk/x5HNMlHjfUhY1HW2uIuZs6cdc+4awQ/vHuXE7jJ8Qp0ZPikIj4CepzAOlPqKVjZ9cIz6ijbrMScPW0tg5OeAh78D7v6OuPraY6Me/I2ho6f44xXiRH5qNU017TRVa2mqaaetSY+uzUh1cTPVxc2dzpMUEk7uGpw97Qgb7dntJsi/RSJgEgRBEC5pp9KIAyQkJKBUdu6QtKfX0rgyBxuFmlpzBSPvvxbFGdYItLfoWf/2UaqLm9HYq1j00Gh8w8+c5UwYHEPGeuMR4MiG92Qqs6uQzbW0NSTz5TPJeIcOIWbmZQyfMhNbx95lCEvduoG85AMolCoW/v4J1LZ9S+v8ay7ePsy64z42v/cG+9asIHT0WHzCIzqUKc/N4sDXqwG47K4HOqTAHrcwDIVS4sC3+RxaX4DZJBMzLYAN/z6KQWvCP9KVWbcOv2TWpPxayAgPEhaFcWh9AbtWZuEZ6IhXcM9JXAZCfmo1Py5PR6814eCiZuZtw/GLcOlxb6jfAq8gJ7x+leTGoDNZAqiadppqtDSe+rza8rXJaKapRktTjRY3n8FL+NEfIumDIAiCcEkrLCxk+fLlqFQqHnnkEeztO/4hbz1SRf2aLDBDaWs2hvEqpt66rMc6Wxt1fP9WKnUnW7FzsuGK38d26lwIv01GvYnU7UUYtEXUFh8iN2k/JqNlUb3SxobIhEnEzJhDcMyoblNbVxXms/Ivj2IyGJix9HfELbxqQNomyzLr33iZnIP7cA8I4tZ/vImN2pKO26DT8sWTf6C+vIyhk6ax6A9PdFnHka3F7Pvakjbf1tEGbYsBVx97rnsiDluHi3P9SW/JZpmN76dReKwWJw9bbnxqXL/33DoTs1km6YcCkjcWApb9kub9LuasUrj/lslmmdZGvTWgcvG2x2/IhfMA6bcdvgqCIAjCOXbgwAEARo8e3SlYatl3kobv8wAobs/kQNX33Bj/jx7ra6pt5/s3U2msbsfBRc2VfxyDu9/ApGYWzj2VWkn8gnAgHJhJe3MTGYk/cXzHVqqLC8ncu4vMvbtw9vIhZsZljJgxG2fPXxJ4GLRaNrz1T0wGA+FjxzF2wZUD1jZJkpjzu4c4mZ1JXVkJe1YsZ9Yd9wKwe8Wn1JeX4ejm3uP0vzFzg1EoJRLX5KBtMWDnZMOih0Zf8sESWKaMXXZHNF+9nExTdTvbPjnB5feNHPBpcNpWA9s+OUHxCcv6n1GzApl0XQTKC3R9T29ICglHNw2Obhr8I10Huzl9JkaYBEEQhEtWfX09b731FgAPPPAA3t6Wjq8syzTvKKFpmyVrGMM1rN74PLaOTtz/0f+6XXDfUNnGd28eoaVeh5OHLVf9cQwuXmc3FUv4bZBlmaqCPI7t
2Erm3l3o2lotL0gSISNjGTlrLkPiJ/Djf9/n+M6tOLi5s/Sf7/Rqs9m+Kkw9zLqXnwXguv97HoB1L/3V8vXTLxDai0QF6XtPknWggknXReATKvpEp6spbWHdK8kYDWbsnGyIvSyYmOkBAzJNrqa0mU0fHKOpRovKRsGMW4cxdPzgpiwXzkwETIIgCMIla8uWLezfv58hQ4Zw2223AZaOceOGAloSywBwmh1MasWPJP/wNcOnzmTBQ492WVdtWQvfvZVKe5MeVx97rvpjLI5utuftXoTzx6DXkXtwH8d2bqPkRJr1uMbewRJISRI3/OXvBMeMOmdt+PGT90ndsgFHN0v675b6OmLnLWL2nfeds2teSopO1LL7yyyaarSAJfvb6NlBjJwR2O/RuOxDFez8IhOjwYyzpy2X3ztSTNW9QIgpeYIgCMIlSafTkZKSAsD48eMByzz7+q9zaEuuBMBlUThOUwLIfyQJgPCx47qsq6Vey7evH0HbasAjwJEr/xCL/RnSjQsXLhu1huFTZzJ86kwaKso5sWs7x3/aTktdLQATrrnxnAZLANNuuYPiY0epO1kKgJt/INNuuf2cXvNSEjLCg5v/NoGcpEoObyqiobKNQ+sLSN1WzMgZgYyeHYSdU+9+x00mM/vW5ZK2w/K9Co52Z85dI8Q0yAuICJgEQRCES1Jqaio6nQ4PDw8iIizZxpp3lliCJQncrovCId6Hhopy6spKUCiVhI4e22Vde9flWoKlQEeu/tMY0RG6hLj6+jH5ptuYeMPNFKWl0lpfR/S0Wef8ujYaWxY8/Bgr//Iosiyz4MFHsNGIEc2BpFQqGDbBj6gEX/JSqji8qZDaslYOby7i6I4SRkwLYMyc4B4TNbQ16dny0XFO5jQAEDc/hIQrwlEoLs1shBcqETAJgiAIlxyz2WxNJT5+/HgUCgWy0UzL/pMAuF0TiUO8DwD5KYcACBg2AluHzimlS7PqyU2uQpJg9tLhIli6RCkUSsJi487rNX3CI1j8/D+RzTK+EVHn9dqXEoVCIjLeh4ix3hSk1XB4UyFVRc0c3V7C8Z/KiJ7sx5h5ITi5dwxYK/Ib2fzhMVob9djYKrlsWTThY7wG6S6EsyECJkEQBOGSk5ubS11dHRqNhtGjRwOWvZbMLQYUTmrs437JepaX0v10PJPJzO5V2QCMmBZwXvZtEYTT+UUMHewmXDIkhUR4rBdhoz0pSa8jeWMh5XmNHNtVxok9Jxk60Zex80Jw8bLjxJ6T7Fmdjdkk4+Zrz/z7RuLmK7JlXqhEwCQIgiBcck6lEo+Li0OjsUynaT1YDoDDOB+kn9P76traKE0/DsCQuIRO9aTtKKW+vBVbRxvGXxl+PpouCMIgkySJ4BEeBEW7czK7geRNhZRm1pOxt5zMfeV4hThTVdgEQPgYL2YvG/6b34hW6Jn47gmCIAiXlKqqKvLz85EkiYQESxBkqG5Dl9cIEjgk/JLitygtBbPJiJtfAG5+AR3qaW3QkfRDAQATrxkipuIJwiVGkiQChroRMNSNivxGkjcVUnSslqrCJiQJxl8Vzth5IUiSWK90oRMBkyAIgnBJOTW6NGzYMFxdXQFoPVgBgO0wd1Suv6xDyO9hOt6+r3Mx6Ez4hDkzfKLfOW61IAi/Zb7hLix6cDTVxc1kHignbLQXgUPdBrtZwgARAZMgCIJwyWhrayMtzbJvzoQJEwCQDSbaUixpxB3G/xL4mM0m8o8kA52n453MqSf7kCWb3rTFUUgi45UgCIBXsJNYy3gRUgx2AwRBEAThfDl8+DBGoxE/Pz+Cg4MBaDtWg7nNiNJVg23UL0+EK3JzaG9qRGPvgP/QaOtx82mJHqKn+OMdIjZCFwRBuJiJgEkQBEG4JJhMJg4dsqQIHz9+vHVdQeuBn5M9jPftMFJ0Kp146OixKFW/TMg49lMZtWWtaBxUTLxqyPlqviAIgjBIRMAkCIIgXBLS09Npbm7GwcGBmJgYAPQnW9AXN4NC
wiHet0P5/MOWgOn06XitjToOrc8HYMJVQ7B1FIkeBEEQLnYiYBIEQRAuCac2qh03bhyqn0eMWg9Zkj3YxXigdFJbyzbVVFFdXIgkKQg9bTPSA9/kodea8Ap2InqK/3lsvSAIgjBYRMAkCIIgXPRKS0spLS1FqVQSHx8PgFlnpC2lCuiY7AEg/7AlO57/0GHYOVnWKJXnNZJ5wBJgTVsShUIkehAEQbgkiIBJEARBuOidSiUeExODo6MjAG2p1ch6EypPOzThLh3Kn1q/FD7WMh3PbJbZvSoLgOGT/fAN61heEARBuHiJgEkQBEG4qDU1NZGeng6clkpclmk9eCrZg1+HjSUNWi3FJyypx0+tXzqxu4yakhY09iomXi0SPQiCIFxKRMAkCIIgXNSSkpIwm82EhITg52eZemcobcFwshVUEg5x3h3KFx1LxWQw4OLtg3tAEO3Neg5+b0n0MP7KcOxOW+skCIIgXPxEwCQIgiBctAwGA8nJls1nT40uAbT8nErcfpQXCvuOme6s0/HiEpAkif3f5qFrM+IZ5MiIaQHnqeWCIAjCb4UImARBEISLVlpaGu3t7bi6ujJ06FAAzG0G2o5WA52TPchmM/lHLAFW+NgEKgoaydhrCa6mLR4qEj0IgiBcgkTAJAiCIFyUZFm2JntISEhAobD8yWtNqQKjGRtfB9TBTh3OqSzIo7W+DhtbO/yHjmD3l9kADJvgi98QkehBEAThUiQCJkEQBOGilJ+fT3V1NWq1mrFjxwK/SvYwwbdDsgeAvJ83qw0dPYbsg9VUFzejtlUy8dqI89t4QRAE4TdDBEyCIAjCRenURrWxsbHY2toCoC9oxFjdjqRWYB/r3emcvKT9AASPiOPAd3kAJFwRjr2zSPQgCIJwqVINdgMEQRAGysmTJzl06BDp6enExsYyf/78TiMIFzpZlmmuraaqsACwpL2+2O5xINTW1pKdbZlON378eOvxloOWjWftx3ijsO34J7C+4iTVxYUolEpqTnqha23AI8CBkTNEogdBEIRLmQiYBEG4oBmNRtLT00lKSqKkpMR6/NChQzg4ODB9+vRBbN3ZMZtM1JeXUVWQR2VhPtWFeVQVFqBtabaWGTP/CmYu/R2SQkwYON2p0aXIyEg8PDwAMLXoaT9eA4BDgl+nc3IPWUaXfMKjyTrUAMC0xVEolOK9FQRBuJSJgEkQhAtSU1MThw8fJjk5mdbWVgAUCgUjRozAzc2N3bt3s3PnTlxcXIiNjR3cxvagtbUVjUaDbDJSU1xEVWEeVYX5VBXmU1NUiNGg73SOQqnEzS+A2tJijmxaj1Gn47LfPYhCoRyEO/htkWWZxMREDh2yrEU6PZV4a3IlmGTUQU6oAxw7nZtzaB8AOm0wyBCV4IN/pNv5abggCILwmyUCJkE4C0ajkV27dgEwa9YsMTXqHJNlmeLiYg4dOkRGRgZmsxkAJycn4uPjGTt2LE5OlqxnJpOJvXv38v333+Ps7Ex4ePhgNr1LRfl5fPb5FyhNRjT56UjGzsGRjcYWr5AwvMPC8Q4dgndoOB6BwajUak7s+pEt77/FsR1bMeh0XP7An1CqLt3/1nU6Hd999x3p6ekAxMfHW7/vsvm0ZA/jO48uNdfVUJ6TBUi0NgWgtlMy6TqR6EEQBEEQAZMg9JtWq2X16tUUFFjWkvj7+zN8+PBBbtVvj664Ccwy6hDnfgeUer2eY8eOcejQISorK63Hg4ODSUhIYPjw4SiVHUdXZs+eTUNDAydOnGD16tXceeed+Pj4nNW9DKTC1MN8+eWXmO0cMStVmIIi8KivwDckFO/QcLxCLQGSm69ft9PtRkyfjUqtYeM7r5K5dxdGvZ6Ff3gClY1Nl+UvZrW1taxatYrq6moUCgULFiwgPj7e+roupx5TvQ7JVoXdKM9O5+cmWdKP2zkHIyscGTM3GAcXzXlrvyAIgvDbJQImQeiH5uZmVqxYQUVFhfXYrl27GDZsmBhlOo2hspXq
D46CGWz8HHCcHIB9rBeSqndrQurq6khKSuLIkSNotVoAVCoVo0aNIiEhAV9f327PVSgUXH311TQ3N1NcXMyKFSu4++67cXZ2HpB76y+jXs+eLz8j6cet6MJHgCyj0WjQAaqxk1mwdCl2dna9rm/oxCmo1GrWv/EyuUn7+e61F7nykaew0dgOaLu1Wi27du3Czs6OKVOmWPc0+i3Izs5m3bp16HQ6HB0duemmmwgKCupQ5lSyB4c4bxTqzlMXc3+ejidLlhGp4GiPc9xqQRAE4ULx2/mLJwgXiJqaGv773/9SUVGBg4MDt912G2q1moqKCjIzMwe7eb8pzbvLwDJrDkN5K/Vrsyn/xyGathdhauk8/ex0O3bs4O2332b//v1otVrc3NyYO3cujz76KFdeeWWPwdIpNjY2LF68GA8PD5qamli5ciU6nW4gbq1fakqKWPn0I6Rs/A69lz8AI2JGcNfdd2Nvb095eTn/+9//+tzGIXEJXPPEs6g0GgpTD/PNP/6Gvr1twNpdVFTE+++/z/79+9mxYwcbN25EluUBq7+/zGYzu3btsn5fg4KCuPfeezsFS8ZGHdqMWqDr6XhtTY2UpB+31CmHo7JR4BnceY2TIAiCcGkSAZMg9EFJSQn//e9/aWhowN3dnbvuuoshQ4ZY0xb/9NNP1nU1lzpTo4621CoAPO4YgfPloShd1JhbDDRtL6b85UPUrclGX97a6dxjx46xe/duACIiIrj55pt5+OGHmTRpUp9GXwDs7e255ZZbsLe3p6KigjVr1mAymc7+BvtAlmWObPmBFU/9ieriQlQe3hid3JAkiZkzZ+Ht7c3Sn0eWysrKWLFiBXp9zwHlr4WMiuW6/3setZ0dJenHWPv3Z9C2tpxVu00mEzt27GD58uU0NjZa14clJyezefPmQQ2atFotX331FTt37gQs65WWLVtmbePpWg9VgAzqMBdsvO07vZ5/+BCy2YyzVxAKpQs+4c4oRWY8QRAE4WfiL4Ig9FJmZiafffYZ7e3tBAQEcNddd+Hu7g7AxIkTUavVVFZWkpWVNcgt/W1o3nvSkpEs1Bm7oe44zwjC94lxuC8Zik2QE5hk2g5XUvVWCtUfpdGeXotslqmqquL7778HYMqUKdx6661ERUWd1RQwd3d3br75ZlQqFbm5uWzYsOG8dfbbGhv49p/Ps+OTDzAa9ITGxuGWMA2AkSNH4ulpWU/j6+vLbbfdhkajobi4mC+//BKDwdCnawUOG8ENf/k7tg6OlOdkseb5p2lrauxXu+vq6vjkk0/YvXs3siwzYvhwJkWEMD4mGrCk7d62bdugBE01NTV8/PHHZGZmolQqufLKK1m0aBGqLhJeyCaZ1iTLdDzHCV2PSp7KjmfvYlmD6DfE9dw0XBAEQbggiYBJEHrh8OHDrF69GqPRSGRkJMuWLcPBwcH6ur29vRhlOo1Za7RmJHOaFmg9LikV2I/2xufBWLzuH21ZfK8AXV4jtZ+nU/Lafr5cvgKDwUB4eDizZs0asDYFBgZy/fXXA5CSkkJiYuKA1d2dgtTDfPb4Q+SnJKFUqZi57Hck3HIX+YWFSJLUaY8of39/br31VtRqNQUFBdafub7wjYjixmdfxt7FlarCPL7621O01Nf1+nxZljly5AgffPABZWVlqG1sCLe3ofS7lexbuZz0tV8wZVwcAPv27bOO8JwvmZmZfPTRR9TU1ODk5MQdd9zB2LFjuy2vzajF3KRH4WCD3YjOyR707W0UpR2xfK4PAcAvwuXcNF4QBEG4IImASRB6IMsyO3fuZP369ciyzJgxY1i8eDFqtbpT2dNHmS71tUythyqQdSZU3nbYDnPvsowmxBmPm4fj+8Q4HKcFgq2CH5uPUN/WiAO2XOYSj7lOh2wwD9goxrBhw5g/fz4AP/74I2lpaQNS768Z9Xp2Lv8PX7/8LG2NDXgEBnPLS28wdsFV/PRzGvrRo0dbN1Q9XVBQELfccgs2Njbk5ub2awqh
V0gYNz33DxzdPagtLWb1c0/SVFN1xvPa29tZs2YN3333HXq9Ho1Rj036YaoP78dsMmHn7AKyTOmOTcybNxeA3bt3W1Prn0tms5mdO3eyatUqdDodwcHB3HvvvQQGBvZ4XsupVOLjfLpMNpJ/JBmT0YiLjz+tTY5IEviGiYBJEARB+IXIkncJ0WuNKG0UYm5+L5lMJjZs2EBKSgoA06ZNY+bMmd1mwbO3t2fChAns3r2bn376iWHDhv2mMomdL7LRTEtiGQBOUwORFD1nDVS52uK6IIxjDqUU7axGgYLZuhjM+2up2G9ZqI9SQqFRItmqUKiVSLbKX77WKJE0v/raVoXtEBcU9p3Ta48fP56Ghgb279/Pd999h7OzM6GhoQN2/zXFhWx45zVqigsBiJ23iGm33oGN2jLVLi8vD4VCwbRp07qtIyQkhCVLlrBixQqysrJYt24d1113XafU6T1x9w9k8d9eYc0LT9NQUc6qZ5/khmf+jpuvf5flCwoKWLtmDa1tbSDLqKvLsKmtwEajIXr65YyeuwAnD08+feR+akuLkU4WM3fuXLZu3crOnTtRKBRMnTq1T+9Vb2m1Wr7++muys7MBSEhIYN68eWd8P4y17ehyGkACh3HdTcfbD4BXaCyl2RIegY6o7cSfRkEQBOEX4q/CJaKhqo3VLxxCY68ibn4o0ZP9Udpcep353tLr9axdu5bs7GwkSWLhwoUd9nQBy0aY+uImtBl1KBxscJwawIQJEzh48CBVVVVkZmYSHR09SHcweNqOVmNq0qNwUmM/xrtX5+Tn57Pjpx0AzF84n+GO4bQklqHLbwQZMMmY24zQZqS3Yy2SnQqXeSE4JPh1CtrmzJlDQ0MDGRkZrFq1irvuugsvL68+3GVnsiyTuuUHdv3vE0wGA/Yursy7/w+EjxlnLXNq+lpsbKx1/Vt3wsPDWbx4MV9++SXp6ekolUquueaaPgXhLt6+3PScJWiqLy9j9XN/5oa/vIhHYLC1jK69nW9WrSSzsBgkCUmnxe5kPl4eHsTecS/R02ahsf9l+unMZb9j4zuvceDrVSx99V1Ms2fz448/8uOPP6JUKpk0aVKv29cbVVVVrF69mtraWpRKJVdccQWxsbG9OrflkGXtkibSDZVH52QhRr2egpQkAGzsogDwj3AdkHYLgiAIFw8RMF0iSjPqMBrMGBv17F6VTcrWIsYtDGPoBF8x4vQrra2tfPnll5SWlqJSqbj++usZNmwYAGa9CV1OA+3ptWgz6zC3nrYoX7KMqIwfP/6SHWWSZZnm3aUAOE7279V+S42NjaxduxZZlomNjSU+Ph5JkrAb7oFslpH1JsxaE7LOiFlnQtaaMOuMyLpTx02W4zqj9WtjTTvGmnYavs2j9VAFrldFoAn5Zf8lhULBtddey2effUZpaSn/+9//uPvuu7vMsNYbbY3/z959x1VZtw8c/9xncliHvUFAUUHce++Vmpq2TW1bNqzM6ikbWvr0q54en/a2UiszLbdp7r1wICogILL3PHDm/fvjKEaigoI4vu9X56Wcc4/vOQLd1/29rutbxLrP55F09uI7rF1Hhj4xDSc396ptUlJSSE5OrtNMTEREBHfddReLFy/m6NGjqFQqRo0aVafvKRdPL+5+898seWcmeakp/PLmy4x7dTZaRyd2rV7OgYQkLBoHkCTUxXm0Cgmi4wMzCYqMrnE2tWXPvsRt3UjK4YOs/+pj7po5B6vVyubNm/nzzz9RKpVV9XxXo7y8nN27d7N7927MZjOurq7cfffdBAYG1mp/2WLDsP9ss4caWokDnD4ag9lYibOnFyX5LkA5/iJgEgRBEP5BBEy3iLw0e3th/6Z6ivMqKCswsunHExxYe5ouI0KJ6OKH4jKpU7eCwsJCFixYQH5+PjqdjnvvvZdADz/K92VREZePMbEI2Xy+oYPkoEIT6ITxVDHFa5JRBzjTvXv3qlmm48eP06pVq0Z8R9dWZXwhlmwDkkZ50YvU
v7NYLCxevBiDwYCfnx8jRoyoukiXbfa6JYWDCoWDCtDWehyyVaZ8TybFf57GnFFO7meHcezgg354GEoXe/2ZWq3m3nvv5ZtvvqGgoIBFixYxefJktNranwcgOWY/az/7L4biIpRqNX3uf4j2w0ZeEGxs3rwZgA4dOuDu7l7DkWrWsmVLxo0bx5IlS4iJiUGlUnHbbbfVaYFkJzd37npjLr+98zrZSQn89PoMKhxdMfoGgcYByWalbWgIg+94tlqQVxNJkhj0yFTmT3+StLhYjm5aT98BQ7BarWzbto01a9agVCovmJGtrdLSUnbt2sW+ffuqugSGhoYyfvx4nJ1rvzZSRWwetnILSlfNRevoEvbY0/HCO3Qj4YC9vb1/U1G/JAiCIFQnAqZbRH66PWBq3T+IsDZexG5N5+C605TkVrBh/nEOrD1N55FhNOvgc9mak+tZaUEe+WlnaNK6XZ0uKAEyMzNZuHAhZWVluLq4ckfUYBxWFZKZetqeFnaW0k2LLsoThyhPtGGuoJAoXByPISaHgkUn8HmmPd26dWPLli1s2bKFyMjIW2aWqWyLfXbJqYsfCp0KU6WFA2tPU5BRjs1qw2qRsVlsWC02rFaZTEssxaSjkFUoksP5/qVdWC02bBYZm03GyU3LkEda1TlNSlJKOPcIQNfGi+K1KRj2Z2M4mEPFsXxcBzfBubs/klKBk5MT999/P9988w2ZmZn89ttv3H333bWqFTKbjGxbOJ+YtSsA8Apuwm3PvIh3SOgF2yYnJ5OSkoJSqbyiOp9WrVphsVhYtmwZ+/btQ6lUMnTo0Dp9j0sqNb0eeYp1P3xNbpkBi4s9MPLz9OSeCRNwq0MQp/fxpeddE9jy4zdsXfgtTTt2YcCAAVitVnbu3MnKlStRKpW0b9++1scsLi5mx44dHDx4sKozoJ+fH3379qVFixZ1+hmyVVoo3Wavo3Pq4oekvPBzslosnDqwBwAP/9bIshFXLwec3OoWMAuCIAg3PxEw3QJkm0x+uv3uqWegMyqNknaDQojqFcDRzWnE/JlKYZaBP78+xoHA03QZFUZYW686Bxz/VFRURGJiIi1btqzTneErlRYXyx/vv01leRmDHplK28HDa73v6dOnWbhgISazCU+FC0Ny26DaUsS5pUPVgc7oIj1wiPJE7e90wWfjNrYZ5qxyzJnlFCw4TteJXdi9e/d1N8skyzKy2YZCU/vmAbVlSiu11xwpJJx7BZKZWMSG+XGU5FXWuH2lQzalbqkgg3NhS8wmFVC9hXZ5kZF1X8Zy16udcdLX/UJW6azBY3xznLr4UbT8FOa0MopXJlG+Lwu325vi0NQNT09P7r33Xr7//nvi4+NZs2ZNtZmumuSeTmbV/94jPy0VgPbDR9HnvgdR1dA98VynRbDPLun1VzaD0bZtW6xWK8uXL2f37t2oVCoGDhxYbZwWi4XCwkLy8/MveJSVnV3EVtKCixaFQsGgQYPo1q3bFQX0HYbfzokdW8hOSmTj/C8ZNe0lBg8ejNVqZc+ePfzxxx8oFAratm17yeMUFhayfft2YmJiqtrxBwUF0adPHyIiIur8e8iUXkb+wuNYCyqR1IqLNntIOx5LZVkpOlc9ZrMvkCrS8QRBEIQaSXJjLtV+jZWUlKDX6ykuLsbV1fXyO9wkinMNLJi5G6VKwWPz+qD4R82SscLC4b/OcHhDKqZKe0m9d4gLXW8PJ6SVx2UvWGSbTFmRkaIsA4XZ5aQkp5KUeYwiUwZIoNXouPueOwkPD2+w93h8+2bWffZfrGfvTDu4uPLwf7/E4TKBmizLnN4bz6K1v2KSLfhb3RlsboNGoUbbVG+fSYr0RFWLu86W/AqyPz6EXGHBqasfh9zS2LJlC97e3jzxxBONPstkLTGS990xLIVGvB+JRhN0ZfU6F5O/6DgVR/LQtfMmwUHNwXWnkWVw9tDSfnATNA5KFCoJpVJBUVk+q7cswWq10rFNN7p27IFSpUChlM7+af+s
Vnx0iIKMcvyb6hn9fPta1dvlpqaw/suP8ApuQqdRd+ARYG87LdtkyvdnUbI2xd5AAtC19UZ/WxgqvZa4uDgWL14MQM+ePRk0aNAF3/uyzcbBNSvYtug7rBYLjno3hj0xjbD2F08/O3XqFD/++CNKpZJnn332qn/37N27l9WrVwP2AEyj0ZCfn09eXh5FRUWXbMHu5OSEp6cnXl5edO7cGX//y6dNXkp28ikW/us5ZJuNMTNep2nHLsiyzKpVq9i/fz+SJDFu3Diio6Mv2DcvL49t27Zx5MiRqjE3adKEvn37EhYWVudASZZlyndnUrQyCawySjctHve1RBtS8+e94ZvPOPznKloPGIKhvBfp8UX0u78FrXrXrkZKEARBuHWIgOkWkBSTy5ovjuId4sJd/+p80e0qy80cWp/K4U1pWIz2wMkv3JUut4cT1MIdq9lGUY6BwiwDRdn2PwuzyinKNmA2WTFp8zA4pWPRlFQdU7KpkRX2OoQ+ffrQt2/fOrVGvhxZltmzbDE7fvkRgIiuPShITyM/LZX2w0Yx4MHHL7qf8VQxKWuPsix3K0bJjJ/sxtiIgbhE++LQwgPFFbQWrjhZQP78YyCDbnQIX25ehNFo5M4772zUWSZLfgW5Xx/FWmgEQOnhgO8z7c/WBl1eQkICv//+OyEhIfTo0YPg4OALjp/1/n6QIcZRTWqGAYCW3fzodXdztH/7LCsqKvjyyy8pLCykWbNm3HfffRcNJouyDfw6dx+mSittBgTR+67mlxxnWUE+C197gbL8PPsTkkSzTt3oMno8/hEtALAZzBT/edq+sK4MkkaBy4AQXHoFsj/mAKtWrQLs369/Xzi3rLCAtZ9+WLXIaXiHzgyd8iyOereLjkeWZb755hvS0tLo2rVr1RpQV2vXrl2sW7euxtfUajWenp5VgdG5v3t4eKDTXdgp7mptWfAt+1csxdnTiwc/+BSNzhGbzcaKFSuIiYlBkiTuuusuIiMjAcjOzmbbtm3ExsZWHaNp06b06dOHJk2aXNEYbJUWCpcmUHHE/u/uEOmBx53Na2wrD/bA94snJ1NeWMDoF9/gr+/LsZht3PtGVzz8nWrcRxAEQbh13TAB02effcZnn31GSkoKYM/pf/311+t0AXKrBkx7Vyazb2UyLXv4M3Bi5GW3ryg1cXDdaY5uScd6tsGBzlVDRampWi0PgE2yUKnLosIpA5vSnnolSQpCAyLo1KEz6UcMHDy+k0pHe7eqkJAQxo0bd8VpSX9ntVjY8PWnxG76E4BOo+6gz32TST12hCVvv4akUDDx/+wzDefIsowxqZiSDafJS8lipeYABsmEr6Mnkx6chKP31X9flGw4TcmGVFBJnOhczvaYXY06y2TOKif3m6PYSs2oPB2QrTLWIiO6aE887o+87J18i8XCJ598QmFhYdVzQUFB9OjRo6oLYOHviZTvziTHIrOrzIKDk5p+97egaYfqbcVtNhs///wz8fHxuLm58dhjj+Ho6HjJ8ycdymXN50cBGPxwFM0vkmJlqqzglzdeJiflFO4BQbj7B5B0YG/V68FRrel8+zhC23VEkiRM6WUU/ZGIKbUUAJWXDrfbmxKTf6IqGOnfvz99+/Ylcd9u1n3xPypLS1CpNfSd+AhtBw+/7GeXkJDAwoULUalUPPvss1fcha8m+/fvJyEhAQ8Pj6qgyNPTExcXl6tOp60Ls7GS7198iuLsrGo3KWw2G7///jtHjhxBoVAwdOhQkpOTqy3q3KJFC3r37n3ZxWcvxZReRv6i41jzK0EhoR8ehnOvgEt+Bhnxx/lp5otodI7c8eqn/P7BERyc1Dz0fq9r+tkJgiAIN4YbJmBasWIFSqWSiIgIZFnm+++/57333iMmJqbWd+5v1YBpzRdHSYrJpdedEbQdGHz5Hc4qLzZyYO1pjm1Lx2axf5toHVW4+zmh85TJtyaTmpOA2WKv9NHpdHTu3JnOnTtXXRhazTaW/ecgpzMTKXdLwIYV
nU7HmDFjaNGixRW/J6PBwIoP53L6SAySpGDAg4/TbuiIqtf/eP8dEvftIqR1O8a/OhtJkqg8VUTJhtOYkksop5IV2gOUSZV4e3rz4MMPXvbCvbZkm0z+D3FUnijAolfwE1sxGo2MHz++xtSkhmRMLSHvu2PIFRbUfk54PRyNtchIzueHwSrjdntTnHvUvJDpOXv27GHNmjU4OzsTERHBkSNHsFrtM5Du7u60i2xP0EYbWpTsKLPg2MKDARNb1lhztGXLFjZt2oRSqeThhx8mIODS5z5n9++nOLD2NCqNgvEvdcIzsHqqpc1m5Y/33yHpwF50rnrue/sD3Hz9yDtzmv0rlnJ8+2ZsZ8fs3SSMzrePo0X33kiSAkNMDsVrkrGV2WdCtc3diXVOZ0ucvYNaE1dHCvZste8bGs6Ip1/EM+jyP0eyLPPVV1+RkZFB9+7dGTp0aK3e640o5UgMv70zEySJe2e9R0Dzs234bTaWLl1abTYJICoqit69e19VSqAs2zshFq2oXQre352bFWvZsy8BkXez87dEQtt4MeLJNlc8HkEQBOHmdcMETDXx8PDgvffe4+GHH67V9rdqwPTjzF2U5FYwelo7gi7SXvdSyouNlORV4uajI784h127dhEXF1dVd+Dl5UW3bt1o27YtavWFKTDlRUYWz91HaVkxxoAEys1FAHTr1o1BgwahUtUt9a0kL5dl775FXmoKKq2Wkc++RNOOXaptU5SdxfwXnsBqNjNm0iu4nHHBlFwMQIXSzGrnQxQaS/Dw8ODBBx+s1zv/ALYKCzkfx2DJr+SwTwb7So5f81mmyoRC8n+IQzbb0DRxxWtSVFWKUun2dIpXJoFSwueJthetZzIajcybNw+DwcDIkSPp1KkTpaWl7N27l/3791NRUQGAVlYRZg6kad8edBrSvMa79ImJiSxYsACA0aNH16mDms0ms+J/h0g7UYjeR8edr3Sulua3cf4XxKxZgUqt4c7X51RdsJ9TkpfLwdW/c2TDOsxG+0yoq7cvnUaOIbr/YJSyipL1pynblQFnu8bHqJM5oEwCQJedTtce3eh1z0RUNXyP1yQ+Pp5FixahVqt59tlnr0njk8a05pP/ELd1I17BTZjw73koz/5cW61Wli1bxrFjx4iOjqZ37974+NRuQeOLqWsK3t/Jssw3zz5KcXYWo55/hcSDLiQfzqP7HU3pMOTKUgIFQRCEm9sN2evYarXy888/U15eTvfu3S+6ndFopKSkpNrjVmOqtFCSa7+o9Qy6sgs2nYuaQmM6ixb/yNdff82xY8eQZZmwsDDuu+8+nnzySTp16lRjsATg5KZl+OOtUUuO6M5EE+ZrTwvcvXt31Ro4tZWTksRPr71AXmoKTm7u3PPmuxcESwBuvn707Hcv/fzuQbvVZg+WlBLKzh5sCDhBobEEV1dXJk6cWO/BEoBCp8LzgSgktYKWOd5olRpyc3OJi4ur93PVpCI2j7z5x5DNNrQRbng9HF3tYtK5ZwAOrTzBKpO/6AS2CkuNx9m5cycGgwFPT8+qAMfFxYWe3frQxmMYrsXNcLHpMEoWTmhOs3bvYv744w+ys7OrHaewsJDffvsNgI4dO9YpWAJQKCSGPNIKZw8txTkV/DU/rmqdpoNrlhOzxt7ae9jU5y8IlgBcvbzpN/FRHv30O3reNQGdq56S3Gw2fvcFX019iD0rf0Xbzxvf5zri0j8Ii85Ce3MY7S2h9s/TNxC5yBtjbCG2ypo/q7/7e2e8Ll263PTBEkDfBx5G5+JaNat3jlKpZPz48bz66quMGzfuqoMlU3oZOR/F2IMlhYR+RBieE6NqFSyBvcNhcXYWKrWG0DYdyDxlv5FS19b1giAIwq3jhmorfvToUbp3705lZSXOzs4sW7aMqKioi24/d+5c3nrrrWs4wutPQYa9nbiTXoPO+cKWx5djs9lYuHAhp06dAkChUNC6dWu6d++On1/NtSQ18QvX0/feFmz68QRlh70ZMCaM
XYc3kZmZyeeff86oUaNo3br1JY+RHLOfFf99F3NlBZ5BIdzxypu4el148WVMKqJkQyr+Sf6gA6tsocLPSMiEniz64xeyc3NwcnJi0qRJuLm51enzqAu1nxPu4yIo+PkkrSoDOahOZsuWLURFRTXoLFP5/iwKf0uwN55o7YXH3S2QVNXPJ0kSHuObk50Zg7WgkoIl8XhOqF7PVFZWxs6dOwEYMGBAVbOOM8cL+Ov745QXGWmpDSLaFEKaaxHHfXM5c+YMhw4d4tChQzRt2pQePXoQEhLC4sWLqaioICAg4IobH+icNQx7rDVL3z9A8uE8Dv55GjfvbDZ//zUAve+bTIvuvS5zDBe6jbuHjqPGcmzTBvavXEpxTjY7f13I3uVLaDNgKHlnUkiNO4Je7UXrZgNppw7nkDmJLfkHsS2poAWBODR3twecIa4YzTYMZ+v7Apq7oVQqOHnyJJmZmajVanr06HFF7/dG4+iqp9+kR1nz8Qfs+u0nIrr2xCPgfMe5us4k/5M9BS+LopWnwFK3FLy/S9hrT7UMbdeB8mIblWVmlGoF3sH1f+NEEARBuDncUCl5JpOJ1NRUiouLWbJkCV9//XXVBWhNjEYjRqOx6uuSkhKCg4NvqZS82K3pbFl0kpBWHox6ul2d9z9y5AhLly5FpVLRvXt3unTpclUzMlt/jufo5jTUDkqGTm3OX9vWkJpqX8umQ4cODBs2DM0/1rKRZZmja9eye9FPaCUdQWGt6DjgdiSThK3MhLXMjK3MXPV3+WyHP5QSpiAra7d9jlltwaX3UFLT0nBwcODBBx/E19f3it9HXRStOEXBjlR+1u7AJFkYN27cZYPDK1W6LZ3iVfY0MsdOvrjfEXHJhYhNaaXkfGavZ9KPDMel1/kL3FWrVrFv3z4CAwN55JFHsJpt7Pr9FEc22hen1Xs70E+nhBITbqPCce4ZSFpaGjt37uT48eNVKZs6nY6Kigp0Oh2PP/74VQepx7als3nhSWRrNpaKX7GaTbQeMITBjz1d54J9m9VK/O7t7F3+G7kpSVXPK9Va2gy6F5/wblSUGImN383pEnsQ2tccRYTNXntjlWVyLTIZJhuZFhnvpnqGPNKKH3/6juzsbHr16sWgQYOu6v3eSGRZZuncN0g5fJDgqNbc+fqcemmicDUpeP/0/fSp5J05zfCpz4OyJZt+PEFAhBtjX+hw1eMUBEEQbk43VMD0T4MGDaJp06Z88cUXtdr+Vqxh2vLTSWK3pNN+SAg97mhWp31NJhMff/wxJSUlDBgwgD59+lz1eKxWGyvmHSI9vgi9j45xMzqwa+8Otm49W1Tv7c24MXeg3V+O6UwptjIz5uIKFHIdZmSUEk6d/XDpF4xSr+Gn12dwymDG6uKGRqNh4sSJV9WVq65kq43cr4+yJ/UwB9RJeHl68eTUJ+t1lkmWZUrWn6Z04xkAnPsEoh9eu7VsynZmULT8lL2eaUpbNMEu5Ofn88knn2Cz2Zg0aRIezn6s+vQIhZn2GcvoPoF0iHSn+JeTKBxV+L3cpdpiuIWFhezevZuDBw9iNtubKTzwwAM0bdq0Xt7rui93c2zTf0EuJzCyDXe+NquqZuZK2Gw2dv62iYOrfsdqtqJyHIhC6X7+nMiUuSRS6WRvRd7CEElb/HFVnv98rbLMnnIruS6F5GqPoNFomDZtWr01E7lRFOdkMf+FqVhMRoZMeYbW/Ydc1fFMGWUULDyOpaoLXijOvQKvKBAryEjnu+ceR6FU8sSXC9nxWyondmXRcVgTuo25+u9NQRAE4eZ0Q6Xk/ZPNZqs2gyRcKD+9DACvK6hf2rVrV1WQealasbpQKhUMfTSaxXP3UZxTwYbvjjNian9CQ0NZunQpubm5fP3V13QzR9DCGoCEhOJsqZ1NYUPt5ojSRYPCSY3SRY3CWYPSWY3CWY3SWWP/U6+tuni32WzYmkZhTTkNNhtDeve8psESgKRU4HlfJK3/V8pRUyp5+XkcO3as3maZ
ZJtM0YpTlO/KBMB1aBNc+gXX+oLSqbs/xuRiKo7mkb/wOL7PtGfjxo3YbDaaNWtGSHATfvu/AxRmluPoqmHAxEhCWnmQ8+lh+/7d/KsFS2Dvnjd8+HD69evHkSNHcHNzq5dgCcBcWUF2wkKQy5EUnkiq4VCXgPof8tJK2fpzPJmJEkqHsWhdFDjqNehc7A9HFzU6Fw0OzhEcPb2TU2dOEO98gpYjIvEObIrxWB6Gw7mQW0FbFyWLFYkANA1odcsFSwB6Hz963HU/Wxd8y9YfvyW8fWec3Nwvv+M/WIqMlG45Q/neLHsXPP3ZFLwmV36zK3GfPR0vuFUbHJydyUy01y/5i/olQRAE4RJumIDplVdeYfjw4YSEhFBaWsqiRYvYvHnzRRdvFOx34vPT7AHTP9swX05JSQnbt28H7DN5F2vocCV0Lhpum9KGpe8dIPVYAXv+OEX3sc14dMJDLPn6J85YctmuPsFRZSKKgjxUhhL6PzCBqP4DLn/wv5FlmZUrV5KYchoJcEhL5MTqUjr27I10jddDUrpo8J/QhtZfJ3FAmcSmNRto1arVVc8yyVYbhUsSMMTkgARuo5vi3K12rbrPkSQJ93ERmNLLsBZUcnzhHo6lHwPs//b7ViWTm1qK1knFna90wtndAWNSMeYzpaCSLtmWXKfT0bVr16t6j39ns1pZ+d93yTuTgs5Fj8ppPHlpZrYtjqff/Rc2e7iUynIze5YncWxrOrIMKrWCjsNDaTc4GJW65sWV29iC+OOPPzh8+DArVv+O7q67aDmoJc69Asl6bx/ZFelY1QYkm5KcPQ5sUh6n9z3NL3q8m1XH20ZzYscWcpJPsWn+l4yc9lKt97XkV1C6JY3yA9lgtSdAXE0K3t8l7LXX5EV06UF5sZHi3AqQ7At0C4IgCMLF3DBd8nJycpg4cSItWrRg4MCB7Nu3j3Xr1jF48ODGHtp1qzS/ElOlFYVKws2vbne6N23ahNlsJigoqEHWDvIOcaH/RPsF7sF1qSRuTqNiQRJDylrTWYoAWaZYYaHQy43ckBA2HI1l9erVJCQkVKV4XYosy6xbt46DBw8iSRKjRtyGo81CdlICx7b8Ve/vpza0TVzpMaQPWllFgaGYQxv3Xn6nS5DNVvIXHLcHSwoJj7tbXDZYKsnLIXHfbrKTEjGUFFfVGSkcVHjeH4mshB2pBwBo06YNtjIHDq49DUC/+1ri7O4AQOlWex2TU0dflFfQTORKyLLMxu++IPnQAVQaLXe8/AbDHusBEhzblsHxnRm1Oo7NJhO7NZ2Fr+8mdos9WGrW0Yf73upGp9tCLxncKBQKRo8eTevWrbHZbPz6668kJCSgcFDh1C+Ig6pkAJoHR6NETdyOTJZ9EENZYWW9fAY3CoVSyZDHnkZSKDi5axtJB/dddh9zroGCxSfJ+mB/1aySNlyP16Ot69QF72JK8nLJSowHSaJZ525kne2O5xngjPYqjy0IgiDc3G6YGaZvvvmmsYdwwzmXjufh74RSWfvYODMzk5iYGACGDh1aL0XbNWne2Y+8M2UkbEhFXp2ERSGhdNVQkhuDU24ySv8gXJtFkZGdTUFBAXv37mXv3r2oVCpCQ0Np1qwZEREReHp6XnDszZs3s3v3bgBuv/122rdvj23cPWxZ8C3bfvqeiK490TZCupRHryZ0OBLFrtwjbN2+jegObdF46Op8HFulhbzv4+zt0lUKPCdEorvMGluGkmIWvTad8sLzbdxVWi2uXj7ovX1w9fYBHzcyCgtRyBJtfZqz/ttjyDK07OZHs472joTm7HIqTxSABM69r11644FVv3N4/WqQJG57+gX8mjUHoOuoMPYsT2bLoni8glzwDrl4U5LMU8Vs+yWe3NRSADwCnOh9d3OCWtQ+ZUyhUDBmzBisVitxcXH8/PPP3HfffZS7lFOkKEcjqxjctC2Gnnr+/OYYOSklLJ6zj2GPRRMQUffUtPpks9oozDJQkl+Jf1M9Dk4NFyj4hjej
44gx7F+xlA1ff8rk/3yKxuHC73VzdjklG89QcSQXzlbUVrhWkuGQTEZaIob/FdKyZz963HnfVf0uStxn/30Q2CISJzd3MhMTAPBvpr/iYwqCIAi3hhsmYBLq7lzAVJd0vHMzMwDR0dEEBwc3yNjO6djFF//9WahtMmWyjYN5S8jMOIHe05t7ZryKq7cPRqORpKQkEhISSExMpKSkhMTERBITE1m7di0eHh5VwVNoaCh79+5ly5YtAAwfPrxqzZ/2w0dx5K91FGams3vpz/Sd8FCDvreaSJJE30nDOPjBcYrkcvZ8u4H2fTqBVUa22JCtMpz9U7bYqj0vW2xVr1lyDVjyK5G0SrwmRaENd7vkeWVZZv2XH1FeWICDswtKlYryokIsRiMF6WcoSD+DDBjCosDBkShrEJbfUyhJ/xGLUkNeShDrPvfDzS8A30x/VIA20gO1V92DvSuRsHcnWxZ8C0DfCQ8R0eV8q+6Ow0LJTi4h5Wg+a744yl2vdMbBuXogUF5sZNfSU5zckwWARqeiy6gwWvcNRFGHmwnnKJVKxo0bh9Vq5eTJk/z0009V9UqtLSEYt2UTNCOEu17pzOrPj5KfVsYfHx6ix/hmtOkf1GA3If7OYrZSkFFObmopuWfKyE0tJT+9DKvZvjKv1lFFp9tCad0vCKWqYZINeoy/j/jdOyjJzWb7Tz/QYfjtFGVnUpSdRcXpApzPOOJu8q7aPr08gbiiXRQkZ1Y7zu7ffqK8MJ9Bj05Fobiy9MbEv6XjAWSeKgJEwCQIgiBc3g3dJa+ubrUueWu/PMqpg7n0GNeM9oNDarXP8ePH+eWXX1CpVDz11FMNuk6RMbnYvriq0UoJMpvSfqbSnIqTmwd3v/Vv3P0uTC+TZZmcnBwSExNJSEggNTUVm81W9bpKpcJisS8sOnDgQHr37l1t/6SYfSz791solComvf9JtXVirqXN6zayeddW9DZHxpm6oaDuF9AKJxVeD0ajCbp8m/ejG//kzy/+h0Kp4r53PsA3rCkWk4nS/FyKc3Moyc0h/tQpYjNyUMgyow3t8FR6kV6ewPac84uQOiidGRk8BaWkZEPmAqx6G+4BgXgEBOLuH2T/MyAIJzf3egsKshLj+eWtV7CYjLQdMoKBD0254NhGg5nFc/dTkltBSJQHI55qi0IhYbXYOLzxDPtXpWA2WkGCyB7+dBvdFEfXq08ltFgs/PLLLyQk2GcrdDod96r6osg149I/GP3QUMwmK5sXnCB+r30x3+Zdfel3f0vUmvqrazIbreSllZ0NjkrJTS2lMKMcm+3CX+9qByVanYqyQnvDHL23ju53NCW8nXeDBHIphw/y25zXq7720PgR5daDQKeIqufOlJ8krngXVmcbel9f3Hz9cfP1R+/ji6G4iM0/fIMs22jZsy/Dnnyuzh0RDSXFfP7YA8iyjUc++gadqydfP78N2SYzcU4PXDwc6u39CoIgCDcfMcN0E8tPt7eA9qrlDJPFYmH9+vUAdO/evUGDpcqTBeQvOI5stqEOcebQ6Z+oNKeCpCOs0yM1Bktgn6Hx9fXF19eXnj17Vs0+nQugSkpKAOjVq9cFwRJAePvOhLXvRHLMfrb8+DVjX3qjwd7jP1lMJoqyMynMTEddnotKkihWGEjxK6SVd3MkpQQqBZJSsi80q1QgqSSks3+e+7qs2ETMX2eQXB0YUIsZnsKsDDbN/xKAnndPwDfM3q1OpdHg7h+Iu38gFouFjUePA9CjRz9O/Kmkm0Im0CmCu8e/SYlvCcU52WhPKFCWKckzpZNfmQ6VUJSdSXLM/mrn1Oh0fwugAnH18sFJ74ZO74ajXo+jqx6l6vLpYCW5OSz7v1lYTEbC2nVkwOTHaryo1zqqGf54NL+9e4DUuAL2rUrGP1zPtsUJFGUbAPAJdaXPPc3xDa2/myUqlYq77rqLn376iaSkJHr37o23vhn5Px6nbHs6zj0CULtoGPRgFD5NXNnxWyLxe7IpyChn+OOt
cb2CGTpDiYn8tDLy0srISzsbHGUbqtLZ/s7BSY13iDPeIS54BdvTFfVeOmTgxM5M9ixPoji3grVfxOLfTE/P8RH1+vkAhLbtQOsBQ8ncEUsrj174OYQC9lbtRm8zyg7ONG8+lM7eE1Fpag5indw9Wf3Re5zYsQWz0cjIZ2dcdNuanNq/B1m24RPaFL2PL2dOFCDbZJw9tCJYEgRBEC5LBEw3KbPJSlGO/ULRs5Ytxfft20dBQQHOzs706tWrwcZmOJJLwS8nwSqjidCz9cwvZKbEoXZwRFLfQeIBCyGtMoi8RPe1c7RaLZGRkURGRiLLMrm5uZSXlxMaGnrRffpNfJTTRw6RdHAfyTH7CWvfqd7em81mpSQ3l8LM9L89MijMzKAkLwf+NqGr8PQDnyC25u2l4+Q+tWq9bKq0sGHOPopLzFBSSPn/DjHy6XZodTX/KNusVtZ89AFmYyVBUdF0GjW2xu32799PUVERzs7OlB53Jq+8lBQ/HU0rLRBTSdPHO6Hu4khm7F5krDR/eBDNAoZQmJFGQUY6hZln/8xIpzgnG1NFBdlJCWQnJVz0vTg4OdsDKFf92SDKDUf9uYcenbMLf337OYbiIrxDQhk57SUUyovPyngFudBvQks2fBfH/lUpVc/rXNR0H9uUlt38L7mI75VSq9Xcf//9ZGdn4+9vX9BWE+KCKbWUko2puI9uhiRJtB0YjFeQM+u+jiXvTBmL5+5j6MPRBEfVXHtms9oozDZUBUfn/jSUmGrc3kmvqRYYeYe44OyurRZgyrJMxZFcSrel4ynDbeEulBSbyM81YE4v5dh/D5IW6EyT9t7oPBxQaFVIDkoUWiWSVonCQQU2GVuFBZvBYv+z6mGu+rtcUf21qIr2RAW0sw9CAY7tfHDpH4zau3Z1hC2690Kt1bL8P3M4tX83v783m9EvvIraoXbBzvnuePblEaraiTd1q9X+giAIwq1NBEw3qYL0cpBB56qpVeqRwWCoqvsZMGAAWq22QcZVvjeLwmUJIINDtCdbUxdz+vhhNDod41+bzZkTGvatTGbzopO4+znhF177+gJJkvDx8bnsdh4BgXS47Xb2r1jKpu+/IqR121rNdtTEUFxE7OYNZMSfoDAzneLsTKxnUwJrotE5nk1fC8TZ25dNcQlYVGoWfPBvHpo5C7Xm4p+7LMtsXniS4pwKnNy0WExWspJK+OPDGG5/pt0FdTsAu5f+TGbiSbSOTgyf+nyN9R+VlZVVCwc3829L+rZSVGoFrR9vjbw5jYrDuRQsOoFjBx/kSisqbx26SE8khYSzuwfBrdpUO57FbKY4O5OCjPNBVFlhPobiIvujpBjZZqOyvIzK8jIKM9Iu+Rk7u3sw9uU30eguf3Hdoqsf2UnFHN2SjqSQaNMviM4jQxu8C5pSqSQg4HyArx8WSu6XRynfk4VLr0BUnvaZpMAW7tz5SmfWfnGUnNOlrPjoEN3GNCWqV8D5WaN0e3BUkFGO1WK78GSSPY3OK8gZryBnvENc8Qp2xkl/6Z9Za6mJwmWJVMblV3teBwSpFOf/b5BfQeWGVOq9r59SwqmDLy79gqo+j7oI79CZO15+k9//bzanj8Tw29zXGfvSG2gdnS65n9FQTurRQwBEdD1bv5RYBECAqF8SBKERWK3WWnX8Fa6eWq1GeYmbrbUlAqabVNWCtYH2iwnZbKVwmX1BTW2YHk2oKyovXdXd582bN1NZWYmvry/t2rVrkDGVbk2jeLW97bJjJx+2Jv1K8uH9qLRaxr78Jv7NWuAXLpN3ppTkw3ms/eIod73apV5qTf6p2x13E7d1I4WZ6cSsWUGnUXfUel9Zlkk/GcfhP1cTv3sHNmv1AEmpVuPm629PdwsIxN0/AHf/QDz8A9G56qvd8S9b/gd7D8aQbZZZ88mHjHp2xkXXiDq+M5OEfdlIComhj7RC7aBk+bxD5KaW8vuHB7n92fbVPquM+OPsXvoLAAMfeRJXr5qDyZ07d2IwGHDT
u5O5y/5Lpef4Znj4O2Mb2wxzehmWvApKN50BwKVP0CVnalRqNZ5BIXgG1Vw3dy5YMhQXYygpqhZI2f9+9s+SIiSFkhHPvIiLp9dFz/dPPe+KwK+pHq9gFzz8L30x3VC04W5om7tjjC+keP1pPO85v0aUi4cDY6d3YOtP8RzfmcmuZafYtexUjcdRa5V4BtoDI8+zAZJnoDNqbe1/+cuyjOFQLkXLTyFXWEAh4dI/GE2IC3KlFZvRcvZPK4bcCrLiCzGVmFBLoFEpcHFRo1ZKyJVWZJO9DkzhoELhqEbSqVBc4lH1uqMapZMK6SrXowqJbsv412azdO6bpJ+I49fZrzLuX7PQuVw8jTApZj9WiwX3gCA8AoOxWW1kJdtTd8WCtYIgXEuyLJOVlUVRUVFjD+WW4ubmhp+f31XV6YqA6SaVd65D3tmGABXH8jEczAGo+lPhrEYb6kqpt419++zrpAwdOvSqF1P9J1mWKVl/mtKN9gtu5z4BbD+5hFMHdqNUqxnz4kyCWrYCQFJIDHowiiX/3k9hloH13x5j1DPtUNRzKpXW0Yle907kz8//x67ffiayd//LpsSZKgzEbdvM4fWryUtNqXrev1kLWvbsg0dQCO5+Abh4edW6k1f/wUOIOXIUs4MjcbGxeCxeQK97Jl6wXUFGOdt+jgeg6+1hVRd6Y57rwB/zYshPL+f3/xxk9LT2OLlpMVUYWP3xB8g2G5G9+hHZs2+N5y8tLWXXrl0AOJWGYbJAk2hPWvWxN8NQOKjwuK8lOZ8eAouMwkWNY/vLz+JdiqRQoHNxRefiiif134VRqVTQvItfvR+3rvRDQ8mJL6TiUC6mPkFoAs6nxqrUSvo/0BKfUFe2LY7HZpFx8XSoFhh5BTnj6qm7qjRCa4mRwqWJ9jbwgDrQGffxzdFcJJDUA36yTPLhPHYtO2Wv/yo24+7vRI/xzQmJ8kBSSNeky9/FBDSP5M7X5/DbOzPJTkpk8VuvMP61ty/685u453w6niRJ5KaVYjFa0ehUjRZQC4JwazoXLPn4+ODo6Niov0tvBbIsYzAYyMmxX/eeS5u/EiJguknlp1WfYTKdsa87ow50RlIrMJ0pxVZmpiI2n7/Uh5CVMk1kb1w2GyhJSUUbqkcT7IKkvrrgSbbJFK04Rfkue5tg16FN2Hl8KSd3b0OhVHH7C/+iSet21fbROKgY9lhrfv33PtJOFLJ/dQpdRoZd1ThqEt13EIf/XEN2UgLbf/6RoVOeqXG73NQUDv+5mrhtmzBXVgCg0mhp2bMv7Ybchm94syseg06no1v37mzbtg2Tlz+7ly3Gzdef6P7nF2Q2m6ys+zoWi9lGcJQHHYY0qXrNI8CJsc934I//xlCYZWDpBwcZPa0du379iuLsLFy8vBnw0JSLnn/Lli2YzWb0Oi+MyU7onNX0f6BltV/imgBn3MdGULg0AdfBTewNKYTL0gQ6o2vrTcXhXErWpeD1YPUFoCVJIrpPIC26+mGzyRetQ7sSsixjOJhD0Yok5EoLKCVcB4XYZwcv00ZdkiTC23nTpLUnx7ams3dlMoWZ5az65AhBLd3pMjIM33B9vd/EqElJXgVZycUENHPH2f18yqFvWFPufvPf/Pr2a+SdOc3Pb8zgzpnvXDCLajYZSTpkb0hS1U68qn5J3yA1bYIgCDWxWq1VwVJN60cKDUOns6eA5+Tk4OPjc8XpeSJgugnJsnx+DaazDR9MZwMo516BOLX3QTbbMKWVcvJgHGeO5iPJEl1MzTDGF2KML7QfSCmhCXJBG+aK2s8Jzl1cSJy/oJaAf/7d/h9IEoaYHAwxOfa6i9ubsufY7xzbsgFJoWDEsy8S3r5zje/BI8CJfve1YMP84/aOZ031BEdeemHWupIUCgY8+Bg/zXyR2M3rqwU/FrOZhL07OfznatJPHKvaxz0giHaDhxPVdyAOTrVf3+pSunXrxu7duzHjhNVJz/qvPsbV24eQ6LYAbP8lnoKMchxdNQya
HHXBRZ6bryNjX7AHTSW5Ffz81k+UZq+3L/A69YWLjjMvL48DBw7Yv0gPQEKi/4SWNdbCOHX0xbG9j7jArCP94CZUHM2j8mQhxqRitDXU5NUlva42LMVGipYmUHnS/nOsDnLGY3xz+89wHSiVCtr0D6Z5Fz8OrEnhyOY00k4UknaiEK2jiqCW7gRHehAc6XFF3f5qYjZaSY8vJDWugNRj+RTn2G9QeAY6c9ernasFaZ5BIdzz5rv8+varFGVl8vMbL3Hna2/j7n9+qYDTh2OwGI24eHpX/WyL9ZcEQWgM52qWzq3ZJ1w75z5zs9ksAibhvLJCI0aDBYVCwt3XCdlqw3Q2gNKcDaAktQJViDPb18QA0KVbFyLa9MSYUowppQRjcjG2MjOm0yWYTpdc3YAU4D6+Oftil3N4/WqQJIZPfZ7mXXtecrcW3fzJSCwmbnsG6789xl3/6lLtLnN9CGgeSWSvfhzfvpmN333BbU9P58hfa4ndtB5DcRFgD6wiOnen7ZDbCG7Vpt6n0J2cnOjcuTM7d+5EEdYCa+xelv9nDvfOfp/8dDVxOzJBgsEPRV20nsvVS8fYFzqw9L1t5CatBqDNwNsJioqucXuAjRs3IssyOqsXGpMbUb0CCG/nfdHtRbBUdyovHU6dfSnfk0XxuhS8p9T/9885sixj2J9N0cokZKPVPqs0uAkuvYPsLeuvkIOTmp7jI4juG8S+lckkH8nDaLBw6mAupw7mAvYmFMGRHgRHeRDYwr3Ws2X2mzvlpMblcyaugIzEImyW850kJYWEpLDXZCbszaJFt+rpFG5+/tz95rssefs1CjPT+eXNlxn/6my8QkKB6t3xJElClmXRIU8QhEYl0vCuvfr4zEXAdBM6N7vk7u+IUq3AlFEGFhuSg7Jad6qYmBhycnLQ6XT069cPjU6HJtAZegYiyzKW/EpMycUYU0qwFlae7Ygt29d7OXdNI8tVT537+9+flzRKXPoHc/DwKg6s+gOAIY8/TWSvfrV6L73viiA7pYT8tDL+/CaWMc+1R3GZlKK66n3/ZBL37SYj/jhfP/1w1fPO7h60HjiM1gOH4OJR+6YDV6J79+7s3buXcouFoOatKI4/xm/vvIFNeSegpdNtoQS1vPQMm5Neg1azGeRKJKUPqScjyE8vw7OGdbjS0tKIi4sDwKGwCXpvHT3HX3lqoXBxrgNDMBzMwXS6hMrjBeii6j8Vw1JUSeFvCRgTigDQBLvgfmdz1D71dydT761j0INR2Kw2ck6XcuZ4AWeOF5CVVEJxbgXFuenEbrV3J/QNdSU40p3gKE98Q12q/cxWlpk5c7yA1Lh8UuMKMBRXb5Pu4ulASJQHIVGeBLZ059jWdHYtO8We5ck06+iL8h9pwq5e3tz95r/57Z2Z5Kam8Mtbr1QFTacO7AHOp+OV5FVgKDGhUEn4hF5+wWdBEARBABEw3ZTyzqbfnbtQPle/pAlyqZolqKysZOPGjQD069evKsfzHEmSUHvpUHvpcOp8dQX0e5YtrurWNuDBx2ndf0it91VplAx7NJrFc/eRmVjMnuVJdB9bvxf2Lh5edB17F9t//gGwd+JqN2QE4R27oFRdmx8RFxcXOnbsyJ49eyCkGfqifIpzspCUS2nS/hE6j7h8DVfM2hWkHT+MSqPBM3Q8xbk2lv3nIKOfbY93yPmLQ1mW2bBhAwDaCl/UNmcGPRSFxkH8OmgISlctzj0DKN2cRvG6FBxaetTbbJ0sy5TvzaJ4dbJ9VkmlQD+kCc69AhtsRlChVOAXrscvXE/nEWGYKiykxxdyJq6AMycKKco2kJVUTFZSMftWpaBxUBLYwh03X0fS44vIOV1SbZFdlUZBYHN3gqM8CInywM23eiF06/5BHNl4htKCSmK3ptN24IWNQpzc3LnzjbksnfsGWYnxLJ71LzqOGIOxvBydq56AlpHA+folnxBXVFfZsU8QBEG4dYgrpJtQVf3S2YDJnHYuHe/8RfO2bdswGAx4
enrSqVP9Ldz6TwdW/VEViPS5/0HaDxtV52O4+Toy4IFI1n0Vy8F1qfg3dSO0Tf3O+HQZPR7PoBA8AoPwCAiq12PXVs+ePdm/fz9p6elEtbyd4tzvka2ZKNmIRGfOVobVKC81ha2L5gPQ94FHaNlzMCs/Pkx2cgm/fxjDqKfbVq1plZiYSEpKCsgKnEqb0Om2UPzCRD1HQ3LpE0TZ7iws2QYMh3Jw6uB71ce0FFRSuDQB49k1hTRNXHEfH1HrxWDri0anIqytN2Ft7emcJfkVpB231yGlnSzAWG4h+XBetX08A50IjvIkJMoD/2b6SwYvao2SziPD2LzwJPvXpBDZwx9NDSl/OmcX7nztbZa9O4u047HsWrIIgGadu1V1rTy3/pKoXxIEQRDqQrS7uglVdcgL+scMU7D968LCQnbv3g3AkCFD6mVBr5oc+Wsdm3/4CoAed95P59vHXfGxmnX0oU1/eyCzYX4cJXkV9TLGcySFgmaduzVasATg6upK+/btAUhIS0LtNApJoeTU/h3sWLzwovtZzGZWf/Q+VrOZ8A6daTt4OA5Oam5/ph3+zfSYKiwsn3eIjIRCbDZb1eySzhBAQBMfOg1vctFjC/VD4ajGpZ/9e6tk/WnkmhakrSXZJlO2I53s/x7AmFiEpFagHxmO9+NtrnmwVBNXTx1RvQIY9lg0D73Xm/Evd6Lr6HCievozYGIkk//dk3tmdqXnuGYER3rUaqYnsoc/br6OVJaZiVmfetHtNDpH7njlTULbdax67lw6HkDmqbP1S2L9JUEQhFrr168f06ZNa+xhAJCamsqIESNwdHTEx8eHF198EYvFcvkdr5IImG4yFpPVvnYK9g55NpMVc3Y5cH6Gaf369VitVsLDw2nevHmDjCNhz042fPUJAJ1vH0e3cfdc9TF7jGuGT6grRoOFdV/FYr2Ki87rVbuoTiBLmLVFNOvbgiGPPw3AnmW/ELtpfY37bP/5B3JTU9C56hny+DNV6UwanYpRT7cjqKU7ZqOVFf87zJZ1u8nOzkayKXE1hzLowah6rwkTaubcIwCFiwZroZHyPZlXdAxzroHcL47Y24WbbGhCXfF9tgMuDZiCdzUUZ+uZOg0Ppf8DkUT28MfJre6NWxRKBd3GhANw6K8zlBcbL7qtWuvA6Omv0XbwcJp361XVbbKi1ERhlv13o39TMcMkCIJQX2RZviZBi9VqZcSIEZhMJnbu3Mn333/P/Pnzef311xv83OJK6SZTkFmOLIODsxpHVw3mjDKQQeGqQanXcvr0aeLi4pAkiaFDhzZIt5Yzx46w6qP3kGUbrQcMofd9k+vlPEqVgqGPtkLrqCLndCk7fkush9FeP6xWG7t/TcOhwp6uVaA4RXS/QXQdezcA67/6mNTYI9X2OX30EAdWLgNg6JRnL1i8U61VMmJqG5pEe2K0Gti2axMAjuXB9B0fhVs9NgUQLk2hUeI6KASAko1nsBlr/z8X2SpTsvkM2fMOYjpdgqRR4ja6Kd6PtUFVTy29r3fh7bzxDXPFYrSyf3XKJbdVqdUMemQqo557uaoO8dzskkeAEw5O6oYeriAIwmXJsozBZGmUhyzLlx8gMHnyZLZs2cK8efOQJPvC5fPnz0eSJNasWUPHjh3RarVs376dyZMnM2bMmGr7T5s2jX79+lV9bbPZmDt3LmFhYeh0Otq2bcuSJUtqNZY///yTuLg4FixYQLt27Rg+fDizZ8/mk08+wWQyXf4AV0HUMN1k/l6/JElStYYPNpuNdevWAdChQwd8fa++juKfspNP8ft7s7GazTTr3J1Bj0yt16DM1VPHoMlRrPr0CEc3pRHQzI1mHX0uv+MNYM/vSWQnl+DmFEq2lE1iYiLp6en0vOt+irIzOblzK8v/8w73zn4fz8BgKspKWfvphwC0HTycph271HhclVpJ/8kRHPvvX9gsZpRmJ1o2bUNkzytf8Vq4Mk6dfCnbmoYlv5Ky7Rm4Dgy57D6mjDIKf0vA
fPZnW9vcHfexzVC5OzT0cK8rkiTRfUxTfv8whrhtGbQdGFyngL8qHU/MLgmCcJ2oMFuJen1do5w7btZQHDWXDwPmzZtHfHw80dHRzJo1C4Bjx+zrU7788su8//77hIeH4+7ufqnDVJk7dy4LFizg888/JyIigq1btzJhwgS8vb3p27fvJffdtWsXrVu3rnb9OnToUJ544gmOHTtWVdbQEMQM000mP82efuf1jwVrNcHOHD16lIyMDDQaDf3796/3cxdlZbJ07huYKioIiopmxDMvomiA+qjQNl50GGq/0Nz44/GqFMQb2enY/KrajMETOtCmTRsAtm7diqRQMOyJaQQ0j8RYXs6yf7+JoaSYDV99QllBPu7+gfSd8PBFj22xWFjy269UWErQqnREevVk0APRYi2IRiApFbgOCQWgdGsa1nLzRbeVLTaK/0wh5+NDmNPLkHQq3O9sjteDrW65YOmcwBbuhLTyxGaT2bs8qU77nm/44Fb/AxMEQbhJ6fV6NBoNjo6O+Pn54efnV1X7PmvWLAYPHkzTpk3x8Lj00icARqOROXPm8O233zJ06FDCw8OZPHkyEyZM4Isvvrjs/llZWRfc7D/3dVZW1hW8u9oTM0w3mbx0+4zSP1uKl+utVcX+vXv3xtn5wrV5rkZ5USFL5szEUFyEd5Mwxrw4E5Wm5kVW60PX28PJPFVMZmIxa7+MZfxLHVFpbsw2wWWFRjbMt6+J1LpfEE3b+6AP7s3hw4c5efIkWVlZ+Pn5MfrF11j02gsUZ2ex4JVplOblolAque3p6agdar6AlmWZlStXkpycjFqtZvJDE/H3FzNLjUnX2gv1FifMGeWUbjqD28jwC7YxppZQuCQBS479ZoCulSduY5qhdGm4n6kbRfex4aTG5ZOwP4f2Q0qrtcy/GLPJSm6q/XehmGESBOF6oVMriZs1tNHOfbXq2mU5MTERg8HA4MGDqz1vMpkadHaoPogZppuILMvVZpis5WaKC4rYpjrOlyt+oLS0FDc3N7p161av5zUayvltzusUZ2eh9/Vj3L9moXV0qtdz/JNCqWDoI9HoXNTkp5ex7Zf4Bj1fQ7FZbaz/9hiVZWa8gp3pMa4pAF5eXkRHRwP2WSYAR1c9Y196A62TE6V5uYC9+6Bf04iLHn/r1q0cOnQISZK48847RbB0HZAUEvph9nW1ynZnYCmqrHrNZrJStDKJ3M8OY8kxoHBW43F/SzwfiBLB0lleQS4072y/o7hrWe3qGHNSSrBZZZzctLh43pqzc4IgXH8kScJRo2qUR31kmTg5Vb/WUygUF9RGmc3nMynKyuxZT6tWreLQoUNVj7i4uFrVMfn5+ZGdnV3tuXNf+/ld3ZqhlyMCppuIodhEZbkZSSGhdrGx5o9VLNbu5KQqA1mWadasGRMmTECtrr+CZ4vJxO/vzSb3dDKOejfG/2v2BY0HGoqTm5bBD7UCCeJ2ZHJi95V1HmtM+1ankJFQhFqrZOgj0dVaLPfu3RuAuLg4cnJyAPAMDOb2519F6+hEWPtOdB598VbtR44cYdMme5OH4cOHN1hHRKHutBFuaMP1YJEp2WBPxaw8VUT2vIOUbU8HGRzb++D7XEccW3s38mivP11vD0ehlDhzvJAzJwouu/25BWv9m+lFKqogCEIdaTQarFbrZbfz9vYmM7P6tdihQ4eq/h4VFYVWqyU1NZVmzZpVewQHX7go+T91796do0ePVl0Tgb3zs6urK1FRUbV/Q1dApOTdRPLSyrBJZmw+2Xzy6U57VC9BoM6bIfeMpEmT+l1vx2a1sup//0daXOzZ9U/ews3v2s5gBEd60HlEGPtWJrNl0Um8Q1zwDKjfdMOGYLXaOLkrq6rbV7/7W+DmW72A3dfXl8jISI4fP862bdsYN84eHIVEt+HxL35ApdZc9OIvJSWFP/74A7D/gunSpeaGEELjkCQJ12Gh5H56GMOBbGSLjYpD9llDpV6D29gIdC0vnw9+q3L10hHd
J5Ajm9LYtfQUQS+7X7KtelX9UlO3azNAQRCEm0hoaCh79uwhJSUFZ2dnbLaal3UZMGAA7733Hj/88APdu3dnwYIFxMbGVqXbubi4MH36dJ577jlsNhu9evWiuLiYHTt24OrqyqRJky45jiFDhhAVFcUDDzzA//3f/5GVlcVrr73G1KlT0WrrvmRFXYgZpptEZWUlO3dvp8B7L4UkYTab8dV4MNzUnrt7jq73YEmWZTZ8/QmJ+3ajVKsZ8+Jr+IY1rddz1Fan20IJjnTHYrKx7stYTJUNvxbAlbKYrBzdnMbC13ezacEJkO2LcjbvUvNUcp8+fQCIjY0lLy+v6nm1RnvRYCkvL4+ff/4Zq9VKZGTkBbnCwvVBG+KKQ5QnyFQFS05d/fB9rqMIlmqh4/BQ1FoluamlJB7Mueh2NptMZtL5GSZBEAShbqZPn45SqSQqKgpvb29SU2teQHzo0KHMnDmTGTNm0LlzZ0pLS5k4cWK1bWbPns3MmTOZO3cukZGRDBs2jFWrVhEWFnbZcSiVSlauXIlSqaR79+5MmDCBiRMnVnXva0iSXNtG7DeBkpIS9Ho9xcXFuLq6NvZw6oXJZGLfvn1s376diooKAFwd3bnt9qG4/FqIXG7B+4m2aJvU7/vd/vOP7Fn2C5KkYNTzLxPRpUe9Hr+uDCUmFr+zl/JiExGdfRn8UNR1lXpjNJg5uiWdIxvPUFFqz+fVuahpOzCYdoNCUKoufu9i0aJFxMfH07ZtW8aOHXvJ85SXl/P1119TWFhIYGAgkydPrtcUTKF+mXMM5Hx6GIWTCvc7InAQMyB1sndlMvtWJqP31nHvm11R1rAIc+6ZUha/sw+1g5JH/tMHxXW4wK8gCDe/yspKkpOTCQsLw+EijZqEhlEfn71IybtBWSwWDh48yNatW6uK6NQ44VAUzNg7BhLs50JWeS4oJDQB9duA4eDqP9iz7BcABj36ZKMHSwCOrhqGPBLN7x/GkLAvm4AIN6L7BDb2sCgvNnJk4xlit6RjqrTn/7p4ONB+SAiRPfxr1dmvb9++xMfHc+TIEfr27XvR1p1ms5mffvqJwsJC3NzcuPfee0WwdJ1T+zji/68uSGrFdRXg3yjaDQomdksaxbkVHN+RWePPfFX9UrheBEuCIAjCFREpeTcYm81GTEwMH330EatXr6asrAy9Xs+okbfjntMRh0ofvINdqtqJq/0ckeqhdeQ5x7dvZtP3XwHQ656JtBk4rN6OfbUCItzoNtreonn74oSqNsKNoTi3gs2LTvLjq7s4uC4VU6UVjwAnBj0Yxf2zu9G6X1Ct26AHBgbStGlTZFlm+/btNW5js9lYtmwZaWlpODg4cP/999d763ihYSg0ShEsXSGNg4pOt9nTOPatTMZsvLAoOfNUESDS8QRBEK53U6ZMwdnZucbHlClTGnVsYobpBrN161Y2b94MgLOzM3369KFDhw4UZlaw27YPraMKJzctxWc7xmmCL79GSW0lHzrA2k8/BKDD8NvpMubOejt2fWk/OITMU8WkHMlj7Vex3PWvzmh11+7bPD+9jANrT5N4IAfZZs929Q1zpeOwJoS29rpkYfql9O3bl1OnTnHo0CH69OmDm5tbtdc3bNhAXFwcCoWCe+65B29v0VlNuDW06h3A4b9SKcmr5PBfZ+h0W2jVa7Isk5lQBIiGD4IgCNe7WbNmMX369Bpfa+xSGhEw3WDi4uwLnPbo0YN+/fqhObs4bH66PS3PM9AZSZIwn51h0gTVT8CUmXCS5f+Zg81qpWXPvvSb+Mh1eVdcUkgMnBTJ4nf2UZJbwaYfjjP0segGH2tmYhEH1p3m9NH8qudCojzoMKwJARFuV33+kJAQQkNDSUlJYceOHYwYMaLqtX379rFz504ARo8eTWho6FWdSxBuJEqVgq63h7P+2zhi/jxNdJ9AHJztqail+ZWUF5tQKCR8wm6OulVBEISblY+PDz4+Po09jBqJlLwbiNForOo9371796pg
CewtxcG+YK1skzGdDaDqY4YpP+0MS999C4vRSGjbDgx7chqS4vr91nFwUjP00WgUSolTMbkc2ZTWYOcyG62s/OQwS98/aA+WJGjW0Ye7/tWZUc+0I7C5e70Fa3379gXg4MGDlJSUAJCQkMDq1asB6NevH23btq2XcwnCjSSiky9ewc6YKq3sX5tS9XzmKXv9kncTF9S1TIEVBEEQhH+6fq96hQukp6cDoNfrcXGpHgjlnw2YPIOcseQakI1WJLUClbfjBcepi7LCAn6b+zqVpSX4N2vB7c//C6Xq+m8k4BvmSs/xzQDY+VsiWcnF9X4Os8nKqk8Oc/poPgqVRFSvAO5/sxtDH43GO6T+UiHPCQ0NJSQkBKvVys6dO8nMzOTXX39FlmXatm1bFVAJwq1GUkh0H2Nf1uDo5jRK8u0dQ8+vvyTqlwRBEIQrJwKmG8i5gCkoKOiC1/6ekmc6c7ZrXqAzkvLKZzdMlRUse/ctSvNycQ8IYuzLb6C+gVphtu4XRNMO3tisMuu+iqWy3Fxvx7aYrKz+9Ajp8UWoHZSMfaED/Se0vGDx2fokSVLVukz79+9n0aJFmEwmwsLCGDVq1HWZIikI10pwlAeBLdywWWT2rUgGzs8w+Tdza8SRCYIgCDc6ETDdQNLS7Kll/wyYDCUmKkrNSBJ4BDhhSjtbv3QV6Xg2q5VV8/6PnORT6Fz13PHym+hcbqwaAEmS6P9AJHpvHWUFRv6aH1fViOFqWExWVn92hLQThai1SkY93Q6/sGtzB7tp06YEBgZisVgoLS3F29ubu+66C5VKlCMKtzZJkug+1j6rfGJPFhkJhRRklANihkkQBEG4OiJgukHIsnzRgCnvbICk93FErVGeD5iusOGDLMtsnP8lSQf3oVJrGDvjddx8/a5i9I1Hq1Mx9LFolCoFKUfziVlf8+rUtWUxW1nzxVHOHC9EpVUy8um21/RiTJIk+vXrB4CTkxP33XcfOp3ump1fEK5nvqGuNO3gDTKs++oYAG6+juhcNJfZUxAEQRAuTgRMN4iioiLKy8tRKBT4+VUPXvLT7HdRPQOdkS02zJn2r690hunAymUc/nMVSBK3PT0d/4gWVzf4RuYd7ELvuyMA2P1HEhln2wzXldVsY+0XsaQeK0ClUTDqqTYENEKqT0REBJMmTeKxxx7D3d39mp9fEK5n3UY3RVJIGEpMgFh/SRAEQbh6ImC6QZybXfLz80Otrt504Vz9kleQkz1YssooHFUo3bV1Pk/87u1sWfAtAP0eeJiIrj2ucuTXh6heATTv6otsk/nz69iqi6naslpsrP0qltOx+ajUCkZObUtAROMFK2FhYej14kJQEP7JzdeRyJ7+VV+L9ZcEQRCuTr9+/Zg2bVpjDwOAZ555ho4dO6LVamnXrt01O68ImG4Ql2r4kFfVIc+lWv1SXZsAZMQfZ83H/wGg/bBRdLht9NUM+boiSRJ9722Bu58j5cUm1n97DFst65msFhvrvool5UgeSrWC26a2IbCFmNkRhOtVlxFhqDQKJAkCm7s19nAEQRBuarIsY7FYrtn5HnroIe6+++5rdj4QAdMN42L1S1aLjcKscyl5TpjOLlirrmP9UmFWBr//32wsZhPhHbvQb9L1uTDt1dA4qBj2WGtUGgVpJwo5sCblsvtYrTb+/OYYyYfzUKoUjHiiDcEtPRp+sIIgXDEnNy1jX+jAqKfb4eolavwEQbhOyTKYyhvnIdfupvHkyZPZsmUL8+bNQ5IkJEli/vz5SJLEmjVrqmZ7tm/fzuTJkxkzZky1/adNm1ZVew1gs9mYO3cuYWFh6HQ62rZty5IlS2r9kf3vf/9j6tSphIeH13qf+iBaa90ALBYLmZmZwIUBU1G2AZtVRqNT4eLhQPYVdMirKC1h2b/fpKK0BN/wCEY+MwOF4uZc5NEjwIl+97Vgw/zj7F2ZjF9T/UUDIJvVxvpv4kiKyUWhkhj+RGuCo0SwJAg3Ap8mN1ZXT0EQbkFm
A8wJaJxz/ysDNE6X3WzevHnEx8cTHR3NrFmzADh2zN5U5+WXX+b9998nPDy81jXVc+fOZcGCBXz++edERESwdetWJkyYgLe393W9nqQImG4AWVlZWK1WHB0dL/iGrErHC3RCNlqx5NoXbNQEOdfq2BaTid/fe5vCzAxcvX0Y+9LrN9RaS1eiRTd/MhKKiNuRyfpvjnH3q11wcqte72Wz2lj/XRynDubYg6XHW9OklWcjjVgQBEEQBOHa0+v1aDQaHB0dq5qOnThxAoBZs2YxePDgWh/LaDQyZ84cNmzYQPfu3QEIDw9n+/btfPHFFyJgEq7OufqlwMDAC9Lk8s8GTF6BzpjSykAGpZsWpfPl2+jKNhtrP/2QjJNxaB2duOPlN3FyuzVqc3rf3ZzslFLy08v485tjjJ7WDoXSnqFqs8lsmH+cxP05KJQSwx5rTWhrr0YesSAIgiAINxW1o32mp7HOfZU6depUp+0TExMxGAwXBFkmk4n27dtf9XgakgiYbgAXq1+C8x3yPIOc67xg7faff+Dkrm0olCpuf+FVPINC6mnE1z+VRsmwx6JZPHcfGQlF7FmRTPcxTbHZZDZ+f5yEfdkoFBJDH40mrI0IlgRBEARBqGeSVKu0uOuVk1P1sSsUCuR/1EaZzeaqv5eV2a9ZV61aRWBgYLXttNq6d3a+lkTAdAO4VMCUdy5gCnTGvMM+E1WbBWuPbFjL3j/sRXZDpzxDSHSb+hruDcPN15H+E1ry59fHOLj2NP7hek7F5HByTxaSQmLIo60Ib+fd2MMUBEEQBEFoNBqNBqvVetntvL29iY2NrfbcoUOHqpbDiYqKQqvVkpqael2n39VEBEzXufLycgoLCwEICKheGFhRasJQbALJ3szgXHqeJvjS9UvJMfvZ8M2nAPS4836i+gxogJHfGCI6+ZKZWMzRzWms/uwIsow9WHq4FU3b+zT28ARBEARBEBpVaGgoe/bsISUlBWdnZ2w2W43bDRgwgPfee48ffviB7t27s2DBAmJjY6vS7VxcXJg+fTrPPfccNpuNXr16UVxczI4dO3B1dWXSpEmXHUtiYiJlZWVkZWVRUVHBoUOHAHswptFcvhzlSom24te5c7NLXl5e6HTV2+Oem13Se+lQmm1Yi40ggTrw4gFTTkoSK/77LrLNRqu+g+g27p6GG/wNoue4Zvg0cbEHSxIMfiiKZh1FsCQIgiAIgjB9+nSUSiVRUVF4e3uTmppa43ZDhw5l5syZzJgxg86dO1NaWsrEiROrbTN79mxmzpzJ3LlziYyMZNiwYaxatYqwsLBajeWRRx6hffv2fPHFF8THx9O+fXvat29PRkbD1oJJ8j+TDW9iJSUl6PV6iouLcXW9MVrObty4ka1bt9KuXbsLetsf2pDKjiWJhLf3pl/vAPJ/iEPl64jfcx1rPFZJXi4/vfYCZYUFhES35Y5X3kSpUl+Dd3H9Ky2oZO/yJMLbexPWVqThCYIgCIJQfyorK0lOTiYsLAyHm7wb8fWmPj57kZJ3nbtkw4dzHfL+3vDhIvVLRkM5y959i7LCAjyDQhj1/CsiWPobFw8HBk6OauxhCIIgCIIgCNcZkZJ3HbPZbFUtxS/X8MF0ifols7GSZe++RV5qCk5u7tzx8ps4ONVunSZBEARBEARBaGhTpkzB2dm5xseUKVMadWxihuk6lpeXh9FoRK1W4+1dPU3MZrVRkFkO2Bs+lP9R8wyT1WJmxX/mkn7i7FpLr7yFq7eozxEEQRAEQRCuH7NmzWL69Ok1vtbYpTQiYLqOnZtdCggIQKlUVnutKLsCm0VGrVXiJEFZhQWUEmq/8z3xbTYraz7+D8mHDqDSaBn70hv4hIZf0/cgCIIgCIIgCJfj4+ODj8/1eVNfpORdxy69/pJ9Rskz0Bnz2dQ8TYAzksr+TyrLMhu+/rRqYdrRL/yLwJaiRkcQBEEQBEEQ6kIETNexSzd8sKfjeQY5YzpjD57UQefrkrYtms/Rv9Yh
SQpue3o6oe1q7pwnCIIgCIIgCMLF3TAB09y5c+ncuTMuLi74+PgwZswYTp482djDajBGo5GcnBwAAgMDL3g9/+yskleg098aPtjrl/b8/iv7lv8GwODHnqJF917XYsiCIAiCIAiCcNO5YQKmLVu2MHXqVHbv3s369esxm80MGTKE8vLyxh5ag8jIyECWZVxdXWssdMs7GyR5BDhjzjgbMAW5cOjP1Wz/6XsA+k54iNYDhly7QQuCIAiCIAjCTeaGafqwdu3aal/Pnz8fHx8fDhw4QJ8+fRppVA3nUu3EK8vMlBcZAdCrJArNNiStksT4ffz17WcAdB17N51G3XHtBiwIgiAIgiAIN6EbJmD6p+LiYgA8PDwuuo3RaMRoNFZ9XVJS0uDjqi+XrF86m47n6uWAnGsAwKqXWfvpf0CWaTd0BD3vnnDtBisIgiAIgiAIN6kbJiXv72w2G9OmTaNnz55ER0dfdLu5c+ei1+urHsHBwddwlFdOluVLd8hLO79grfns3+MTd2OzWons1Y8Bkx9HkqRrN2BBEARBEAThptSvXz+mTZvW2MPg8OHD3HvvvQQHB6PT6YiMjGTevHnX5Nw35AzT1KlTiY2NZfv27Zfc7pVXXuH555+v+rqkpOSGCJqKi4spKytDoVDg7+9/wevnZpg8g5wpP54NQF5FOuEduzD0iWlIihsyDhYEQRAEQRBuMLIsY7VaUakaNqw4cOAAPj4+LFiwgODgYHbu3Mljjz2GUqnkqaeeatBz33BX1k899RQrV65k06ZNNc6+/J1Wq61qmnCx5gnXo3P1S76+vqjV6gtePxcwaVXF2PJNADg00TNq2ssoG/ibVRAEQRAEQbh6sixjMBsa5SHLcq3GOHnyZLZs2cK8efOQJAlJkpg/fz6SJLFmzRo6duyIVqtl+/btTJ48mTFjxlTbf9q0afTr16/qa5vNxty5cwkLC0On09G2bVuWLFlSq7E89NBDzJs3j759+xIeHs6ECRN48MEHWbp0aW0/8it2w1xdy7LM008/zbJly9i8eTNhYWGNPaQGc6l0PJtNJj+jHJu1mBMr1tDL9XaMVDB8xnRUGs21HqogCIIgCIJwBSosFXRd1LVRzr3nvj04qh0vu928efOIj48nOjqaWbNmAXDs2DEAXn75Zd5//33Cw8Nxd3ev1Xnnzp3LggUL+Pzzz4mIiGDr1q1MmDABb29v+vbtW+f3UVxcfMl+BvXlhgmYpk6dyqJFi/jjjz9wcXEhKysLAL1ej06na+TR1a9LBUzFOQYsxlLM5b/h5NQMAOcIX7SOTtd0jIIgCIIgCMLNTa/Xo9FocHR0xM/PD4ATJ04AMGvWLAYPHlzrYxmNRubMmcOGDRvo3r07AOHh4Wzfvp0vvviizgHTzp07+eWXX1i1alWd9rsSN0zA9Nln9nbZf5/WA/juu++YPHnytR9QA7FYLGRmZgI1L1ibkZiNqew3ZGsRvq72WTZdWO2iekEQBEEQBOH6oFPp2HPfnkY799Xq1KlTnbZPTEzEYDBcEGSZTCbat29fp2PFxsYyevRo3njjDYYMafg1R2+YgKm2uZY3uuzsbCwWCw4ODnh6el7w+qE1vyBb81BpXQj0ao5cbEET5NIIIxUEQRAEQRCulCRJtUqLu145OVXPblIoFBdcr5vN5qq/l5XZa/BXrVp1waSAVqut9Xnj4uIYOHAgjz32GK+99lpdh31FbpiA6Vbx9wVra2oNnp9unwZt3XsC8ikLAJog52s3QEEQBEEQBOGWodFosFqtl93O29ub2NjYas8dOnSoqoFZVFQUWq2W1NTUK6pXAnv91IABA5g0aRLvvPPOFR3jSoiA6TpzqfqlitISzBX5AIQFRcCpbFSeDigcL+ykJwiCIAiCIAhXKzQ0lD179pCSkoKzszM2m63G7QYMGMB7773HDz/8QPfu3VmwYAGxsbFV6XYuLi5Mnz6d5557DpvNRq9evSguLmbHjh24uroyadKkS44j
NjaWAQMGMHToUJ5//vmqfgZKpRJvb+/6fdP/cMO1Fb/ZnQuYaqpfSj0WB4CkcEcv2YMkdbBIxxMEQRAEQRAaxvTp01EqlURFReHt7U1qamqN2w0dOpSZM2cyY8YMOnfuTGlpKRMnTqy2zezZs5k5cyZz584lMjKSYcOGsWrVqlp1v16yZAm5ubksWLAAf3//qkfnzp3r5X1eiiTfKsVB2Beu1ev1FBcXX5drMhkMBv7v//4PgBkzZuDoWD2vdd3nXxO76Xe0zq25p88EKo8XoB8ZjkuvC4MrQRAEQRAE4fpQWVlJcnIyYWFhODg4NPZwbin18dmLGabryLn6JU9PzwuCJYDMBHv9kt4nDNOZUgA0YoZJEARBEARBEBqMCJiuI5desNZKYVYyAIEhLbCVmUEBmgCx/pIgCIIgCIJwY5syZQrOzs41PqZMmdKoYxNNH64jlwqY8tPOYLMYATXBXn5wOge1rxOSWnmNRykIgiAIgiAI9WvWrFlMnz69xtcau5RGBEzXCZvNVpWSV+OCtfH2dDyFyg9nG1gR6XiCIAiCIAjCzcHHxwcfH5/GHkaNREredSI/P5/KykpUKhW+vr4XvH7mbIc8hcofZVElgFiwVhAEQRAEQRAamAiYrhPnZpcCAgJQKi9Mszs3w+TsFYY5oxwQLcUFQRAEQRAEoaGJgOk6can6pcqyMkrzMgAICWyKbLQiqRWofS7spCcIgiAIgiAIQv0RAdN14lIL1mYlngRAUrjhp7cXvakDnZGU0rUboCAIgiAIgiDcgkTA1EgKyk3sTMwDwGQykZ2dDdQ8w5Rxdv0lSeWP69l1hkX9kiAIgiAIgiA0PBEwNYLEnFI6v7OBx348QKXZSmZmJrIs4+Ligl6vv2D7jHj7DJNC6Y/GbANA7SvS8QRBEARBEAShoYmAqRE09XbG10WLocLI1vjcS9YvyTYbmWdnmNQOgUiVFgCUeu21G7AgCIIgCIJwS+rXrx/Tpk1r7GGQn5/PsGHDCAgIQKvVEhwczFNPPUVJSUmDn1usw9QI5MpKXklYSU5yGqvbB9FdcfGAqSAjHVOFAVDhGRyKtdgEgFKvuXbjtVqRLRYUWhGkCYIgCIIgCOfJsozVakWlatiwQqFQMHr0aN5++228vb1JTExk6tSpFBQUsGjRooY9d4MeXaiRKfUMTfduoEdmLLbVKy7Z8CEj4TgAksoX7wBn5Gs8wyTLMqmTHySxbz+MycnX5JyCIAiCIAg3O1mWsRkMjfKQz9bEX87kyZPZsmUL8+bNQ5IkJEli/vz5SJLEmjVr6NixI1qtlu3btzN58mTGjBlTbf9p06bRr1+/qq9tNhtz584lLCwMnU5H27ZtWbJkSa3G4u7uzhNPPEGnTp1o0qQJAwcO5Mknn2Tbtm21/civmJhhagQOLZrj/eyz5H3wAXcfX836psORJImAgIALts08u/6SQhmAl6cDAJJGgaS9cK2mhlC2aROGffvsY3nlXzRZuACphnWiBEEQBEEQhNqTKyo42aFjo5y7xcEDSI6Xr4efN28e8fHxREdHM2vWLACOHTsGwMsvv8z7779PeHg47u7utTrv3LlzWbBgAZ9//jkRERFs3bqVCRMm4O3tTd++fev0HjIyMli6dGmd97sSYoapkXg99CC54VGUu9nbhPv6+KDRXJhml5lwtuGDyh93Z/vrSlctktTwLcVlWSbv40+qvq44dIiC775r8PMKgiAIgiAIjU+v16PRaHB0dMTPzw8/Pz+UZ2+cz5o1i8GDB9O0aVM8PDwueyyj0cicOXP49ttvGTp0KOHh4UyePJkJEybwxRdf1HpM9957L46OjgQGBuLq6srXX399xe+vtsQMUyORlEqcXp/F6XlzAPCooWDNaCgnLy0VsAdMTloFBkDpem3ql8q2bKEyLg7J0RHvJ58g5/0PyJ33P5z79kUbEXFNxiAIgiAIgnAzknQ6Whw80GjnvlqdOnWq0/aJiYkYDAYGDx5c7XmT
yUT79u1rfZwPP/yQN954g/j4eF555RWef/55Pv300zqNpa5EwNSI2neJYlmT5qiQcdqylYoRI9FFt6p6PSsxAWQZSeGKztUN1dmW4teifkmWZfI+sX/zedx3Lx4PP0z5vn2Ub9lKxsuvEPrzT0hqdYOPQxAEQRAE4WYkSVKt0uKuV05OTtW+VigUF9RGmc3mqr+XlZUBsGrVqgvq9rV1aCx2bqarZcuWeHh40Lt3b2bOnIm/v39d30KtiZS8RiTLNlQ6+z+BV24uGTNmYKusrHq9quGD0h/PACdsJWc75F2DGabybduoPHoUSafD48EHkSQJ/1mzUej1VB47Rt5XXzX4GARBEARBEITGpdFosFqtl93O29ubzMzMas8dOnSo6u9RUVFotVpSU1Np1qxZtUdwcPAVjc1ms08mGI3GK9q/tkTA1IhycnJAtmKSFZjNEqakJHLe/6Dq9b/XL3kEOmMttn8zNPQMkyzL5H5ir11yv+ceVJ6eAKh9ffB77TUA8j79jMrjxxt0HIIgCIIgCELjCg0NZc+ePaSkpJCXl1cVpPzTgAED2L9/Pz/88AMJCQm88cYbxMbGVr3u4uLC9OnTee655/j+++85deoUBw8e5KOPPuL777+/7DhWr17Nd999R2xsLCkpKaxatYopU6bQs2dPQkND6+vt1kgETI3oXDvxYoUrH7a/C4DCBQso274DWZb/FjAF4BnghPUazTCV79hJ5eEjSFotng89WO0115EjcBk8GCwWMl5+BdlkatCxCIIgCIIgCI1n+vTpKJVKoqKi8Pb2JjU1tcbthg4dysyZM5kxYwadO3emtLSUiRMnVttm9uzZzJw5k7lz5xIZGcmwYcNYtWoVYWFhlx2HTqfjq6++olevXkRGRvLcc89x++23s3Llynp5n5ciybVtxH4TKCkpQa/XU1xcjKura2MPh2XLlnH48GHwi2J+ihPvZ6yj1d71qHx8cP/qc75//UWQVGj1Uxn/UhfkX05gLTbhM7UdmmCXBhmTLMucvu9+KmJi8Jg0Ed9XXrlgG0t+PkkjR2EtLMRzyuP4XAerPwuCIAiCIFyvKisrSU5OJiwsDAcHh8Yezi2lPj57McPUiM7NMHVq1QyAuSGDUYeGYsnJ4fgH7wMgKXyQJCXufo5YSxt+hsmwezcVMTFIGg0eDz1c4zYqT0/83nwTgPwvv6LiyJEGG48gCIIgCIIgNCYRMDWSiooK8vPzAejXvgU+LlryLQoyp74MKhVZp+IBe/2Si6cDKosMNkAChXPDBUznOuO53XUXal+fi27nOnQIriNGgM1GxsuvVGtWIdx8ZFmmsLKwsYchCIIgCMJNasqUKTg7O9f4mDJlSqOOrc4B06RJk9i6dWtDjOWWkp6eDoCHhwfOzk4Mj/YD4PcKN7yefIJCJ/uUoULlj2egM9YSe8MHhYsGSdkwi9aW79mLYf9+JLUaz0dqnl36O7+Zr6H09sKUlETuvP81yJiExpdWmsaTfz1Jn1/68NUR0R1REARBEIT6N2vWLA4dOlTjY9asWY06tjoHTMXFxQwaNIiIiAjmzJlTdeEv1M25dLygoCAAbmtt7x2/Pi4Lh/vup1Rn74SnUPnj4e94vkNeA6bj5Z1d9MvtzvGo/fwuu73SzQ3/s9/ABfPnYzjQOIuvCQ3DbDPzbey3jP1jLNvTtwPw5ZEvyavIa+SRCYIgCIJws/Hx8bmg3fi5h4/PxbOeroU6B0y///476enpPPHEE/zyyy+EhoYyfPhwlixZUm1xKuHSzgVM5xbu6hTqgbeLlpJKC5t2HQJAgQ5J4YI28cD5DnkN1FLcsH8/hj17QK3G85FHar2fS//+6O+4A2SZjFf+hc1gaJDxCdfW4dzD3LPyHj488CGV1ko6+XaipUdLKq2VYpZJuCGU79pFydq1jT0MQRAE4SZwRTVM3t7ePP/88xw+fJg9e/bQrFkzHnjgAQICAnjuuedISEio73HeVGRZrpqZOzfDpFRI
VWl5B/YfBkChsb9m+/UbjMn2hcAaaobp3LpLbnfcgTogoE77+r7yMip/f8ypqeR88J+GGJ5wjZSaSnl799s8sPoB4gvj0Wv1zO45m2+HfsvzHZ8H4Nf4X8ksy7zMkQThvPyKfLanbye/Iv+anK/0r79IffgR0qc9h+ki7W8FQRAEobauqulDZmYm69evZ/369SiVSm677TaOHj1KVFQUH374YX2N8aZTWFhIRUUFKpUKX1/fqufPpeUVp5wNOJUBSNjQlaZTtnWf/akGmGEyHDyIYdduUKnweuzROu+vdHHB/+3ZABQuXEj57t31PUShgcmyzJ8pfzL699H8cvIXZGRub3o7y8csZ0yzMUiSRDf/bnTy7YTZZuaLI1809pCFG4DVZuXnEz8zctlIntjwBP0X9+eelffwUcxHxOTEYLFZ6v2cFYcOkf7CdDi7sGLFYdHFUxAEQbg6dQ6YzGYzv/32GyNHjqRJkyb8+uuvTJs2jYyMDL7//ns2bNjA4sWLG70463rm4eHBiy++yAMPPIBKpap6vnOoB15OGjwN9rv3CpU/7r6OqN30yCb7P1VDzDBVdcYbOwb12RTBunLu2RO3e+4GIONf/8JaVlZv4xMaVkZZBk9tfIoXtrxAbkUuTVyb8PWQr3mn1zt4OHhUbSdJEs90eAaA3xN/53TJ6cYasnADOFFwggfWPMA7e96hzFyGp4MnMjLH8o/x5ZEvmbhmIn1+7sPzm59nWcIyssuzr/qcppQUzjzxJHJlJZJaDUDl31aZFwRBEIQrobr8JtX5+/tjs9m499572bt3L+3atbtgm/79++Pm5lYPw7t5OTk54eTkVO05pUJiRKgGx9hKZEmJpPTBM0SP/9uzKVhqT2UxZyRDB9+aDnlFKg4donzHDlAq8Xz88as6lu+LL1K+fQfmtDRy3n0X/9mz67S/zWjEsH8/5du2U757Nw4tW+L/zttISuVVjUuomcVmYeHxhXxy6BMqLBWoFCoeaf0Ij7R+BK2y5pnM9j7t6RXYi+3p2/n00Ke82+fdazxq4XpnMBv49NCnLDi+AKtsxUntxDPtn+HuFndTaCxkR/oOdqTvYGfmToqNxaw/vZ71p9cDEOEeQa+AXvQM7EkHnw6olepan9eSn0/qY49jLSzEIToa/R1jyZ41m4pjImASBEEQrk6dA6YPP/yQO++885Ir5bq5uZGcnHxVA7tVdXQsIQEwqbzRSSo8A51wGTiQ4vWbAMj79ANc+n6L0sWlXs6Xe7Yznn70aDRn66mulMLJCf8575A6cRJFvy7BZfBgnPv0ueQ+ppQUyrZtp2z7Ngx79iL/bT0n44kTKN3c8H35pasal3Cho7lHmbV7FicKTgDQ0bcjr3d7nXC38Mvu+3T7p9mevp01yWt4pPUjRLhHNPRwhRvE5jObmbNnDpnl9lnyIU2G8FKXl/BxtHc38tJ5MbrZaEY3G43VZuVY/jG2p29nR/oOjuYdJaEwgYTCBL479h06lY6u/l3pFdCLgU0G4qXzuuh5bQYDZ6Y8gTk1FXVQEMGff4a1qAiAyrjjyFaruPEiCIIgXLE6B0wPPPBAQ4xDOMuxyN49T1La65k8A52xVVo4909lPp1A9ttvE/Du1d/Zrzh6lPKt20CpxGvK1c0unePUpQvuEx+g8IcfyXxtJuErlqPU66tetxkMlO/ZQ/m27ZRt3475HwXZKl9fnHr3Qu3rR94nn1Awfz7aiGa4jRtXL+O71ZWZyvgo5iN+OvETMjKuGlemd5rO6GajUUi1y9CN8oxicJPBrD+9no9jPmbegHkNPGrhepdVnsW7e99lQ+oGAAKcAni126v0Cbr4DROlQkkb7za08W7Dk+2epKiyiF2Zu6oCqPzKfDaf2czmM5t5d9+7jAwfyQNRD1wQoMsWC+nPv0Dl0aMo3dwI/upLVF5eKN3dkRwdkQ0GTMnJaJs1a8BPQBAE4ebVr18/2rVrx3//+9/GHkqV/Px82rZtS3p6OoWF
hQ2e2VbngEmoJzYbGItB517t6azEkwDo/hYwnWspLqkB2UzxH8tRuLjiPW0aSufqaX11ca52ST9yJJqQkCs+zj/5PPcc5Vu2Yjp9mqx33sHr0Ucp27qNsu3bqNh/APnv7efVahw7dsS5dy+cevVG2zwCSTq/MG/eJ5+Q+eZbaJo0wbFTp3ob463odMlpntzwJKml9iB1VPgoXuj0Ap46zzofa2q7qWw4vYGNZzZyLO8Yrbxa1fdwhRuA1Wbl55M/87+D/8NgMaCUlExsNZEpbabgqHas07HcHNwYHjac4WHDsck2ThacZEfGDjac3sCx/GMsS1zGssRldPfvzgNRD9AzsCcSElmzZlO2eTOSVkvQZ5+iDQsDQFIqcYiKpGL/ASpiY0XAJAiC0EBkWcZqtVary29oDz/8MG3atLlm68FeVZc84QqVZsPCcbDwLrCe7xJlNlaSe9qeyqhSBWCSZLSu6vOL1ro74v3M0wAULlhA0siRlG7ceEVDqDh2jLLNm0GhwLOeZpfOUeh0+P97LigUlCxfQdKo28l57z0Mu3Yjm82og4Jwv+9egj79lBa7d9Fk/nd4PvwwDi2aVwuWvKY+icvQoWA2k/b0M5jSxCLJVyomJ4YJqyeQWppKgFMAXw7+kjm951xRsATQ1K0pI8NHAvBRzEf1OVThBnEs/xj3rb6Pf+/9NwaLgTbebfhl5C883/H5OgdL/6SQFER6RvJI60f4eeTP/Dj8RwY3GYxCUrArcxdP/vUkY/8Yy5Z3nqZo8WKQJAI/eB/H9u2rHUfXKhqAyqOijkkQhOuPLMuYjdZGeciyXKsxTp48mS1btjBv3jwkSUKSJObPn48kSaxZs4aOHTui1WrZvn07kydPZsyYMdX2nzZtGv369av62mazMXfuXMLCwtDpdLRt25YlS5bU6XP77LPPKCoqYvr06XXa72qIGabGYKmEtAP2GaYt78KAVwHITkrEZrWiddKD5EKuwsaupAI6ldrb4yr1GrwenoJDq2iy3noLc1oaaU9OxWXwYHxfexW1b+2bQeR9+hkAriNGVN2RrU+O7dvj+dij5H/+BZJWi2PXLjj36o1T715oQkOrBUYXIykUBPx7LqfPnKEyLo60J5+kyaJFVzWrditam7KWV7e9islmItozmo8GfnTJepDaeqLtE6xJXsOOjB0cyD5AR9+O9TBa4XpXZirj40Mf89OJn7DJNlzULkzrOI3xzcfXOq2zrtr5tKOdTzvSy9JZeHwhSxOWErwtEd9V8QCcnNwb757t+Gdlp0P02YBJdMoTBOE6ZDHZ+PLZLY1y7sfm9UWtvXxt57x584iPjyc6OrqqA/axY8cAePnll3n//fcJDw/H3d39UoepMnfuXBYsWMDnn39OREQEW7duZcKECXh7e9O3b9/L7h8XF8esWbPYs2cPSUlJtTpnfRAzTI3BvQmMOrtO1bb3IWUHABnx9gJ8J/cmSJJErlJm9ZFMrCVnZ5hc7Z3LnHv3InzFcjwffQSUSkrXryfpthEULFyIbLVe9vSVx49T9tdfIEl4PTGlAd6gnfezzxK+cgXN9+wm5Msv8Zj4ANqwsFoFS+codDqCPv0EpbcXxvh4MmbMQD67vsoNz2KEzMNV68XUN1mW+Tb2W17c8iImm4n+wf35dti39RIsAQS7BjMmYgwA/zv4v1rfrRJuXFvObGH0H6NZeHwhNtnG8LDhLB+7nLta3NVgwdLfBToHMqPzDFYGvsMTa+3P/dFNYqbfToYsGcJr21/jZMHJqu0dou2popUnTlRPBRYEQRBqRa/Xo9FocHR0xM/PDz8/P5Rnm+jMmjWLwYMH07RpUzw8PC5zJDAajcyZM4dvv/2WoUOHEh4ezuTJk5kwYQJffHH59R2NRiP33nsv7733HiH1WEpSG2KGqbFEj4PEjXBoASx9FKZsJzPB/j96pToAgDyljX1xWcxorbM//7c1mBQ6HT4vvIDryJFkvv46lYePkD37bYqXL8d/
1iwcWrS46KmrZpeGD0cbfvmuaFdKkqR6qRtQ+/kR/PHHnH5gImUbN5L74X/xeeH5ehhhIzqzD35/AvIToNlgGPs5ONVPIAP2luFz98xlcfxiAO6PvJ8XO72IUlG/ncIeb/M4yxOXczDnIDszdtIzsGe9Hl+4fhzPP84zm57BJtsIcg5iZreZ9Ajscc3HUXn8OPnPv4TCasN5xHA6TB1C7IkFHM49zB+n/uCPU3/Q1b8rE6Mm0jOkBwpnZ2xlZRhPncKhZctrPl5BEISLUWkUPDbv8rMqDXXuq9WpjrXliYmJGAwGBg8eXO15k8lE+3+kVNfklVdeITIykgkTJtTpvPVBBEyNafi7kLoLCk4hL3+azAT7N6/Z6A2A0UlJkcFEXmYZTthT8v7JoUULQhctovDnn8n9z4dUHj5C8rjxeD44Ga8nn0Sh01XbvvLkSUrXr7fPLj35RIO/xfqia9sW/3feJuPFGeR/9RXaiGbob7+9sYdVd+ZKbOtnU77sG0pStRiLvfFqtQ3X7F4w/ltocvUXoAazgelbprMtfRsSEjM6z2BCVMP8cvFz8uOuFnex4PgCPor5iB4BPeo0g3gxNpOJkuXLMSYk4jxgAI5dOtfLcYUrI8syc/bMwSbb6Bfcj/f6vIeD6uJLSzQUc3o6Zx57HFt5OY5duxI4998EazQMCR/G4dzD/Bj3I+tPr2dP5h72ZO4hyjOKd6Miqdi7j8rYWBEwCYJwXZEkqVZpcderf64nqlAoLsg2Mf9tdr+srAyAVatWERgYWG07rbbm9R//buPGjRw9erSq5uncuby8vHj11Vd566236v4makmk5DUmrTOM/wYUakqPrqe8qBCFUkmFwQ2A9q3ta5eU51UA51Py/klSKvG4/37CV6/CZfAgsFjI/+prkm4fTdn2HdW2zfvscwBchg294bpG6UeNwvOxxwDIfG0mFYcONe6A6sBWWUnJL5+TfkdnEmYsIW2bOyWnHTEWqUnf4UH21nLkb0fA1veuKkUvx5DD5LWT2Za+DQelAx/2/7D+giVTeY1PP9L6EXQqHcfyj7HxzJU1ITnHWlpK3ldfkThwIJmvzaTg++9JnTSJpJGjKPjhR6wlJVd1fOHKrExayaHcQ+hUOl7r+lqjBEvW4mJSH3scS24u2ogIgj76HwrN+ZtIbb3b8n7f91lzxxomRU1Co9AQlx9HWTM/ACpEHZMgCMIV0Wg0WGtR8uHt7U1mZma15w797VotKioKrVZLamoqzZo1q/YIDg6+7PF/++03Dh8+zKFDhzh06BBff/01ANu2bWPq1Kl1e1N1JAKmxhbQHga+TobBFQA3bz8kSY2TXsNtHe3Rt8pg76Sn1F86+lb7+hL00UcEffIxKj8/zGfOcOaRR0h/cQaW/HyMCQmUrlsHgNeU+p9dMlvNHM8/ztKEpby9+23uX3U/fX/py+KTi+vtHN7TnsV54EBkk4kzTz2N+R8/mNcTW0UFJWvXkT5tGvFdOpP+xjxKEizYLApUXm54TJqE+9l1zQpOOnP6Lw/MK+fAgjugLLfO50soTOD+1fdzvOA4Hg4efDP0GwaGDKyfN3Pge+Q5gbDzwo54njpPJkTag7KPYz7Garv8L9V/MmfnkP3eeyT260/uB//BmpuHys8P11GjkBwdMZ06RfacOST07UfGa69REXusxuNYbBZWJa1iyoYprE1eW+dxXGsVhw5xavhtpE9/EZvJ1NjDqVGZqYwP9n8A2FMwfZ1q31ymvtiMRs5MnYrp1ClUvr4Ef/kFSlfXGrcNcA5geufpdPbrDEB6kH2WvfIi3zOCIAjCpYWGhrJnzx5SUlLIy8vDdpEbuwMGDGD//v388MMPJCQk8MYbbxD7t5tVLi4uTJ8+neeee47vv/+eU6dOcfDgQT766CO+//77y46jadOmREdHVz3CzjYti4yMxMfHp37e7EWIlLzrQfenyFyxAbDhbC7FgH39pa5hHng7anAz2Df7ew3TpbgMHIhj
127kzptH4YIFlKxYQfnWraiDgkCWcRkyBIcWza9qyCariYSiBOLy44jLj+N4/nHiC+Mx2y4srJ69ezYmq6leZjokhYLA/3uXlPvux3jyJGeenErowgUoHK+ujXF9sZWXU7Z1KyVr11G2dStyRUXVa2pHCy4dQnB99E0cOvdEUtjvVzh160rGy69QkVdK8jofAop34pzTC8Z9DWG9a3XeXRm7eH7z85SZywh1DeXTQZ8S7HL5uzW1UpCEedUM1MiYN8xGHT0OXAOqbTKp1SR+PvEziUWJrE1Zy4jwEbU6tDEpifxvvqF4+Qo4O22vjWiGx8MPo7/tNiSNBmtZGcXLl1P0088YExIoXvIbxUt+w6F1a9zvuQfX24Zj0ShZfmo538Z+y5nSMwDszdxLiGsIUZ5R9fM51LOSP/8k48UZyEYjpuRkbJUVBH34IZJa3dhDq+azw5+RX5lPqGsoD0Rd+4XLZZuNjJdepmL/ARTOzgR/+SVqf//L7tfcvTk7MnZwzNtICPZ0ZJvJVG1WShAEQbi86dOnM2nSJKKioqioqOC7776rcbuhQ4cyc+ZMZsyYQWVlJQ899BATJ07k6NGjVdvMnj0bb29v5s6dS1JSEm5ubnTo0IF//etf1+rtXBFJvoVaW5WUlKDX6ykuLsb1IncnG8vCl58mKzmZcHd/MriXdoND6DmuGe/8dJhJh0uwShDyTi8kRd3qOCqOHiVz5usYT5yoei7s92V1yuU328ycyD9hD44K7MFRQlECFpvlgm1dNC5EeUQR6RlJlGcUx/KO8X2c/a7B9E7TmdRqUp3Gf9ExpaeTfOddWAsKcBkyhMD/flgVgFxr1rJyyjZvpnTdWsq2bkM2GqteUztZcA2uwKW5DofJHyBF1Vx3ZTpzhrRnn8UYdxwAr1aleEWXI/V/GfpMh0s0a/g98Xfe2vkWFtlCR9+OzOs/D71WXz9vzmaj4NPBeOTtr3oqM3Qs/pPnX7DpF4e/4ONDHxPiEsLvY35Hrbj4hb8hJob8b76h7K+NcPZXkK5TRzwffhjnvn1r/LeUZZmKgwcp/OlnStetq+p6ZnFyYGsbJX+0riTTU8Jd646/sz9x+XGEuISweNRinNTXTyt6WZYp+P57ct79P5BldB07Unn0KLLJhMvwYQS+9x7SNVz871JOFZ1i/PLxWGQLnw36jF6Bva75GLL//S4F8+eDWk3IV1/i1K1brfZbmbSSV7a9Qgfv9rz6VjzW4mJCf/0VXevohh2wIAhCDSorK0lOTiYsLAwHh2uf1nwrq4/P/vr4v/ItzmIykZNqvytuVjQFG3gqk4BmDAnxgMMl5CMTKMuoqFvApGvdmrAlv1Lw/Q/kffEFrrcNr1OwJMsyD697mJicmAte02v11YKjKI8oglyCqhXnDwsdhlal5csjX/L+/vexylYein6oTu+hJurAQII++h+nJz9I6Z9/kvfxJ1WL+l4rloICCr7/gcKFC7GdLWQEUAf44upfjIvnGRzczUjRd8Bt74PTxReJ1QQHE/rTT2S/M4eixYvJO+ZCRZ6aAOO/UZ3eDnd8DS7VU6FkWebTw5/y+WF7XdptYbcxu+dsNMr6u4Oeuf6/+Oftp0x24P+0TzHL9D7+KcvIi38Cr+Zdq207IWoCC48vJLU0leWJyxnXfFz18dpslG3ZQv4331Cx/0DV886DBuL58MMXLDr6T5Ik4dixI44dO1L4wlR2fvU2Lqt34V1UyYBdMGAXlLRuQvjkJ1H37clda+8ltTSVWbtm8e/e/74umkbIVivZc+ZSuHAhAG733oPfq69SvmMHZ556mtI1a8lQqwmYOxdJ2biFwLIsM3fvXCyyhf7B/RslWCrdtMkeLAEBc+bUOlgCaOFu7xQaX5SAQ3Q05Tt2UHksVgRMgiAIQp2JgOk6kJ18CpvVgqPejRJlW3vAdPQdGNiGSGctRUC2bGV3UgG9IureelpSqfB8+CE8HnqwzvvuyNhBTE4MaoWaLn5d7IGR
pz1ICnAKuOxFqCRJPN3+aVSSik8Pf8qHBz7EYrPwWJvH6jyWf3Ls2BH/t94i81//Iu/TT9E2a4rrbbdd9XEvx5yVRf6331K0+FfkykoA1CEhuA4dgqt3Jtrk75BkCzh6woj/QKsxtTquQqvFf9ZbOHbsQOabb1GeDcl/+hBYtgvHnF4w7isI72cfg9XMGzvfYEXSCgAebf0oT7V/ql7XwilIPY77rrkA/OrxGC8/8Qqb3t9Pf9Nmcpa8gPtLW1Eqz5/PSe3Ew60f5v397/P5kc8Z1XQUGqUG2WymeNUqCr75BmNCon1jtRr97aPwfPjhOrW2z6vIY0HcAn4++TPlTcqRpkgMyvTlnuPuuOyPx/XoafJeeAmltxfvP3kfE6UvWJ28mm7+3RgbMfbSB5dlWP0iFKfB0HfAs2mdP7NLsRkMpL8wnbJNmwDwmTEDjwcnI0kSzn37EvThf0h7dholy1cgqdX4z57dcLOm5gpQOcAlfn7PdZvTKDTM6DyjYcZxCbIsk/+5fV0Oj8mT0Y8aWaf9Q/WhqBVqysxlmJuHwI4dVMTGUrulFQVBEIRrbcqUKSxYsKDG1yZMmMDnn39+jUd0ngiYrgOZCfZ0OZ/QCLJStUjYcLfEwbLHkcM+ASAXma1HM68oYDrnSu6wfxv7LQB3t7ibl7q8dMXnfqLdEygkBf/P3n2HR1F1ARz+zbb03hNCQgi99y5FmhWwgYI0QVCUothFPkFFBRFEkSZNBCk2EEHpvfcaEiC9955t8/0xSSASIAmBDXLf5+FhMzvl7pKEOXvuPefbk98y58QcTGYTrzS988ITzk/1oyA0lNQlS4h97320/tXv2ifI+ogIUhYtIv33P4rX21g3bIj76FHY1/dA+mMMXCmcp1u/Dzz6Fdh7lPs6Tn36YF2/PtFjx6G/epWI7R54NsnANbsvUue3KOg0gTHbx3Io/hBqSc2ktpNuyObcqQK9noQfh1MPPUfVTXhqxCRsdRpqPj+d/KXtqa8/w59rF/H4gJKBb/86/Vl+bjnxOfGsvbSW/t6PEjFwEPrCbtwqOzucB/THdfAQtF5lX6AZlx3HknNL+DX0VwpMypTHYOdgRjYaSc/AnmhUGgwxMaStXUv6ul8wJSWjnfotH018gsnqjUw7PI0mHk0Icr5FcBaxH44sVB5f3Q2PzYAmz98yqCgrQ2Ii0a+8Sv65c0g6Hb5ffolj714l9nHo3h2/GdOJeXMiGb/8iqTT4f3RR5WXGSvIhnO/wrGlEHMM/NtAr8+g2o19NPKMeUw/Oh2A4Y2GU82hWuWMoRzyjh4l79QpJJ0OtxEvlft4rUpLTeeaXEy9SLy/Pa6Iwg+CIAhV2ZQpU5g4cWKpz1l6KY2okmchZ5LOcCntEgBxl5SAycE9EAAndx0arRqu7MR08SAASZj5+1w8RlPFS05XZIxH4o+gkTSVsvZoVJNRjGs+DoC5p+by7Ylvb6jXXxGeE9/ErvNDyAUFRI8ZgyEh8Y7Peb38kEvEvDmRy488SvradWAwYNuqFf4/LCJw7RocbM4jLeoGCWfAxhWeWQLPLa9QsFTEqlYtFnmLUQAAvwRJREFUAteuVTJmZkg84UTMPmdMW6fz04peHIo/hK3Glu8e/q5cwZJsNiPfpmy5LMts/mEy9QznycYGzxcW4GSnTPOrXqM2EXWUm9eGF2ZwKLRklUJrjTWjmowCYOGpBUS//x76K1dQu7ri8eYbBO/cgddbb5U5WArPCGfSvkk8+uujrLq4igJTAY3cG/FN12/45clfeDToUTQq5XMfrZ8fnuPHU2v7Npz69QOzmfqzN/NMbn3yjHm8uetN8o35N79YUQVAK0cw5CiNhX95CfLSyzTWmykIDSV8wADyz51D7eJC9aVLbwiWijg+8gi+n08DSSJ91c8kfv75nf+MxJ2GP9+Ar+rC+teVYAkg6hAsehjWvQTpkSUOWXRmEfE58fja+VbKFNqKSC4sF+vUrx8a94p9UFTbRSlu
E+KlrLcsCA3FnH+L7wFBEATBYjw9PW8oN170525XwbsdkWGygCvpVxi9dTQyMnMfnktsWAgAGiuljLhbdRdo8TlsGIcp/CLgRY5WRWpOHoeuptIhuOJZpvIoyi49GvQo3nbelXLOEY1GoJE0fHXsK+afno9JNjG22dg7+hRdUqvx++orwgcMQB92majRo3F54XmsatbEKigItbNzhc6bd+oUyfMXkL39Wm8hu84P4T5qFLbNmysbQjbBlknK47qPw+Nfg33l/FCr7e3w/WoGNi2ak/D5F2RFQ26GFX87pICvmg+CnqKDX4dSjzVlZqK/ehV9eDgF4eHow8PRXw1HHxGBysaGgB+XY1Wz9ClnazZvp0/8QpAgrs0H1KpZcs1bnacnkTl9DYHGBP74+XNqvTEdV7tr66b6Bfdj8dnF1N8dRd6OXUhaLdV/WIR1vXplet3xOfFsj9zOjqgdHIo7hIwSMLTxbsOIxiNo493mlt8vkk6Hz9QpmNLTyd6xg/4/XOHSi46cJowvj3zJR+0+uvGg5FC4tEl5PGIrXNwIOz6Fs79A1BFlOmT1sq+fKZJz8CDRr4/FnJWFLiAA/wXz0QUE3PIYpyefRDYYiPvgQ1KXLUfS6fB4443y/Yz8O5tUxKUGtBgKwQ/Dwe/h5Eo4uw4ubIB2Y6DjBKL0GSw5q1RAeqvVW9hobEq9xN2UH3KJnF27QaXCrQJTiYsUBUxnVXF0dHfHlJxMwcWL2DRtWkkjFQRBEB4E91XAtHv3bqZPn86xY8eIi4vjt99+o2/fvpYeVrm52bgR5BTEyaSTjP1jFH1SPJAkFUaDO5CKm589NB8CYdswnXQFIMDPGsLz2Hgm7p4ETFczrrItchsAwxpU/IalNEMbDkWtUvPlkS9ZdGYRJrOJCS0m3FHQpLa3x3/uXMKffY6CCxeIn3Ttpljt5oZVzZroagZhFVQTq5pB6GrWROPpWXzN9Px0Nl7dSCffTrhdiCV5/nxyDyjZPSQJh169cB/1csmb/pxk5RN7gLavKtObKrmwgCRJuA4ciE3jxsSMG48hNpb3f5LZ0E3PI1emU5DpRoEqQAmIioKi8HBMqak3PacpL4/oMa8RuHYNageHEs9tPx9LrQNvY60yEOPWjlq9X7vxBFb2WPX8H/w1lmHGNUz6+QlmDX+4+L3UqrWMc+qH77ZZADiOf+2WwZIsy1xKu8SOqB1sj9zOhdQLJZ7vUq0LIxqPoIlHk7K9aSjr9vxmfkXkiJHkHTvG+6udGNsf1l5aSxufNvQK/FeG54Ay9ZXaj4BHHeVPjc7wy3BIC4clj0Dnd6HTm6Au26/N9N9/J27SR2AwYNO8udIfzeW6FTSyDFd3KUELFF63LnjUxblvH2S9nviPp5CycBGSVle2oiZxp5Ug6fQa0Gcp21RaqPc4tBgGgZ2gaF1U37nQ+mX450MI3wN7Z8KJH/myZgMMZgPtfNpVXh+vckr5QckuOfTsedsAs1T5GXB+PXX0ys9BSNolbBo0IHvXLvLOnhMBkyAIglAu91XAlJOTQ5MmTRg+fDhPPfWUpYdTYU5WTszvMZ83dr1B9FGl+pyVtyvpicq6GDdfe+XG+8lvMJ/eDiboKa9nMp34+2w8U55sgEZ9d2dTLju3DBmZztU6E+wSXOnnf7H+i2hUGj479BlLzi3BKBt5q+VbdxQ06apXJ2D2R6T9/DP6TC0FV69ijIvDlJJCbkoKuYcPl9hfZW+PLiiIWHcV21UhJFoV4HxKRVB0Ybl0jQanJ5/EbcQIrIJqlLyYLMOf4yEnCTzqwcOTKz1Yup5No0bY/zSfHSP70jTMxNP/qAjDHVZ/ddNjNJ6e6AID0dWoofwdGIDGw5Po119HHx5O7FtvU23ud8WFBS4lZHFi9ae8qQojX2WH74sLb/qarFoOIv/QPBxTztP06gIW76vJSx2V98icn0/t2RvRG+F4kER6cz3/7r9tNBs5kXiiOJMUkx1T/JyERFPPpnTz70a3
6t2o7li9Qu+ZysYG/7nfEfHiYAouXeLLX50Y2z+b/+3/H/Xd6l/rU5WTDKdWKY/bXxcgVmsBo/bApreV53d+Bld2wFMLwPnmY5JlmeTv5pL87bcAODzSG9/PP0dlVdh4Wp8Lp1fDofmQdKH0k6itcHELRn4kgIRNESTPnYtkyMB93Nug+VcVxJtlk1yDlGxSkxduPj3UtykM2aBkSv/5kN15MezMuoJGhne9OluksqAhJobMjX8B4DZiRNkPNJuuBaAXNoAxn9oqNQT4EZUVhbp+d9i1i/zrmigKgiAIQlncVwHTI488wiOPPGLpYVQKW60tc7rOYdrhl4BUTmnDaRKTCUi4+io9Y2RrZ0ySMr3LI+43Btjo+DmnDYevptL+LmaZknKTWH95PcBdXb/wfN3nUUtqph6cyo/nf8RkNvFu63crdpNmNsGemVjtnIa3swm6D4fHt2PKzkF/9QoFly+jv3yFgitX0F++jD4yEnN2NvmnT+MKPHPtROg1YNevDwGjX0fr51f69U6vVm7KVBp4aj5o735PhYXhq1j7DIw+40fXTXFgNqPSmtE5mNDVb4auWRd0gYFY1aiBtnoAavvSew9VmzOHiIEDyd65k+Rvv8Vj7FhSc/RMXfIbi1gNgObRz5Gcb9H4VqXG+rHPYfmTvKjewmObetA68DkaVXMi8cvp6ENDMbk4MPfxXEwXVvBCvYFYqa04EHuA7VHb2RW9i4yCjOLTWamtaOfTjq7Vu9K5WmfcbG5egr081E5O+C9cSMQLL2AbE8PUX21597ks3t71NssfWY5WrYUji8CYD77NIOBfUxytHaHfPKj5MPw5ASIPwPcd4YlZ0PDGD21kvZ64jyaT8fvvALiNHIHHhAlKUJoeqVzr2DLIT1cO0NpB0+fBwQeSL0HSRUi6BMY8SDyHqxOYm9iTdMqRpIU/IR2dh1sHX/CorWSjclNLySY9oQRK12eTbkWSoO6j6Gs8xBe/9AZDBoMyMgn67VU4/Qv0/AS87l3z35Sly8BkwrZdW2waNijDAZeVgPbkKsiMLvGUq9mEh9aRJEMmyQFOaIG8s2dKP48gCIIg3MR9FTCVV0FBAQXXNRHNzMy04GhupFVrCc7zJI5Usuy0kCWBxoyDu3LzLecZkQuTHWophf9Ji9gr1WDjmbi7GjD9eOFHDGYDTT2a0tyr+V27DsBzdZ5DLan5+MDHrLy4EpNs4v0275evPHZaBPz6MkQdvLbt6GLwboy65TBsGjXCplGj4qfic+JZeHA6p0/8jV+yTFC6FZ1NQXhkSGxzjWVF4wzsPE+wzFlNqSu3MqKV8tMAXd4Fn7JPFauoiMwIfgn9BVmScBs6iU+rFVDdXsPbjmtxvrAS+AfaPAItH7vtuWwaNsBnysfEvvMuyXO/R127LmPCbXkndxZWKgP6Gg+ja/Hi7QcV1Bm5ziNoQjbxluonXlsVzNpGBtJWKlPMAqZ/hW/aN1xMvcjgTYOJy4krrnAHSqa1c7XOdPPvRjvfdthqbSv69tyS1ssT/0ULiRg4CJ/oVN79TcPUZ87wzYlveLPJq3C4sDJeu9duniVs/KxSTe7XkRB9BNYNg8vboPcXYGUPKOvGoseOI/fgQVCr8Z40CZf+zynV9w59r6yLkgsLbjgHQJtR0GwQWP+rybDZDBmRkBQCSRdxbxqCbHeU5P1ZJJ5wIF4bx4mmsZxM3o8EvCLnEVSWbNJtLL/0M5GGDDys3RhV+xE4slh5jfN2QPPB0PWDSlufdzPGtDTS160DbpNdKsiC83/AiZ8gcv+17dZO0PAZaDYQ9n0D53+ndmHAFOYlUw/QX76COScHlV3VaWYsCIIgVG3/6YBp2rRpfPzxx5Yexk2ZjAYSr14GoH2NvhAPiVbRzDg2nbdavYUpUw+AylaD5Ncc66hDfKP9llfOfsKUPg1Rqyp/ukyWPou1IWuBu5tdut7TtZ9GJamYvH8yq0NWY5JNTGo7qWxB0+k1sPFNKMgEnQM8
9hVkRMH2qUpQ41mveLG+3qRn+fnlLDi9gDxjHpKHirYdnmF0s9dxsVbWlrjkJfPn5qFEZEYw4p8RLO29FHeb64JTsxl+f1W5XrVW0GHC3XhLbvDdie8wySaqW7fgg1U5GM0yJBhYJT3GYq98uqb/qmRADPnQ7tXbns+pTx/yzp0jbfmPRL/9Dh07N6Cp22VMOkd0/b4t8/RCqcdU5NAtdOcE62K3E71yMzaA67BhOHTsxOvRMmO2jSE8MxwAP3s/uvp3pVv1bjTzbFZc4e5us6pRA/8FC4gcPJh6V3N5fYPELNUSWmdl0Ck3GZz8oX7fW5/EtQYM2wS7voDdM+DECog4AM/8QEGeIzHjx1MQGobK1ha/GV9g7xwH8zop1ROL1OgMbUZD7V6gukljWpUKXALBJZD06m04lXSKk/61cbDbQLstsagO23PUVcW2ZsrPx3YHJ15r9jqDGwxBfbNz3kZ8TjwLTi8AYELLN7Gv+YQyzi2T4cJ6ZbrfmXXQ6Q1lvZ727hSCSPtpJXJeHlb162HXvn3JJ81mJTg68ZMSLBlyCp+QoGY3JUiq89i1bK97LQBqm1XsA85LcTTy8sKYkED+hQvYtryxnLogCIIglOY/HTC99957vPHGG8VfZ2Zm4u9/i2lG91hi+BVMBgPWDo60dOnNUSJItY3l1wuryCjI4H0P5WZc7WgFTy1EnteR5gVhDCr4mV+ONeS5VpX/WtaErCHbkE1Np5p09u9c6ee/mX61+qFRafhw34esu7QOk9nE/9r/7+ZBU146/DURzijBHf5tlLUlLoHK+qL4M3D+d1j9IvLI7ezOusIXR74gKisKgGaezXiv9XvUcytZjMDdxp1FPRcxZNMQIjIjGPnPSBb3WlwcUHF4gbJOQmsL/eaXuQDAnTifcp5N4ZtAlrhwviNms8xjjXwwmWU2n4tnWPzTvKMx8IpmA/z9njKdq9Obtz2v11tvEXH4FI4XT9PrwCFMPSXUfb8AR9+yD869FlKrEcgH5jHu2C+QJ5MXEIzHhPEAdPLrxHut3yNLn0UX/y7UdqltkXUxoGTWqn33LVEvj6LdRQNZNmY+6PEra9VqvNq+UrZ/S7UWun2oNBD+9WX0keEkj3iKjKs2IMuYXVyo/nJr7I6+DLkpyjEaG2jSH1qPuuXUNrNsJjwjnJNJJzmZeJKTSSe5mnH12g4tZJIzJJ44LPPy32a6BHXn7/p69sbsZebxr9kWtZ2pHaZSw6nGTa9xMzOOziDPmEdzz+Y8HlTYINY1CPr/qGTI/n4fYk/AtilwfDn0WwDV25T7Ordizs0lrbBhofuIEde+T9Ijlel2p1YqBTiKuNZUgqTGA8CplKmzboUBU24OqOBS2iWsGzYkOyGBvLNnRcAkCIJQRl26dKFp06bMmjXL0kMp9R5i1apVDBgw4K5e9z8dMFlZWWFVtNC6Cirqv+Rbqw6pcbkAtG3YjD0Fa9hwZQM1rrrzCK1QO+nAJQDpiVmwbjhj1H8w5w+J98Ne4p2+bXGy0VbKeApMBay4oNywDGs4rHzT4irBEzWfQCWpeH/v+/wW9htXM67S0rsldV3rUs+1HtUcqiljitivTMHLiAJJrUyL6/jGtRteSVIqgKWEEZ5ykS9+7cNetVJQw8PGgzdavsFjNR676Y27t503i3otYuimoYSlhzFqyygW9VqEY0Y8bJ2s7NRjCriVXpa7sk3arTQQNWQ2wUauxpRnG/JUcz8kSeJifCZztofx5ZkB5Ms6Jmh/gW1TSEjNwOvJj2+ZKdobns4bNfuwLPI0hmwNsWdqU21qf8odznR+h5Qf10GijEGt5q16zzI/vYBgTx2SJPFCvRcq/uIrmV27dvhO/5KYCW/Q84RMhp2Z91p7srDJC5QnN2OwrU1Kej/SNv0GJhmQkXwhqEUINhFKc9Rsax+MLUfg3H442LqWep6EnAQ2Xd3EkYQjnEo6VWJdV5FAx0CaeTajqWdTmvRpgu23K0n/aSV15m2l
65dfsr19T7488iWnkk7x7IZnGdtsLAPrDSxztulw3GH+Dv8blaTivTbv3fhzEdAeRmxXyo9v/V9h1cDe0HGCUjnw30UoKij9l18xpaej9ffHoWdP0OfAjs+U8ueySdlJ5wAN+0HTgcqHJLcKvt2VYjV10uPAVacETA0Gk71tm2hgKwiCUIlkWcZkMqHR3JuwYsmSJfTu3bv4a+cKto8pj/90wFTVxYYq/Zd8gutw+bQyveShRm2o5jCbN3e9SXJ8PAAm+8KbgoZPY7q8C/WJZYzT/EbGxb/5eUYfGj/9Du3ql/9T5X/bcHkDyXnJeNl68WiNR+/4fBXxWNBjqFVq3t39rvJJe9LJ4ufstHbUkWyol3yVuiY99dwCCXpyHtqAdjecJ0eC+Y178GNoBkbJgAaJwQ2G8XKTl7HT3n7tgr+DPwt7LWTY5mFcSL3AK1tGsyAmBjtjvjL9p1U5qndVUK7eyNjf1nIp/yiyrCJI/RRzx3Yi0P3a+Ot6O/LdC825lFCLb7f78fk5He9qVuF1YjabLsfh88yXNK3ucsO5ryRlM+an47xovYMaHZOI2OZOdmgWSd9+i+e4ceUaZ15YNEmnrAEz7i3ziLe25bWVJ/h9TAestRWbIgaQnqsnObuAmh72lZqVcuzdG1NaGvEfT+HZvTKLbNUsuPQzrzR95bbHGtPSSP3hB1JX/IRc2AA12deThvVDcXFXfoYPmeuy2NibrfktMG1VU+v0GbrU8aBLHU9aBrqAZGRH1A5+D/udA7EHMMvXGglbqa1o6N5QCZA8mtLYo/G17GYh+YMPQG8gfe1aYt95hy4ffUTbJ39l8v7JHIg7wPSj09kWuY2pHabetsKgwWxg2uFpADxb+1nqutYtfUeVCho/p0wl3PSuku3Z8xWEbYWnFiol0e+AbDCQukTp/eQ2fBjSle3KVNuMwoa6gZ2U9V71ngBdGdceFWaYAjPi0brXJMeQQ06wD4ColCcIQpUgyzLG69ba30saK6sy/d86dOhQdu3axa5du5g9ezagBCzDhg3jr7/+4sMPP+TMmTP8888/LF26lPT0dH4vLHwEMH78eE6ePMnOnTsBMJvNfPHFFyxYsID4+Hhq167NpEmTeOaZZ0q5eumcnZ3x9q6c/qBldV8FTNnZ2YSFhRV/ffXqVU6ePImrqyvVq1es9LAlxYUqGSbPoNoc35oOgKufHf5OnZnXfR5nlu8AYEPiJvrmeeFu4476iVkQ3JW8LZ/ilB7KKNMq0lZvYIvvIDoOfB8be6ebXO3WTGYTS88tBWBw/cFK9bB7RJZlkrIKiEjNJSIll8jUINropmHlcAVn5yQupV0kNO0SOYYcjpPDccei3kFmtLvHEOwcTF3Xukomyq0e0VnRfH3sa5LykkCS6JSbzzspqQTU0ipVycooyCmIBT0WMPzv4ZxOPsPr5ny+s3bCps93d7WEOMCZ6AzG/nycBPsfUdtAXdserHqxD9qblJOv7eXAN883IyxxBr/94k6/hDk8krmGpQvS+brGm4ztXocWAcqNd0augRHLjuJTcIXxVr+idTXi88pTxH77Gynfz8O6Xj0ce/Ys0zhN2dnEvDkRTGYcaqrxCUzmTdVGpsQ/w6cbLzC1b8Nyve7ErHz+OZfA5rPxHLiSgsks09TfmfHda9G5tkelBU4unetibJhJ8llHhv9j5hvb7zji3ZJW3q1K3d+UnU3q0mWkLl2KOTsbAKsmTfi+Zk/Wmr1oaZ/MohZRODZ5Ajs5kEYhiaSEJHE8Mo3QxGxCE7P44cgebFyPo3U6hYmc4nM392xO94DuNPNsRh2XOrf92ZNUKrw//h+ybCZj3S/ET56MW9RLzJvwPb9c/pUZR2ZwPPE4T69/mvEtxvN83edvmi3++eLPhKWH4WzlzOvNytDnydoJ+n0PdXrDhnEQdwrmPwTdP1Z6OpWlKl8pMjdvxhAbi9rVBSd5M6z8XXnCqbqyLrF22b4fS47VEey90GYnUNPWh4vZ
kVz1UeENSq+yrKwb+pAJgiDcS8aCAr4ZUvZAoTKNXbYOrfXtK/zOnj2bS5cu0bBhQ6ZMmQLAuXNKlv7dd99lxowZBAUF4eJy44ezpZk2bRorVqxg3rx51KpVi927dzNo0CA8PDzo3LlsS0HGjBnDiBEjCAoKYvTo0QwbNuyuT/e/rwKmo0eP0rVr1+Kvi9YnDRkyhKVLl1poVBWTnZZKZlIiSBI6W19kOR1rey22jsr0lpbeLXF3NkGqnhBDGEM2DWFBzwX42ftBg37Y1HuSglPryNo8FfeCKHrEzSPtq1WktHyNaj1eA135Ko5tj9pORGYEjjpHnqld8oc3Jj2PK0nZWGvV2GjVWGvVWGtV2GjV2OjUWGvUqG5TgKLAaCI6LY/IlFwiiwOjXCJTc4hMzSXfYC7lqEDqezdiUWNfPE7t4ioGQuyduVCnGxclIxdTLpJlyOJC6oUbmp0CVHeozjut3+Gh6POw+R3YMklZQ1KzW5nflzqudZjfZAIjDk3miI01E7wa8I2dO5UzCelGZrPMwj1XmPFPCLLNWWw8otCprJn3xLs3DZauF+xpT/Arn5C80x/Xne8wVPMPVlcNPPv9S7QP9mRM12Dm7gwjMjmDjTbz0cpGqPMoTgM+JT/LgdRly4l79z2sgoKwCr59/634KVMwREWh9fXF5+NXkP58iaHSn/xAZ348CO1ruvFII59bniM2PY/NZ+PZfDaeIxGpyPK15zQqiZNR6QxdcqRyA6f93+LeIBuTXR3SDsXx2noT3ztNoOaE9bhaX5s+Z87PJ+2nlaQsXIgpPR0Aq7p1cR07ljci7NgekoSDtYapL/XD2ccRgIZAQz8nXutWi6up8cw7to7dcX+RLSvr50yA2eCEIaM5fuqHqO1en3bu/gS7l/3mXVKp8Jk6Fa23D8nffkvKoh/QR8fw9OfTaO/bnsn7JnMo/hCfH/6crRFbmdJhyrW+U4WS85KZe3IuAGObj8XJqhwfttTvo0yJ+2OMkmXa/A5c2qxMhS3PGjiUD0xSFimNal0DElBdOgeSSiku0fX9smeUSuNWC7ITqK115iKRXDTH4u/nhyEmhvxz57FrW7nrsARBEP5rnJyc0Ol02NraFmd1Ll5UPvCfMmUKPXr0KPO5CgoK+Oyzz9i6dSvt2imzg4KCgti7dy/z588vU8A0ZcoUunXrhq2tLf/88w+vvvoq2dnZjB07tgKvruzuq4CpS5cuyNffTd3HirJL7v4BZKUor8nNz67EjaBNnhYDemQHNZFZkQz+azDze8xXGsmq1Fg1649V46cJ2boY2wNf4S/H43LkU7JPfo9117fQtBpepv5Asiyz+MxiAAbUHYCt1pbMfAObzsTxy/EYDl9Nve05dJrCAKowmLIuDKbUkkRseh5xmfnc6p9OJYGvsw0BbrZUd7XFzc6KDQfPMjbla3x3HwEgOKATtZ+azxOFC7xlWSYmO4aLqRe5kHqBi6kXuZhykQJzAUMbDGVw/cHo1Drw66QUgTi5AtYOg5d3KAvay8KQR8PtXzA3K4nRvj7sy7zMW7veYkaXGWhVlZuFS8jM5401J9kXlgKY8PTfRh4wpMGLJSv1lYF7l1Hg4oT8+ys8r9mBraTnjbDR7A1LBuBN3QbqyFfBxgUenwWShOdbb5F/MYTcQ4eIHvMagWvXoHZ0vOk1MtavJ3P9BlCp8J0xHXWzZnB2KarwPSzw+4vHYobw9i+naejnhL9ryQA+PDmHTWfj2Xw2jlPRJdftNPF35pGG3vRu4I2dlYYFuy/z48GI4sCpWXVnxnevzUO13CsWOKVHwbnfkCTw+vQrCqYvIffvLYxcmcI3nmOZPPxHMBhI/+UXkud+jzEpCQBdjRp4jH0dux49mfjLGbaHxGCtVbF4aCvq+Vx7nwxmA3ui9/B72O/sid6DsbA3gE6lo4VHJ1zMHQiL8OFEcgZXZVgYf5WfDkUyb1ALHqpd9pLgkiTh8doYdP7ViP1wElmbNxMZH0+1
ud+xoOcC1oSsYeaxmRxNOMrT65/mzRZv8mydZ4uzTbOOzSLbkE19t/o8FVyBRuAO3jBwndJb6p9JSlPfue3g8ZnQ8Okynybnz1UUhFxCpTHjUiMFfJrCE7OVprp3yj0YIvZSWy5szpx6iScaNiwMmM6KgEkQBIvSWFkxdtk6i137TrUsZ/GcsLAwcnNzbwiy9Ho9zZo1K9M5Jk2aVPy4WbNm5OTkMH36dBEw/VfFFa1fqlWHlFhlio+br32JfUyZyrzWid3e4fLpWMLSwxj691DmPjyXxh6NlZ3UGur0epmM9oNYtfJrOsYswd+QBP+8h2HvLLRd3lJ6qGhu/oNxJP4IZ1POYqW2IkDbk9dXneCfc/EUGJWsjyRBTQ97TGaZPL2JPIOJfIOp+HkAvdGM3mgmI89w0+vY6tRUd7UtDoqqu9kR4Ko89nOxKZlBubKTCWfeR50dj15WM8P4HHsznmd6jj0NCj8IlySJag7VqOZQje4B3YsPlWW55I20JCk3cUkXIeYorHoBRmwBK+UT/cTMfH7Ye5U/T8dRy8ueIe0D6VzLQ8mabZsCyZdobu/NNw/NYMzed9ketZ0P9nzAtE7TKlzG+d/+ORfPO7+cJi3XgI1WTd9O8fwZF4uTlRPDGg6r2EmbDEBS6+DXkfRhH0GuWvonv0SgHMUY9W8gA4/OAAcv5W3SaPD7eiZXn3kGfUQEMW+9hf/cuUjqG1+jPjKS+I+V1Lz7mFexbV7Yr6vXpzC/Mw1S/uZZ716sjfdm3M8nWD2qHVeTc9h0Jp5NZ+O4GJ9VfC5JglYBrvRu6E3vht74OpcsWf3BY/UZ+VAQC3ZdYcWhCE5EpjNk8WGaFwZOncobOB2apxQRqPEQUrVm+E9vQEjKi1gfPc0j3x7j1/iXqLMlFHW8Elyavd0xDX0aU++uxGh1LNywhT/OxaG1UvPpU82o7mEgOS+ZlLwU1l9ez59X/iQ1/9qHDA3dGtI3uC+9a/QukcVJz9WzJzSZHw9EcDg8lZeWHeGr55ryZJPyZWic+vRB4+1D9Ouvk3fyJOEDnsd/3jwG1B1AB78OTNo3iWMJx/jk0CdsidzClPZTSMxN5I/LfwDwfpv3y/R9nL1nL0nffIPaxRm/mTNR29sr/3itRxZWDRypVNJbNxxCNsOj08HG+eYnNBbAnpmkTF8E6HCuZUD9xKdKNcHKqj5ZuI6pTq7yOzYkLQTrhv3I+vtv8s6IdUyCIFiWJEllmhZXVdn9q5+dSqW6IbFhMFy7L8wunNK+ceNG/PxKVjetaJG2Nm3aMHXqVAoKCu5qoTdJ/q+kbMogMzMTJycnMjIycLzFJ+f3ws+T3yHm4jl6jR7HlTMeRJ1PpeugutTvqNwsyUYzMR/uA8BnUluyNbm8uu1VTiedxkZjw+yus2nne2Oxg79PR3Dkt+8YZl6Hn6SUNZadqiE99JZSWaqU9RGD/hzBqZRDqLI6kBH9RPH2YE97nmruR9+mfjfcxIIyfSzfaCJPbyLfaFb+NlwLqPL0JgwmGW8nawLcbHGz05XtxjbxAszrCGYjslst9jSexrhdMmm5BjQqiTFdgxnTNRidppzrJTLjYEEXyI6Huo8T1WM+8/dcZc3RaPTGklMCa7jb8VbtRB49PlLZMHAd1OrB7ujdjNsxDqPZSN/gvnzc/uM7qiaYpzfxycbz/HRIWdzewNeRGc/VZ+zeAcTnxDOx5USGNBhS4fMDSsPUtUPBpKcgqCdkRGOVch7qPg79V9ywHivv3DkiXhiIXFCA2+hReI4fX+J52WAgfOAg8k+fxqZlCwKWLSsZVP0+Bk6uoMC7JS3j3yIr34S7vY7kbH3xLmqVRLsgN3o39KZnAy88Hcr2H0ZiVj7zd11hxcGI4oC9XIFTfgbMbAD6LHhhbfHaGFN2Dif7P4nt5djiXdPs4NcOKrY1kTBqypfJcrV25YmgJ+gb3FfJCN9CgdHEm2tO8efpOCQJJj9en6Edyl/EpeDyZaJGjcYQ
HY3KyQn/b+dg26oVZtnMqourmHVsFvmmfGw1trhauxKdHU2fmn34pOMntz1vwhdfkLN7T/E2m2bN8F+4ELX9df9Zmgywe7rSo0o2gaMf9P0egkqZYhG+FzaMJ+9SOOFbPEAlEfz7j2hrtyj3676lS3/DyudI9apPZ1vlP+qdQXNIHPkK2mrVCN66pXKvJwiCcBP5+flcvXqVGjVqYH2fBUk9e/akTp06zJkzB4CdO3fStWtX0tLSSlSoe+edd9ixYweHDx8u3tahQwe0Wi07d+4kKysLDw8PFi5cyIsvvlgpY/v000/56quvSE29+WyoynjvRYbJAkxGIwlXlOIVPrXqcmRzBACuvtduPoqa1qKRUNlqcJKcWNhjIRN2TmB/7H7GbBvDzC4z6eLfpcS5ezUOoHmNqUz6pS8eoasZo/kD74xoZYH2npnQ6FnwakCyXTC/RVix6sxRkhwPIcsSmQntcbXT8WQTX55uXo2Gfo63vAFVqSRsdRpsdZX8bbRtKpiNULMbUv+feEhnyz/NC5j0+1k2n4tn9rZQ/j4Xz4xnm9DQrxzrLhx9oP8KzEseRXXxT9adHccKozIVqUWAC0PbB3IyKp01R6JITk6iSda7IMFhtz64OrUlGHio2kN8+dCXTNw1kd/Dfsdabc37bd4v99SwuIw8DlxOYe7Oy4QlKjdyLz8UxJs9a7M65Cfic+LxsvWif53+5Tpvqeo+Bs+vgp8HYnXlH2WbjSs8/nWpxStsGjTA55OpxL71Ninz5mNdv36JIhBJc74l//RpVI6O+H355Y0ZqG4fwrnfsIo/yrLWsTy124vkbD06tYqOtdzp3dCbHvW8cLErZSWYLCuZwJBNyp+sOKW/VoDSxNTTwZpJj9dnVOeg4sDpeGQ6gxcfpkWAC+O716Jj8C0Cp+PLlWDJvQ4EX8tKqu3taLx8Daee74uUnMa+rh4caOdCvlammtmI0WwkM7+A9LwCkEzY6CQ0ahmT2YRRVp7XqDQ85PcQfYP70rFaxzJP2bTSqJk9oBmudjqWH4jgfxvOk5qjZ0KP8vWssqpZk8DVPxP16qvknzpN5PCX8PnsM5yeeJyB9QbSya8TH+77kBOJJ8jNzsVea8/4FuNLnMNslsnKN5KWqyctPgnTkoXYbvodyWzGrFJzuVU3gs4cIO/ECaJGj6L6ggWobAunW6q1ypqj4B7w28uQegWWPwntXoNuk5TpwbmpsOUjOPEjACmXlPnwTn36lBosGU1m/jmfQKNSpnWWiZsSrLqmXMXDrT5JeUlE+WqwAgzR0RjT0tCUcaGyIAjCgyowMJBDhw4RHh6Ovb09ZnNp686hW7duTJ8+neXLl9OuXTtWrFjB2bNni6fbOTg4MHHiRCZMmIDZbKZjx45kZGSwb98+HB0dGTLk1h8Qb9iwgYSEBNq2bYu1tTVbtmzhs88+Y+LEiZX+mv9NZJgsMY6kRNZO/YC87ExemrWcxW8rmaSRsx5CZ60EHwXhGSTNO43a1Rqft69V7tKb9Ly9+222RW5DI2mY1mkavWv0vuEasiyz7lg0n284yZPGv3lVsx4PqeRakQJZy1gPH/Y7QEODD28FvUyj5u3QOvne9SpwNxV1BH7oriz6fvVgiXLFsiyz8UwcH/1xjtQcPRqVxKtdavJat1plyjadikpn7s4wHC+uZrp2AQCz3P9Hu0cH07qGa/HNaU6Bkfhlw6gZu55wsxeP6qeRizWdarkzpF0gXet68tfVP/lg7wfIyAxtMJQ3Wrxxy5vblOwCDl5JZf/lZA5cTuFK8rUqaZ4OVnz1XBM61fIgW5/NI78+QnpBOh+3/5inalVgbcnNXN0NKweAIQeeWXzbdSYJ0z4nddkyJFtbaqz+Gatatcg5eJDIYcNBlvGbPRvHXjepXrbrS9jxKThXZ/vDG8k2a+haxwMH61KCCJNB6a0VsgkubSrZnBRAZw+D10O1G2+oEzPzmbfrCj8dupZxahngwvjutekQ7Fby38Rk
gNlNIDMGnpyjTFX9F9lkAklC+le1t42n43ht1XFkGV7vFsybPUuW0S76NXonxShkWeabbWF8vfUSAM+3rs4nfRuivk1BlX8z5+cT+/Y7ZP2jBMce48biNno0kiRhMpv49tgSVoesIUDVD1t9S9Jy9aTnGkjL1ZORZ0AymXj86n4GXvwHB0MeAAe8G/BDw8eJsfegdlok0w8uRFeQh22bNvjP+x6Vzb8y0AXZ8M+HcEwpFY5nfWg+BPbMgBxlTViB/3NcmbEPZJmgjX9iVbNkX7M8vYnXVh5n28VEbHVqPu3XkH7NqpXvTTUZ4VNvMBsY3aYf+xKPMantJJqP/QFDRCT+ixZh37FD+c4pCIJQAfdzhunSpUsMGTKEU6dOkZeXV1xW/N8ZJoDJkyczf/588vPzGT58OAaDgTNnzhSXFZdlmW+++Ybvv/+eK1eu4OzsTPPmzXn//fd56KGHbjmOzZs389577xEWFoYsywQHB/PKK68wcuRIVLeo0loZ770ImCxIn5dLUmQBv399Akd3a178pH3xc7mnkkhddRFdoCOeo5uUOM5oNvLhvg/ZeGUjKknF/9r9j361+pV6jei0XN5ae5oTV2Lpq95HY+ky9VRR1JaiSNMaeayaL2ZJYk1MHPX0hfNMbVzAswF4NVCqyhU9LmflvXKTZVj2BITvgaaDoO93pe6WnF3A5D/OsfFMHAB1vR1umm2SZZmDV1KZuzOMPaHKmhRJgsWea+ma8ZtyMz5iK3jWu3bQhQ2wehCypOJMj1XMCXNn24UEzIU/Kf6uNgxuG4i16xG+PKZMZxrRaASvNX2teC1IVr6Bw1dT2X85hX1hySXW7IBS5KKRnxMdgt15qWMN3OyVebffnviW+afnE+gYyG99fkOjquTsXepVyIyFwNvfJMpGI5EjRpJ78CC6gAD8Fy4gYtCLGBMTcX72WXymTrn5wfpcmNMCsmKh+/+UJqfXy0tXqquF/AWhW+H6hq1qK2UaV+3ecP53JdCzdoIhf4JP41Ivl5iZz/e7LvPTocji6ZWNqzkxpF0gjzfxwUqjhtNr4dcRYOcB48+WqSAKwJ7QJIYvPYLBJDOwjRLE3M3ypSsORjDpj7PIMvRu4M2sAU3L3c9KNptJnPEVqYuVYi5OTz2Fw/sfMndvBD/suYreVMqng7JMq4QLjDy7Af9sJaiJc/Nje7eBpNdrgoutDpMss+pwJHVSIvji4EJ0+nzs2rej2ty5qEr7T+jS30olvcIgCVCye0/MJnbhRjLW/YJ9t274zy35s56ao2f40iOcjEovsf3ZFtX4uE+D8mW1v20NySHM7DiUJTHb6V+nP8PWppO5cSMe48fjPnpU2c8lCIJQQfdzwHS/EwFTOVW1gAng9I4o9qwOJbCxO4+9eu1mMGt3NBl/XcWmiQduz9/YTNIsm5l6cCrrLinVVd5r/R4v1Huh1GuYzTLLDoTz5eYQ3B10PNWsGv2a+vDT+U/4Ofwv2lv7MF/ygcTzkBIGcik3Uw6+MHwTuARWyusuVdg2WPEUqHXw+nFw9r/l7htPx/HRH2dJydGjLs42BWOlUSPLMtsvJvLdjjCOR6YDyrqZvk39eKVLEMFu1vBjPyU4cw2CkduVQDE7Cea2hdxk5Sa/+/8AiErNZcXBCH4+ElVc2MJGq6Zxg9OcL1CmFwXY1aGB1XBCIp04E5OByVzyR6uutwPtarrRvqY7rWu44mRTMtuSnJfMo78+Sp4xj5ldZtIjoOylOu8WY1oa4U8/gyE2FsnaGjk/H11QEDXWrb02FetmTv0Mv40CnQOMPaFMhQvZrARJkQeUaZdFbN2VAKnOI1Cz67VS0gXZyvdE1CGwdYNhm27ZJDUhM595/wqc3O11vNDKn9cvj0SbeBq6fgCd3y7T6z8RmcbARYfI1Zt4rLEP3wxoVu6MT0X8dSaO8T+fRG8y0zbIlYWDW5aenbuNtFWriJ/6CZjNnPOuzeTmL5Kjs6FDsBudanngbKPF2VaH
W2IU9ou/g6OHAFC7uuIxbhzOzzx9w5TLDadieWPNSYITrzLt4CKsDPnYdexIte++RVXagtucZPhzAlzeDh3GQYdxGFIyuNy9O7LBQMDKldg2v1YdKSo1l8GLD3M1OQcnGy0LB7dk/+VkZm8LRZahlqc93w1sTm2vMpZh/3kgXPyTje1f4t24LTT1aMqshIdJ/OILHHp0p1rhnHxBEIS7SQRMliMCpnKqigHTjhUXOb83lhaPBNC2z7UpKel/XiF7bwz2D/nh/GjpJbBlWWb60en8eF65YR/XfBwjGo246bX0RjNatYQkSaTmp9JrXS/yTfks6rmINj6F5XUN+ZAcAgnnIfEcJJxTKl/lpSlrEwauvTvT9cxmWNhFaYTZ9lXoPa1Mh6VkF/DR+nNsPK1km+p4OTCwbXVWHooszuroNCoGtPJnZKegkusgclKUIhAZkUpvphfWwpoXlRt6r4ZKEPWv6oJ5ehN/nIxh6f7w4vNrnI5g7bURSZ2PLEsY0tpRkNSTGq5uhQGSG22D3HC3v3X1lmmHprHy4koauDVg1WOr7noTtrK6vgiEpNUSuGY11vXq3f5AsxkWdVO+f6ydIT+95PMe9ZQGqHUeBb8WcLNKbfkZsOxJiDsJ9t4w7C9wq1n6voVSsgv4+UgUPx6IID4zn7aq8/ys+wS9ZMX5AftpUrvmbd/fsMQsnpl3gPRcA51qubNoSEslU3WP7A9L5uUfj5FdYKSBryNLh7XGw6F8FYCOhKey+tvVPL95PrbGAmKdfVB9/jVdOjdGkiSMqakkzZlD+uo1YDYjabW4DhmM26hRt2zqujc0mVE/HiUwNpTPDi5CZ9Rj1/khqs2Zg0p3ky5lZnNxY9uE6dNJ/WExNs2bE7jyp+JdzsZkMHTJEZKzC/BztmHZ8FYEeyrj2H85mXE/nyQpqwBrrYopTzbk2ZbVbv9zsmUy7JtFWLPn6Ze+DzutHVuDvyFq8BA0Pj7U2rG9XO+pIAhCRYiA6fZGjx7NihUrSn1u0KBBzJs3r0LnFQFTOVXFgOmXL48SfyWTni81oFYrr+LtKSsvkHc6GafHg3Do6HfT42VZZu6pucw7pXwTjWw0ktebvX7bm4iiqV9lujlPDoXv24NJD88sgYaVuK6myLnflEpuOnsYdwrsytd36K8zcUz6Xck2FbG30jCobQDDOwbevApb3GlY3AsMuVCtNUQfVjJcI3eAd8ObXk+WZQ5dTWXZ/nD+OZ+AWcrEudomjLbHAXC1cuf9tu/SM6BnmQKf6Kxonvj9CYxmIwt7LqStT9tyvf67LXPz3yROn477mDE4P1X69M9SReyHJY8oj1UapXhDnUeVbJJrOSrB5abC0seULKiTv5Jpuk0GEsBgMvPPuQS8Nw6mRcFhfjR2Z5JxOI38nBja/rrpev8Sk57HM9/vJy4jnyb+zqwc0QY7q3tfI0cJIA6TnK0nwM2WH4e3obrb7afGxqTnMe2vC/xZ+EFCw9x4Pjm8BKv0FNTu7lT75hvyTp4k+fvvMWcpgb9Dz554vjURnf/t31eAM9EZDFt6GJ+r5/nk4A/oTAbsu3al2uxZSDcLmgBTZiZhXbthzsmh2ty5OHRTmpHvvpTEKyuOkaM3UdfbgWXDW+PlWPLnNjm7gAmrTxZPr+3b1JdP+jXC/lb/NidWwB9jMNToTBtVNAazgY291pHTtR/IMrX27kHjXr7fN4IgCOUlAqbbS0xMJDMzs9TnHB0d8fT0rNB5RcBUTlUtYJJlmYUTdmPINzHgo9Yl+jAlfn8KfUQmri/Uxbbx7ZtZLj67mK+PfQ3AoHqDeLvV2ze9Uc815NJjXQ8y9Zl81fkregbeZOH+9XZ+Djungb0XjDl86/4q5WUywtw2ynTAzu9C1/cqdJrUHD1TNpzj8NVUBrSuzpB2gTjZlmEa09lfYd11vY66fwwdx5f5uklZBeTpTfi72nAg7gCfHvyUyCylTHhHv4683+Z9/B1ufRP6/p732XBlA219
2rKw58IyX/u+ELoFCjKh5sN39n2TnagEXylhyjTKYZuU5qm3kxQC37VGRmJ6rRUsuqAunq7nZqfjhTbVGdgmAG8n5ZdoSnYBz84/wJWkHII97Vkzqh2upVX0u0fCk3N4cfEholLz8HCwYtmw1tT3Lf33V67eyLydl5m/+woFRjOSBANa+fNmzzo4ZacRNfoVCgo7tBexql8Pr3ffxa516wqNbfDiw7iGnOLjg4vRmQw49OiO38yZSNrSf/aSFywkaeZMdME1CVq/Hkml4rcT0by19jRGs0y7IDfmD26B402mIJrNMvN2X+arfy5hMssEudsx54VmNPC9ScXMyEOwuCc4VuO5Wg24kHqBWV1nEfjK1+gvX6bavO9x6NKl3K9dEAShPETAZDmV8d5XvIGMcMeyUvIx5JtQqSWcvUp+alzUtFbtVLYpOMMbDueDNh8AsOLCCj4+8DEms6nUfX8J/YVMfSYBjgE8XP3hsg224wSlCWR2gtLMtTKd/Em5CbZxhXZjKnwaVzsdswY0Y/97DzP24VplC5ZAyZh1elN5XL09tH+9XNf1cLCiupstkiTR3rc9v/b5lVeavIJWpWVvzF76/dGPhacXYjCV3tT3Utol/rzyJwDjm48v17XvC7V6KBX57jTItvdUquU5BxSWrO6jrI+5nQNKQQGp7mO8PfBxDr73MG/3roOPkzUpOXrmbA+j4xfbeW3lcfaFJTNs6RGuJOXg62TN8uGtLRosAQS62/HL6PbU9XYgKauA/vMPcPBKSol9zGaZ305E023GLr7ZHkaB0UybGq78+XpHpj3VGHd7K7Te3gSsWIFdp04AqD3c8fn0U2qsXVuhYKlobOteaYe+cQs+bjMUvUpD1patxEx8C9lovGF/c0EBqcuXA+D20giQJObtusyE1acwmmWebOLL0uGtbhosgdLO4NUuwax+uS0+TtZcSc6h39z9/Hgw4oaGiQC4K81ryYymlpMyvflS6iVsGjYAIP/suQq9dkEQBOHBIQImC0qJVUpLu3jboVZf+6eQZbm4D5Pasew3awPqDuCTDp+gklT8EvoL7+99H4O55E26wWxg+XnlhmVIgyHFVd1uS2MFT8xSHh9dDFGHb7l7mRnyYNcXyuOHJoK1hTJ/3SbByzth8O83X0tTRlZqK15t+iq/PPkLbbzbUGAq4JsT3/DMhmc4En/khv2/Of4NMjI9A3rSwL3BHV37P8/JD4asV4qQJF2EH/sq6+tuJjtRKT4BSj8glMD61S7B7Hm7K98PbE7rGq4YzTJ/no5j4KJDnI7OUHoivdSm1IbNluDpaM3qUe1oXcOVrAIjgxcf5u9z8QCcjErn6Xn7mbD6FPGZ+VRzseH7gc35+eW2N2Rd1PZ2+H8/l4AVPxK8eTPOTz91Yx+t8o7NwZrVo9pi3a49n7QegkGlJuvvv4l9+50bgqaM3//AlJyMxtsb+0ce4eMN5/l8k5LxGtmpBrP6Ny3zOrGWga78NbYTD9f1RG80M+n3s4xZeZzM/H99MGHrqnwYA9TRKT2XQtJCsG6gTLnNP3v2Tl6+IAiC8AAQAZMFpcQoDUvd/OxKbDfnGMCkfFKqdijfp9t9gvvw5UNfopE0/HX1LybunIjedG1dz6arm4jPicfN2o0naz5ZvgEHdlTKfSMrjXBvkjEplyOLlL44jtWg5Ut3fr6KkiTwbXZDkYc7UcOpBgt7LmRap2m4WrtyJeMKw/8ezgd7PyA1X+lIfTzhOLuid6GW1LzW7LVKu/Z/mkugEjTZeUD8GVjxDBRklb7vkUVgKlCKSlQvuS5Mo1bxSCMf1oxqx19jO9G/pT9WGhUOVhqWDmtFsKd96ee0ECcbLcuHt6ZHfS/0RjOvrDjGkMWH6fvdPk5EpmOrU/NWrzpsfaMzjzTyuemUXEmjwbZlS1R2dqU+XxEO1lqWDGuFZ89ufNpqMAZJTeZffxH73vtKbyuUHlcpi38AwPHFwYz95SxL94cD8OFj9fjgsfqo
ylmB0MVOx6IhLfnwsXpoVBJ/nYnnsW/2cOpf5ciLGtjWNivB2KW0S1g3VAKmvHNnS89MCYIgCEIhETBZUGphhsnVt+SNiylDCXBU9lqkMjRk/bdegb2Y1XUWOpWO7VHbeX376+QZ8zDLZpacVRpJDqo/CCt1BYKDnlOV8s6J5+HAt+U//nr5GbBnpvK4y7tl7otzP5EkiceDHmd93/U8W/tZANZfXs+Tvz/Jr6G/Mvv4bAD6BvelhlM5iiA86NxrwYu/K6XgY44qDXn1uSX30ecqARMo0yxvUXyjvq8jXzzTmKMfdmfvO91oXM35rg39Tlhr1Xw/sDnPtayGWYZdl5T+Rs+0qMbOiV0Y0zW43D2bKouVRs2cAc2o+9SjTGv1IkZJReaGDcR+8CGy2UzWlq0YIiKRHB15MyeQv87Eo1OrmPN8M0Z0Kr0SaFlIksSITkGse6U91VxsiErN45l5+/lh79VrgVDhtLw6+cr3SFRWFKbg6qBWY0pKxpiYeMevXxAEQfjvEgGTBV3LMJX8JLu865dK09m/M991/w4bjQ37Y/czestoNl/dTFh6GHZaO56r81zFTmzrCj0/VR7v/EJphFpR+7+FvFRwrw1Nnq/4ee4DTlZOfNTuI3585Edqu9QmoyCDyfsnczzxOFZqK0Y3GW3pId5/vBvCoF/ByhEi9sLqgWAsuPb8qVWQmwLO1aHuE2U6pYO1tuxr3yxEo1bxxdONead3XXo18GL9ax2Y8WwTPB0t/4GDSiXxvycb0GHoU3zechAmSUXm778T++EkUhYpwevm4I7sjc1VMnnDW/FEE99KuXZTf2c2ju1E7wbeGEwyU/88z9ifTypBU2GGySUtCk8bpcrS5fworIKV7WJaniAIgnArImCyEJPRTHq88mnnjQFT+dcvlaatT1sW9FiAvdae44nHeWfPOwA8V+c5HHV3sFaoyQCo8RAY82Djm1CR6SzZScWL8en6AajvfclmS2jq2ZTVj69mYsuJ2GiU9TEv1HsBb7syVHsTbuTXXOkNprVVGqOuHaZMFTWb4eBcZZ+2r/7nvr8kSeKVLjWZ/2LLKpcNkySJ17rV4onXBzK95QuYkMj89Vfyz55Fr9ayxKsVXo5WrBndjvY1K7ect5ONlu8HNWdqnwZoVBIbTsUSkpB1rfBDSii1XJXHyrQ8Zc1gngiYBEEQhFsQAZOFpCfkYjbL6Gw02LuUzCSZMgozTHcYMIFyg/5Drx9wtnIGQKvS8mK9F+/spJIEj30Naiu4vA3O/lL+c+yZAYYc8GkK9fvc2XjuMxqVhiENhrC+73qmd57O2GZjLT2k+1v1tvD8KuX7MWQj/DZK+TslDKydoNkgS4/wgTSgdXUGvTeC2a2VoAng7+qt8PD34ddXO1DP5+4UeJEkiRfbBdKsujMAIfFZSoVPgOQw6jjXUbanhmDTsKjwg6iUJwiCcDNdunRh/Pjxlh5GsaVLl9K4cWOsra3x9PRkzJiKV1guKxEwWUjxdDxfuxsWZ1/LMFVOAYL6bvVZ0msJTTya8Fqz1/CwvX1fp9tyD1aq2gFsfu/Wlcr+LT1SqbQH0H3yLdeW/Jd523nTO7A3GtV/K/thEUFdoP+PoNIqAfwvI5TtLYaBlYNFh/Yg61Hfi1FTXmVWh6Hs9m3MuYefYd3odvjdg+qDtb2Uf/eQ+CylSbKkAn0WtW2VBuHXF37IP3NGFH4QBEGoIFmWMZbSSuJumDlzJh988AHvvvsu586dY+vWrfTq1euuX1cETBZSVFLc1e/GSlzFAZNT5fV/CXYJZsWjKxjecHilnZMO45T1RzmJsPXjsh+383Mw6ZVpfUFdK288woOtdi945gflxtiYDyoNtBll6VE98FoGuvLRl6/h8sUM5o/ribPtvelrVcf7uoBJY6X07wLqXFcpT1u7Fmi1mNLTMcTE3pNxCYIg3E+GDh3Krl27mD17NpIkIUkSS5cuRZIkNm3aRIsWLbCysmLv3r0MHTqUvn37
ljh+/PjxdLmuObjZbGbatGnUqFEDGxsbmjRpwrp168o0lrS0ND788EOWL1/OCy+8QM2aNWncuDFPPlnOqs8VIAImC0m9LsP0b8VT8u6g6MM9obGCx2cpj48tgciDtz8m8aKyGB/g4Qc3uyTcJfX7QN95oNYpZeodK6eggHBnqrvZ8lwr/3tawa9OUYYpobDkfOE6psC8LLQqLbnGXOL0SVjXUraLwg+CINxrsixj1pss8qesWfXZs2fTrl07Ro4cSVxcHHFxcfj7+wPw7rvv8vnnn3PhwgUaN25cpvNNmzaN5cuXM2/ePM6dO8eECRMYNGgQu3btuu2xW7ZswWw2ExMTQ7169ahWrRrPPfccUVFRZbr2nRBzgSwkJUbJMP27BxNcKyteGWuY7rrADtDsRTjxI2wYD6N2g+YW494+FWQz1H0cqrW8Z8MUHiBN+kO9J0BbNZrOCpZRNCUvOi2P7AIj9m61IPQfNClXCHYO5kLqBS6lXqJ+w4bknz9P/rmzOPa++9M6BEEQisgGM7Ef7bfItX2ntEfS3f5DLCcnJ3Q6Hba2tnh7KwWqLl5UGo5PmTKFHj16lPmaBQUFfPbZZ2zdupV27doBEBQUxN69e5k/fz6dO3e+5fFXrlzBbDbz2WefMXv2bJycnPjwww/p0aMHp0+fRqe7e/fNIsNkAfo8I1mp+QC4+packmfWm5DzlXmgVT7DVKTHFLB1h6QLcGDOzfeLPgoX/1SmTHX78N6NT3jw6GxF9vIB52Knw9NB+R16KSFLWXcJkBJKbZfaAISkhYhKeYIgCBXUsmX5PvgOCwsjNzeXHj16YG9vX/xn+fLlXL58+bbHm81mDAYD33zzDb169aJt27asWrWK0NBQduzYUdGXUSYiw2QBZrNM6ydqkJ2aj7VdyZ4vReuXJJ0KycoyDSjLzdYVek+DX0fCri+hQT9w/VcjSlmGrf9THjceAJ717vkwBUF4sNTxdiAxq4BL8Vk09yiqlBdK7aZKX65LaZewaaSso8w/ew5Zlm8owiMIgnC3SFoVvlPaW+zad8rOruQsKZVKdcNUP4PBUPw4O1tZjrJx40b8/PxK7GdldfskgY+PDwD169cv3ubh4YG7uzuRkZHlG3w5iYDJAqzttLR6rEapz10rKW51f/3H3ehZOPkTXNkJf74BL/5W8hP+KzsgfI+ytqTLuxYbpiAID446Xg7sCU3mYnwW1CsMmNIjqOOkfKATkhqCVcdgJJ0Oc1YWhshIdAEBFhyxIAgPEkmSyjQtztJ0Oh0mk+m2+3l4eHD2X9n6kydPotUqyYH69etjZWVFZGTkbafflaZDhw4AhISEUK1aNQBSU1NJTk4m4C7/7hZT8qoYcyU1rb3nJAkem6n0wrmyA85cV/FElmHbFOVxy+HgIm5IBEG4+4oq5V1KyAJ7L9A5gGymNsrv1+jsaHLRY1WvLiCm5QmCIJQmMDCQQ4cOER4eTnJyMmazudT9unXrxtGjR1m+fDmhoaFMnjy5RADl4ODAxIkTmTBhAsuWLePy5cscP36cOXPmsGzZstuOo3bt2vTp04dx48axf/9+zp49y5AhQ6hbty5du97dqssiYKpiTJn3SYW80rjVhM5vKY//fg9yU5XHF9ZD7AnQ2kGniZYbnyAID5QSAZMkFa9jcslKwNPGE4DQtFBsGogGtoIgCDczceJE1Go19evXx8PD46bT33r16sWkSZN4++23adWqFVlZWQwePLjEPlOnTmXSpElMmzaNevXq0bt3bzZu3EiNGqXPvPq35cuX06ZNGx577DE6d+6MVqtl8+bNxVmsu0WSH6BufZmZmTg5OZGRkYGj493pMn+n0tdfJnt/LA5dquHUu2zfPFWKUQ/zO0HSRWg+GB77Gua2hZRQeOht6PaBpUcoCMIDIk9vov7kzcgyHP2wO+5/vwZn1sDDk3mlIJS9MXv5sM2H9DqvI+7997Ft2ZKAFT9aetiCIPwH5efnc/XqVWrUqIG1tbWl
h/NAqYz3XmSYqpjr1zDdlzS6a72Zji+HjROUYMnGBdq/ZtGhCYLwYLHRqQlwtQXgUnxWcS8mUsKKK+VdSrtUXCkv//x55DLM0xcEQRAeLCJgqmJM9+sapusFtIPmQ5THx5crf3d6E6ydLDcmQRAeSLWvb2DrVlhaPDmUOi51lO1pIVgFBSHZ2GDOzUUfHm6hkQqCIDzYRo8eXaLc+PV/Ro8ebdGxiSp5Vcx9vYbpej0+hpC/ICcJHHyh1QhLj0gQhAdQHW8H/jmfQEh8FtQsyjCFUsdVCZhC00KR1Sqs69Uj7/hx8s+exapmTQuOWBAE4cE0ZcoUJk4sfa27pZfSiICpCpHNMqas/0CGCZQpeH2+U0qMP/IFaG0sPSJBEB5ARYUfQhKywLWZsjEvjQC1PTqVjlxjLjFZMdg0akje8ePknT2HU58+FhyxIAjCg8nT0xNPT09LD6NUYkpeFWLONoAZkEBlf58HTAC1e8Eb56De45YeiSAID6g6hVPyLsVnYdbYgJM/AJq0q9R0VjJJIWkhWDcsqpQnSosLgiAIJYmAqQopLvjgoENS30dNawVBEKqoQHc7dGoVOXoTMel5JdYxlSj8UFRa/MIFZKPRUsMVBEEQqiARMFUhReuXVPf7+iVBEIQqQqtWEeRhBxT2Y3K/cR1TSGoIusAAVHZ2yPn5FFy+YqnhCoIgCFWQCJiqkP9EhTxBEIQqpmgd08X46yvllSwtLqlUWDcoLC9+9oxFxikIgiBUTSJgqkJMGSJgEgRBqGxFAdOl60uLp1wrLR6dHU22Prt4HVOeWMckCIIgXEcETFXIf6akuCAIQhVSVPgh5PrmtalXcdba42mrVGQKTQ/FpqiB7dlzFhmnIAiCUDWJgKkKEVPyBEEQKl9R89rLSdkY7H1BYwNmA6RHXJuWl3qpOMNUcPEisl5vsfEKgiBUJV26dGH8+PGWHgZLly5FkqRS/yQmJt7Va4uAqQoprpInMkyCIAiVppqLDXY6NQaTTHhKHrgVNqZNCSuelheSFoLW3x+VkxOywUB+aKgFRywIgnD/kGUZ4z2oLtq/f3/i4uJK/OnVqxedO3e+6/2bRMBUhYg1TIIgCJVPkiRql1r4oWRpcUmSsCkq/HBGrGMSBEEYOnQou3btYvbs2cXZnKJMz6ZNm2jRogVWVlbs3buXoUOH0rdv3xLHjx8/ni5duhR/bTabmTZtGjVq1MDGxoYmTZqwbt26Mo3FxsYGb2/v4j9qtZrt27fz0ksvVeIrLp3mrl9BKBNzvhFZbwJEhkkQBKGy1fV24ERk+o2lxes/AigBk1k2Y92gATn795N/TqxjEgTh7pJlGYPBYJFra7VaJOn2PT9nz57NpUuXaNiwIVOmTAHgXOHvx3fffZcZM2YQFBSEi4tLma47bdo0VqxYwbx586hVqxa7d+9m0KBBeHh40Llz53K9huXLl2Nra8szzzxTruMqQgRMVUTR+iXJWo1Kp7bwaARBEP5bal9f+KFJYcCUHEaAYwA6lY48Yx7RWdE4F2WYRMAkCMJdZjAY+Oyzzyxy7ffffx+d7vYzmpycnNDpdNja2uLt7Q3AxYsXAZgyZQo9evQo8zULCgr47LPP2Lp1K+3atQMgKCiIvXv3Mn/+/HIHTD/88AMvvPACNjY25TquIkTAVEUUr19yFNklQRCEylZcKS8hC9yvlRbXqDTUdK7JhdQLXEq7xENFlfJCQzHr9ajKcEMhCILwIGrZsmW59g8LCyM3N/eGIEuv19OsWbNynevAgQNcuHCBH3/8sVzHVZQImKqI4gp5TuI/Z0EQhMpW1IspMjWXXMcm2AJkJ0B+JnVc63Ah9QIhaSE83ORhVE5OmDMyKLh0rdS4IAhCZdNqtbz//vsWu/adsrOzK/G1SqVCluUS266fcpidnQ3Axo0b8fPzK7GflVX5EgaLFi2iadOmtGjRolzHVZQImKqI4h5MIsMkCIJQ6dzsrXC315GcrSc0XUUTey8lYEoJ
LVFaXCn8UJ+c/QfIP39OBEyCINw1kiSVaVqcpel0Okwm02338/Dw4Oy/Gn+fPHmyODirX78+VlZWREZGlnv63fWys7NZs2YN06ZNq/A5yktUyasiiivkiQyTIAjCXVH7+ml5btfWMV1fWhzAungd0/l7P0hBEIQqJjAwkEOHDhEeHk5ycjJms7nU/bp168bRo0dZvnw5oaGhTJ48uUQA5eDgwMSJE5kwYQLLli3j8uXLHD9+nDlz5rBs2bIyj2f16tUYjUYGDRp0x6+trETAVEWINUyCIAh3V9G0vEvxJdcxFWWYYrJjyNZnY12/PiAKPwiCIABMnDgRtVpN/fr18fDwIDIystT9evXqxaRJk3j77bdp1aoVWVlZDB48uMQ+U6dOZdKkSUybNo169erRu3dvNm7cSI0aNco8nh9++IGnnnoKZ2fnO3lZ5SKm5FURxWuYRA8mQRCEu6JE4Yd6RRmmUJytnfG09SQxN5HQ9FAaFGaYCkJCkPV6pPtgyowgCMLdUrt2bQ4cOFBi29ChQ0vd9+OPP+bjjz++6bkkSWLcuHGMGzeuwuPZv39/hY+tKJFhqiKK1zCJHkyCIAh3RVGGKST++l5MYcpzRdPyUkPQ+vujcnRENhgoCAuzyFgFQRCEqkMETFWAbDJjzlaqiIgMkyAIwt1RqzDDlJhVQIZtgLIx5TKYzdcKP6QphR+KpuXliWl5giAI98To0aOxt7cv9c/o0aMtOjYxJa8KMGUZQAbUEiq7Oy/zKAiCINzI3kpDNRcbotPyuJDvQluVFox5kBlNHdd/F36oT+7Bg8o6pmefteSwBUEQHghTpkxh4sSJpT7n6Oh4j0dT0n2XYfruu+8IDAzE2tqaNm3acPjwYUsP6Y4VT8dz0CGpJAuPRhAE4b+rblHhh6Q8cA1SNiaHFk/JC00LxSybsRGV8gRBEO4pT09PgoODS/3j6elp0bHdVwHT6tWreeONN5g8eTLHjx+nSZMm9OrVi8TEREsP7Y4UV8gT65cEQRDuqqLS4hf/tY6pumN1dCodecY8orOii0uLF4SEIF/XeFEQBOFO/Luxq3D3VcZ7fl8FTDNnzmTkyJEMGzaM+vXrM2/ePGxtbVm8eLGlh3ZHinswifVLgiAId1WJ0uJuhaXFk0PRqDQEuyhfh6SFoK1eHZWDA7JeT8Hly5YariAI/xFFzVtzc3MtPJIHT9F7XvRvUBH3zRomvV7PsWPHeO+994q3qVQqunfvfkOpwyIFBQUUFBQUf52ZmXnXx1kRoqS4IAjCvVFcKS8hC7lNMBJASigAtV1qcz7lPJfSLtEjoAfW9euTe+gQ+efOYV23ruUGLQjCfU+tVuPs7Fw8K8rW1hZJEssw7iZZlsnNzSUxMRFnZ2fUanWFz3XfBEzJycmYTCa8vLxKbPfy8uLixYulHjNt2rRb1oKvKkRJcUEQhHsjyN0ejUoiK99IinV13AGSbywtDpQImHj6aQuNWBCE/wpvb2+A+34pyf3G2dm5+L2vqPsmYKqI9957jzfeeKP468zMTPz9/S04otKJKXmCIAj3hk6jIsjDjksJ2Vw0etERIDMa9DklSosDxeuYRGlxQRAqgyRJ+Pj44OnpiUGsjbwntFrtHWWWitw3AZO7uztqtZqEhIQS2xMSEm4aNVpZWWFlVfWzNmaRYRIEQbhnans5cCkhm3NpWjrauEBeGqRcLi4tHpMdQ5Y+C+sGSi+mgoshyEYjkua++S9TEIQqTK1WV8pNvHDv3DdFH3Q6HS1atGDbtm3F28xmM9u2baNdu3YWHNmdkWUZo8gwCYIg3DN1CivlhcRngVtRpbxQnKyc8LJVpn2HpoWiCwhAZWeHXFAgCj8IgiA8wO6bgAngjTfeYOHChSxbtowLFy7wyiuvkJOTw7Bhwyw9tAqT84xgNAOgdhQZJkEQhLvt+sIPxaXFC9cxFU3LC0kLQVKpsK6vZJnyz4ppeYIgCA+q+ypg6t+/PzNmzOCjjz6iadOm
nDx5ks2bN99QCOJ+UlQhT2WrQdLeV/8cgiAI96WigCk0MRuTa01lY2GlvAbuyrql4wnHgWvrmPLPiwa2giAID6r77g79tddeIyIigoKCAg4dOkSbNm0sPaQ7Uty0VmSXBEEQ7gl/F1tstGr0RjNJVtWVjclKwNTOR5nifSDuACaz6VrAJAo/CIIgPLDuu4Dpv6a4B5OTWL8kCIJwL6hUErW97AEINRYWDUoJA1mmkUcj7LR2ZBRkcCH1wrWA6eJFZKPRUkMWBEEQLEgETBYmMkyCIAj3Xu3Cwg8nsl1AUoE+G7IT0Kq0tPFWZi7sj92PLjAAla0tcn4+BVeuWHLIgiAIgoWIgMnCRIZJEATh3itax3QhqQCcA5SNhdPy2vu2B5SASVKpsKpfD4D8c2IdkyAIwoNIBEwWJjJMgiAI916plfIKCz+091MCplOJp8jWZ2Mj1jEJgiA80ETAZGHFVfJEhkkQBOGeKerFFJ6cg9GlsFJeYWlxfwd//B38McpGjsQfEYUfBEEQHnAiYLIwU6bIMAmCINxrHg5WuNhqMcuQoPNXNhZmmODatLx9sftKFn4wme75WAVBEATLEgGTBclGM+YcpeqS2lFkmARBEO4VSZKKCz+EmXyUjck3BkwHYg+gCwxEsrVFzstDf/XqPR+rIAiCYFkiYLKgoul4aCRUthrLDkYQBOEBU7SO6VSeh7IhPQKMSta/tXdrNJKGyKxIonNjsa5XVPhBTMsTBEF40IiAyYKKp+M5WSFJkoVHIwiC8GApCpiOp+pA5wCyGVKVDJK9zp7GHo0BJctk3aA+AHkiYBIEQXjgiIDJgq5VyBPT8QRBEO61osIPIQnZ4B6sbCxlHdP+2P1Y11cCJlFaXBAE4cEjAiYLMmUU9mASBR8EQRDuudqFGaa4jHz0zkWV8q4FTB38OgBwKO4Qmnp1AMi/cEEUfhAEQXjAiIDJgkTTWkEQBMtxtNbi62QNQKJVUaW8sOLn67nWw8nKiWxDNqGOuUg2Nsi5uejDwy0wWkEQBMFSRMBkQaKkuCAIgmUVZZmumH2VDddlmNQqNe182gGwL/4A1nXrAqLwgyAIwoNGBEwWdG1KnsgwCYIgWEJR4Ycz+YWV8q5bwwQly4uLBraCIAgPJhEwWdD1VfIEQRCEe6+o8MPBdGdlQ14a5KQUP9/OV8kwnU05i1ynBiAKPwiCIDxoRMBkIbJZFmuYBEEQLKyoee2ZJAOyYzVl43VZJm87b2o61cQsm7ngofzOzr9wAdlsvudjFQRBECxDBEwWYs41gEkGCdQOImASBEGwhGBPe1QSpOcaMDgHKRuTS07LK8oy7VFfQbK2xpyTgz484l4PVRAEQbAQETBZSNH6JZWdFkkt/hkEQRAswVqrJtDdDoAkq+rKxn+tYyoqL743QRR+EARBeBCJO3ULEeuXBEEQqoa6hYUfIqSiSnlhJZ5v4dUCnUpHfE48BcHKtD0RMAmCIDw4RMBkIcXrl0SFPEEQBIsqWsd0tsBT2fCvDJONxobmXs0BuOKlbBMBkyAIwoNDBEwWYsoo6sEkAiZBEARLKqqUdyjTTdmQehVMxhL7FJUX3++YAED++fOi8IMgCMIDQgRMFnKtQp6YkicIgmBJRb2Y9idbIWtswGyA9JJFHYoCpi2qC0hWVkrhhwhR+EEQBOFBIAImC7mWYRIBkyAIgiUFuNmh06jIM4DBSem19O9KebVdauNm7UaOOR9DkB+gZJkEQRCE/z4RMFmI6MEkCIJQNahVErU87QFItSmslJd8qcQ+kiQVZ5li/KwB0cBWEAThQSECJgspKisu1jAJgiBYXtG0vCuaYGVD5IEb9mnvpwRMx10yAFH4QRAE4UEhAiYLMOtNyPnKgmKxhkkQBMHyigo/7DY3UjZc3Q1GfYl92vkoDWz3O8QDhYUfZPneDVIQBEGwCBEwWYJZxr6DL7ZNPZCs1JYejSAIwgOvKMO0Pc0L7DxAnw1Rh0rs42bjRj3XekS7
g1mrwZyVhSEy0hLDFQRBEO4hETBZgMpag/MTNXEdUBdJkiw9HEEQhAdeUcB0OSUPU42uysbL227Yr51vO0xqiVQ/Zf/7cVqeLMtsi9jG/pj9lh6KIAjCfUEETIIgCMIDz9vRGgdrDSazTLxnR2Vj2NYb9uvg2wGA8+75AOTdZwGTwWzgk4OfMH7neMZsH0NGQYalhyQIglDliYBJEARBeOBJkkTdwizTaatmysb4M5CVUGK/pp5NsdHYcN5DaQ1xP1XKyyjI4JWtr7Dm0hoAjGYjB+MOWnhUgiAIVZ8ImARBEAQBqF1Y+OF0mg58migbL28vsY9OraOlV0uueCvTqe+Xwg/hGeEM+msQh+IOYaOxoYVXCwD2xeyz8MgEQRCqPhEwCYIgCALX1jFdis+C4O7KxlLWMXXw60CUBxjVEubMTAzR0fdymOV2KO4QA/8aSHhmOD52Pvz4yI+83PhlQAmY7oeATxAEwZJEwCQIgiAIXCstHpKQBTUfVjZe3g5mc4n9igo/RHgqX1flwg9rQtYwestoMvWZNPZozMrHVlLHtQ4tvFpgrbYmMS+R0PRQSw9TEAShShMBkyAIgiBwLcMUnZZHtmdz0DlAbgrEnSyxXw3HGvjY+XDFW/m6KgZMRrORLw5/wdSDUzHKRh4LeozFvRbjbuMOgJXaipbeLQExLU8QBOF2RMAkCIIgCICzrQ4vR6WZ+KXkfAjqrDzxr2l5kiTR3rc9V7wK1zFVsYApS5/Fa9tfY8WFFQC83ux1pnWchpW6ZKP0jn5KNcB9sSJgEgRBuBURMAmCIAhCoaLCDyHxWVCzm7Ix7MZ1TO192xcXfsg7V3UKP0RlRfHiXy+yL2Yf1mprZnaZycuNXy61519RifTjCcfJNeTe66EKgiDcN0TAJAiCIAiFGldzAuD3EzEQXLiOKeow5JfsV9TGpw0xniqMKjBnZGCIibnXQ73B0fijvLDxBS5nXMbTxpOljyylR0CPm+4f4BiAn70fBrOBI/FH7uFIBUEQ7i8iYBIEQRCEQgPbBKBVSxy6msrBNAdwCwbZBFd3l9jPycqJul6NiCwq/HDWstPyfgv9jZFbRpJekE4DtwasenwVDdwa3PIYSZKKp+Xtjdl7L4YpCIJwXxIBkyAIgiAU8nW24bmW/gDM2R56rbx42NYb9m3v175EPyZLMJlNzDw6k4/2f4TRbKRnQE+W9F6Cp61nmY5v79seEOuYBEEQbkUETIIgCIJwnVe61ESjktgXlkKoQ2tlY9h2+Nc6pZLrmM7e62ESnxPP+J3jWXJuCQCjm4xmeufp2GhsynyONj5t0EgaorKiiMyMvFtDFQRBuK+JgEkQBEEQrlPNxZZnWlQD4IuL7qC2goxISC7Zr6iReyPiq9kCkHP2zD0p/GCWzeyP2c+47ePo9UsvdkbtRKfS8UWnLxjTdAwqqXz/rdtp7Wjm1QwQWSZBEISbEQGTIAiCIPzLq12CUasktoZlk+nVStn4r/LiGpUGn8btMKpAysjCGBt718aTUZDBsnPLeOK3Jxi1dRTbo7Zjls208m7FskeW8WjQoxU+d1G1PNGPSRAEoXQiYBIEQRCEf6nuZstTzfwA2JRbX9lYSnnxtgEdifJQHufdhX5M55LPMWnfJB5e+zAzjs4gMisSe609L9R9gd/7/M7iXotp6N7wjq5RVPjhcPxh9CZ9ZQxbEIT/OENCArlHjyKbzZYeyj2hsfQABEEQBKEqGtM1mF+OR/NDQk36WwHhe8GQD1rr4n3a+7bnd2+JGgkymadP4tiz5x1fN9+Yz6arm1gTsoazKdfWRtVxqcOAugN4tMaj2Gpt7/g6RWq71Mbdxp3kvGSOJx6nrU/bSju3IAj/PcakJK4+8wympGR0NWrgOmwoTn36oLKyuv3B9ykRMAmCIAhCKQLd7ejb1I9fT8ikqt1xNSZD5P5rDW2Bag7VSA9wg1PJJJ08SLU7uF5EZgRrQtbwe9jvZOozAdCqtPQK7EX/
Ov1p4tGk1Aa0d0qSJNr7tmf95fXsi9knAiZBEG5KNhqJeXMipqRkAPRXrxL/0WSSvpmD66CBuAwYgNrZ2bKDvAvElDxBEARBuIkx3YKRJIktBYXT3kqZlufatCUAUsiVchd+SMxNZP3l9YzaMorHf3uc5eeXk6nPxM/ejwktJrD12a1M6zSNpp5N70qwVKRoWp4o/PDgOJl4klnHZpGcl2zpoQj3kaQ535J7+DAqW1sC167F89130Pj4YEpOJmnWbEK7diP+k0/RR0dbeqiVSpLvRVmfKiIzMxMnJycyMjJwdHS09HAEQRCE+8DYVScwnfmV73TfgEc9GHOwxPM7wv7B/clxaMwQvGM7Wh+fm54rx5DD0fijHIg7wMHYg1zOuFz8nIREp2qd6F+nPx18O6BWqe/aa/q39Px0Hlr9EDIyW5/Ziped1z27tnBvmcwmFpxZwLxT8zDLZoKcgljcazFuNm6WHppQxWXt3En06FcA8Jv5FY6PKsVmZIOBzM1/k7J4MQUXLig7q1Q49u6F67Dh2DS6s3WWVYEImARBEAThFkITsnh21l8c041CLckw4Tw4+RU/n2PIYffDrQhMlLH+cjI1nhxQ/JzBbOBc8jkOxB7gYNxBTiedxigbi5+XkKjvVp8Ofh3oF9yPag53MqnvzgzcOJDTyaeZ0n4K/Wr1s9g4hLsnMTeR9/a8x+H4wwDYaGzIM+ZRy6UWi3suxtna2bIDFKosfXQMV59+GnNGBi4DB+I96cMb9pFlmdwDB0j5YTE5+65lq21bt8Z1+DDsH3oISXV/Tm4Ta5gEQRAE4RZqeTnQoVEtTl2sSXMpTCkv3nxw8fN2Wjsya7hDYhIRR3Ygd25dHCAdiT9CjiGnxPmq2VejnW872vq0pY1PG5ysnO71SypVB78OnE4+zd6YvSJg+g/aG7OXD/Z+QGp+KjYaGya1nURjj8YM3TyU0LRQXt7yMot6LcJRJz5QFkoy6/XETJiAOSMD68aN8Xzn7VL3kyQJu/btsWvfnvyQEFIXLyZj41/kHj5M7uHD6IJr4jZsOI5PPI5Kp7vHr+LOiAyTIAiCINzGxfhMNn87jvGaX8mq+TgOL/5U4vkN08cQ/MN2TtRUMe25kp+gOlk50ca7DW1929LWpy3+Dv73cuhldjLxJC9uehEHnQO7++9GoxKfqf4XGEwG5pyYw5JzSwCo61qX6Q9NJ9ApEIDL6ZcZ/vdwUvNTaezemAU9F2CntbPgiIWqJn7KVNJWrkTt5ESNX39B6+d3+4MKGeLiSP1xBemrV2POUT480nh44DZyBK6DB9/m6Krj/syLCYIgCMI9VNfbkYIApTqe+upOMBlLPF+rvTKXv9FVMw1j1LTxacO45uP4+fGf2fXcLr7q8hXP1n62ygZLAA3dG+KocyRLn8XZ5LO3P0Co8qKzohmyeUhxsPR83edZ8eiK4mAJoKZzTRb0WICTlROnk0/z6tZXyTXkWmjEQlWTsXEjaStXAuA7/ctyBUsAWh8fvN5+i+CdO/B8ayIaLy+MSUnowyPuxnDvmvsmYPr0009p3749tra2OP8HyxUKgiAIVdvjjzxGumyHrTmb6HN7SzxXp/2jGLu0RmOG//1py/cNPmZEoxE0cGtwT4s33AmNSkM733aAqJb3X/BP+D88t+E5ziSfwUHnwKwus3i/zftYqW/slVPHtQ4LeizAQevA8cTjjN0+lnxjvgVGXcU8OJOwSlVw+TJxkz4CwG30KOwfeqjC51I7OOD20ksEb/kHn8+n4Tp8WGUN8564bwImvV7Ps88+yyuvvGLpoQiCIAgPoAbVXAmzV0qIn939W4nnJEmiwczvsapXD3NqGlFjXiuefnI/6eDbAYB9MSJgul/lG/OZemAqb+56kyxDFk09mrLuiXU8HPDwLY+r71afeT3mYaux5VD8IcbvHI/epL9Ho65iCrJhzRCYWQ8Szlt6NBZhzs0letw45NxcbNu0weP11yvlvJJOh3Pfvuiq
Wa7ATUXcNwHTxx9/zIQJE2jUqJGlhyIIgiA8oLybPw6AV+JeriaXDIhUtrb4f/ctajc3Ci5eJPbd95DNZksMs8I6+CkB09nks6Tlp1l4NPcXWZZJ+vY7IgYPoeDKVYuM4Ur6FV746wXWXFqDhMTIRiNZ3Hsxvva+ZTq+sUdjvu/+PTYaG/bF7OPNnW9iMBnu8qirmMw4WPIInP8dsuJg5zRLj+iek2WZuP/9D33YZTQeHvjNmI6kvj8y5XfLfRMwVURBQQGZmZkl/giCIAhCRVVr+RgAjaXLLP7n2A3Pa319qTZnDpJWS9aWLSR/+929HuId8bT1pLZLbWRkDsQeKPGcLMvoIyPJOXy43A16/+tks5n4yf8j+VulqWfksGH3tHGnLMv8FvobAzYOIDQtFDdrN+b1mMfY5mPRqrTlOldzr+bM6TYHK7UVO6N38s6edzCajbc/8L8g4Tws6g7xp8HGRdl2YQMkXbLsuO6x9DVryVy/AdRq/GZ+hcbDw9JDsrj/dAmcadOm8fHHH1t6GIIgCMJ/hZMfeS51sEkLIeP8FiJSWhDgVrKimG3zZnhPmULce++RPHcuVrVr4di7t4UGXH4dfDtwKe0S+2L30dOhFTkHD5Fz8AC5Bw5iiI0FwPWl4Xi99ZaFR1o1yEYjcR98QMYf60GS0Hh7Y4yLI3LIUAJ+WoHW2/u257iQcoEP9n2AyWzCQeeAvc4eB61Dicf2OnscdA43PNaqtcw4OoONVzYC0M6nHZ91+gx3G/cKv6Y2Pm2Y3XU2r29/nS0RW/hg7wd81vGz+2Y9XpF8Yz5TD05Fb9LzcPWHeajaQ9hqbUvf+cpOWP0iFGSCWzAMXAt/fwAhf8G+2dD3/vrwo6Lyzp0j4ZNPAPCcMB7bVq0sPKKqwaJlxd99912++OKLW+5z4cIF6tatW/z10qVLGT9+POnp6bc9f0FBAQUFBcVfZ2Zm4u/vL8qKC4IgCBX39wdw4FvWGDtzrOknfPFM41J3S/jiS1KXLEGytibgpxXYNGhwjwdafqasLE5uXsGu3+fQJFKFb5Kp5A4aDRiVbIP31Cm4PPusBUZZdcgGAzFvvU3W5s2gVuP7xRfYtm5FxIsvYoiIRBcYSMCPy2/5CX22PptnNzxLdPadZaTUkprXmr3G8IbDUUmVM4FoZ9ROJuyYgFE20qdmH6Z0mFJp574Xph6YyppLa4q/1ql0tPdrT8+AnnT273yt59TJlbD+dTAboXo7GLASbF0h6gj80B1UGhh7EpyrbpXLymDKyODq089giI7Gvls3qn07575tNFvZLBowJSUlkZKScst9goKC0F3X3Ko8AdO/iT5MgiAIwh27vB1+7Ee87EJHw3fsmNgVf9cbP7WWTSaiXnmFnN170Hh7U2Ptmio3tcWcn0/eiRPkHDhIzsGD5J89C9evu5IkrOrVxa5tO+zatcW2eXNSFi8h+bvvQKOh+qKF2LVta7kXYEHmggJixk8ge8cO0Gqp9vVMHLp3B8AQG0v4oEEYY+OwqlWL6suXoXFxueEcsizzzu532BS+CV87Xya3n0yeIY8sQxbZ+myy9FnFj7MN2WTqM4sfZ+mzyNJnYTAb8Hfw59OOn9LMs1mlv84tEVt4a9dbmGQTz9Z+lkltJyFJUqVfp7L9Hf43E3dNBODpWk9zJP4IkVmRxc9rVBraeLehR76Brid+xdVshoZPQ5+5oLW+dqKlj0P4HmgzGh659Yf89zNZlol+7XWyt21D6+dHjV9/Qe1UNZpqVwX3XeNaETAJgiAIFmXIhy8CwZhHr4LPad6qI9OeKr0gkSkri/D+A9BfuYJNkyZUX74MldWNZZ3vJVmWSV+7lsy/NpF3/DiyvmQlNF1AAMf9jfztEUfHJ0YztP3rNxwfO/EtMjduROXoSODPP2MVVOOujDWjIIN39rxDdFY0Lzd+mceDHq8SGQ5zXh7RY14jZ/9+JCsrqn07B/tOnUrso4+MJGLgIIxJSVjXr0/1pUtQ
/+ve47fQ3/ho/0eoJTXLHllGE48m5R5LgakAnUp3V4OYjVc28t6e95CRGVhvIO+0eqdKB03RWdE8u+FZsg3ZvNTwJca3GI8sy1xKu8TWyK1sjdhKWHpY8f4qWaallSfdm47g4YDueNp6XjtZ2DZY8RRobGDCWbCr+FTHqizlh8UkTp+OpNUSsGoVNg1vkxHPSoCsWPCt/CC9KrL8b50yioyM5OTJk0RGRmIymTh58iQnT54kOzvb0kMTBEEQHiRaawjsCMBDqtOsOxZFTHpeqbuqHRzwn/sdKicn8k6dIv6jyRYtmGAuKCD27XeI/2gyuQcPIuv1aDw9cerzJD6ffUbwju3U/HszpokjOFRXxa7M4zecQ5IkfD77FJumTTFnZhI1ejTGtMqvqBedFc2gvwaxL2YfEZkRfLD3A/r/2Z+DcQcr/VrlYcrOIWrky0qwZGuL//z5NwRLALrq1ZUgydWV/PPniRr5Mqbsa5UVL6df5rNDnwHwerPXKxQsAVipre568PJY0GNM6TAFgJ8u/MTXx76usoU/DGYDb+9+m2xDNk08mjCm2RhA+b6t41qHMU3H8FvPJayXfRmbmk69Aj1mSeKwPonPDk+j+9ruvPjXiyw7t4zwjHAMNTqBTxMw5sGheRZ+dXdH7tGjJM6cCYDXB+/fPlhKDoO5bWBBF7i48e4PsAq4bzJMQ4cOZdmyZTds37FjB126dCnTOUSGSRAEQagUB+fB5nc4o2vKE5lv82LbAKb2bXjT3XP27ydy5MtgMuH51lu4vTT8Hg5WYUxOJvq118k7eRLUajxeG4NDr17oatS44YY7KjOKR397FI2kYc+APdjr7G88X0oK4c/1xxATg03LFlRfvBjVdVPo78S55HOM2TaGlPwUvO286VOzDysvrCTLkAVAR7+OvNHiDWq51KqU65WVKTOTyJEjyT91GpW9Pf4LFmDb/NafsOdfvEjEkKGYMzKwbd0a//nz0Gslnt/4PGHpYbTzace8HvOqRObsdtaErGHqwakAjGs+jhGNRlh4RDeaeXQmS84twUHnwLon1t1YUj09En56FpIugs4enl1GtHddtkZsZUvkFk4nnS6xu4SEq8YOr5wUvGQVng374+3oj6etJ162XsV/37SYRBVnTE7mar+nMCYl4fjEE/h++cWtA/DsJGVdV1q48rW9F7x6UFnz9R923wRMlUEETIIgCEKlSA6Fb1tiVulokDsPk9qW3W93xdvJ+qaHpP64goRPPwVJwn/e99h37nzPhpsfcomoV0ZjjI1D5ehItdmzsGvX7pbHPPbrY0RmRTKr6ywerl5609OC0FDCn38Bc3Y2Tn364PP5tDvOduyK2sVbu98iz5hHHZc6zO0+F09bT9Ly05h/ej6rL67GKBtRSSr6BfdjTNMxeNje/bVhxrQ0Il96iYLzF1A7OeH/ww+3/yS+UN6ZM0QOHYY5Jwe7jh35cbAfP1/9BTdrN9Y9ue6OKtrdayvOr+CLI19gpbbi76f/xs3GzdJDKrYneg+vbnsVgFldZt3YrDf2BKzsD9kJ4OALA9eAd8nptPE58WyL3MbWiK2cTDpZ5pLqDloHvOyuBVD13OrR3LM5tVxqVclgWJZlCi5cIGHa5+QeOYIuuCY1Vq9GZWd384P0ubDsCYg5Cs4BoNZBSig0eR76/Tezb0VEwCQIgiAI5SXLMLsxpEcyzWUK8+OCGdo+kP89efMbaFmWif9oMulr16Kytydw9c9Y1ax514eatWMHsW9OxJybiy4ggGrff1+mNUefHfqMVRdX8Vzt55jUbtJN98ves5eo0aPBZMJj/HjcR4+q8FjXhKzh00OfYpbNtPdtz1edv7ohuxWRGcHs47PZErEFABuNDUMbDGVog6F37VN+Q2IikcOHow+7jNrNjeqLF2Ndp3a5zpF77BiRI0Yi5+VxpJbEzH4q5vZeQHvf9ndlzHeLLMs8v/F5zqWc4+XGL/N6s9dvf9A9kJibyDPrnyGtII0B
dQbwQdsPSu4QshnWDQNDLng1hBfWgJPfLc9pls2k5aeRkJtAwpnVJB5bRIKNIwkN+5CQn0xibiIJOQnkGnNveg5HnSPNPJvRwqsFzb2aU9+tfrl7Y1UWc14eOQcOkr1zJ9k7d2JMTARAsrWlxto1t/59ZDbBmsFw8U+lR9VLWyAvDX7oCcjK+1m71715IRYgAiZBEARBqIgN4+HYEmLrDKb9qd5YaVTsebsrno43zzLJej2Rw18i9+hRtNWrU2PNatTOzndleLIsk7p0GYlffgmyjG2bNlSbPavM19sdvZsx28bgZ+/Hpqc23TJzlLZqFfEfK2tc/GZ9Xe6+U2bZzDfHv+GHsz8A0De4Lx+1++iWN5YnE08y4+gMTiWdAsDdxp0xTcfQN7gvGlXltZk0xMUROXQY+ogINF5eVF+ypMJFLiK2rSdj7DtoTRDXtiZdf/gDSX1/9TYCpXLeGzvfwFHnyD/P/IOd9hZZiXvAZDbx8paXORx/mLqudVnx6Aqs1NcVVzm8EDa9DbIZanaDZ5eBdTnvA416+KYpZMbA419Dy2vTarP12UpQlZtAQk4CsTmxnEo8xcmkk+QZS65vtFZb08SjCc29mtPCqwWNPRpjo7G5g1d/a4bYWLJ37SJr505yDx5Cvq7djmRjg12H9rgNH45t8+a3PtGmd+HQ96C2gsF/QEBhhrqwzQIOvjDmIFj/NyvriYBJEARBECriwgZYPQjZLZin1d9wPDKdp5r78WnfRtjobn4TbExNJfzZ5zDExGDbri3VFyxA0lbuJ86yXk/81Kmkr10HgPOzz+L90aRyXSfXkEvHnztiMBtY33c9NZxuHSTEf/YZact/RLKyImD5MmyalK2Igd6kZ9K+Sfx19S8AXm36KqMbjy7T1D5ZltkSsYVZx2cRlRUFQE2nmrzR8g06+XW64+mB+qgoIocMxRAbi9bPj+pLl6Dzr1gvHoPZwNDNQ1EfOMnbv8qoTTJO/frh8+kn912vG5PZRN8/+hKeGc7ElhMZ0mCIRccz79Q8vjv5HTYaG9Y8voZAp0DlCVmGLR/B/m+Ur5sPhsdmgrqCP28Hv4fN74JLILx2DNS3DswNZgMhqSEcSzjGsYRjnEg8QXpBeol9NJKG+m71izNQrbxb3VEAKptM5J85Q9bOnWTv3EXBxYslntf6+mLfpQv2Xbtg27p12ap2HpgLf7+nPH5msVJ+vYg+F+Z1gNQryvv75JwKj70qEwGTIAiCIFREfgZ8GQRmI4ee3EH/NXEAuNnpGN6xBoPaBuBkU/qNWX7IJSKefx5zbi4uAwfiPenDShuWMS2NmHHjyT18GFQqvN55G5fBgysUPIz8ZyQH4w7yTqt3GFR/0C33lU0mol8dQ/auXajd3amx+me0free8pSpz2T8jvEciT+CRtLwv/b/o09wn3KP02AysDpkNfNOzyOjIAOANt5teKPlG9R3q1/u8wEUXLlC5NBhGBMT0QUEUH3pErQ+PhU6F8DXx75m8dnFOOgcWGn7OnnvfQJmM87PD8D7o4+qdJnu0vxy6Rf+d+B/eNp4sunpTejUlVPwo7yOxh/lpX9ewiyb+azjZzxR84lrTxYWZwHg4Y+g4xtwJ++zPge+bgh5qfD0D9DomXIdbpbNXEm/wvHE4xxNOMrxhOMk5CaU2MfXzpefH/8ZF+sb+3bd9LwFBWTvUKbZZe/ejSk19dqTKhU2TZsqQVKXzljVqlW+77Xz65WpeMjQYwp0GHfjPuH7YOmjyuMXf1OyeP8xImASBEEQhIpa/AhE7ofHZrJO1YtZWy8RnaZMwXGw0vBiuwCGd6yBu/2Nn+JmbdtG9Guvgyzj/b//4TKg/x0Pp+DKFaJeeQVDRCQqOzt8v5qBQxkryZZm6dmlfHXsKzr4dWBe99sv6jZl5xAxcCAFISFY1a5NwMqfUNvfWGEPIC47jle2vsLljMvYae2Y2WXmHa/nydRnsuj0IlZcWIHBbACUpqUTWkzAyarsU4Xy
Qy4ROXw4ppQUrGoFU33x4jtqOrw/Zj+jtipru2Z2mUmPgB5krF9P7DvvgizjOnQonu+8fV8FTXqTnt6/9CYpL4kp7afQr1a/u3Id2WwmZ99+snfvRuvri03jRljXr4/Kxoa0/DSe2fAMibmJPFnzST7t+Om1AyMPwtLHwGyEXp9BuzGVM6BdX8KOT5V1UKP33lEAJssysTmxHEs4xvGE4+yI2kFqfir96/Tnw7a3/xDFnJtL2uo1pCz+AVNScvF2lYMD9p06Yt+lC3adOpXaNLlMog4rRR6M+dDyJXjsq5u/3r/egsMLwMkfXj0AVg4Vu2YVJQImQRAEQaio3TNg+1So+zgM+AmjycyG07F8v/MylxKUPoFWGhUDWvkz8qEgqrmULEqQPG8+SbNmAaALrold6zbYtm6NbetWaFzLV6Y3e98+YsZPwJyVhdbXl2rzvse6dvkKE/xbaFooT61/Cmu1NXsG7MFac/P1WUUMcXFcfe45TEnJ2D3UCf+5c5E0JacuXUi5wJhtY0jKS8LTxpO53edSx7XOHY31ejHZMXxz/JviaX6u1q5MbDmRx4Mev21Qog8PJ7z/AEwZGVjVq0f1xT9U/IYTSM5L5un1T5d6I5y2Zg3xH00GwP3VV/AYO7bC17mX9JGRqBwcWBHzB18d+4pAx0B+7/M7alXlrccyZeeQ8cfvpK34Cf3VqyWfVKuxCg7muFsWe53iyQ324+vh67CzKby3y06E+Q9BVhw0eEqZRlZZwWhempJl0mdXeqGDI/FHGP73cFSSijWPr7npz4QpO5u0FT+RumwZpsIeaBpvbxx798a+SxdsWzS/82m+KZfhhx6QmwK1e0P/n249BbEgG75vD+kRSnD1+Mw7u34VIwImQRAEQaio2BNK80adA7xztXhthNkss/VCAt/tvMypqHQANCqJvs38GN25JsGeStZFlmXip0whfdXPN5zaqlYtbNu0wbZ1K2xbtbrlTXvqypUkfPoZmEzYNGtGtW/noHG783LPsizTfV13EnMTmd99Pu39ypYByjtzhogXByPn5+MyaBDeH16rWLYvZh9v7HyDXGMuwc7BfN/9e7ztvO94rKU5lnCMKQemcCXjCgBtfNrwYZsPr61x+RdTdjbh/Qegv3wZ64YNqf7DItROFV/EbpbNjNoyioNxB6nlUouVj668IehMXf4jCZ8pDWy9Jn2I68CBFb7evZC9axdRr7wKkoRV65YscDvD7poFTHl0Ft0Dut/x+fUREaStXEn6L79izlY+dFDZ2eHwSG9Maenknz6NMSnphuMka2us69XDpmEDrDP+wcZ0Bm2NYKSR28Gq9Cxnhf3zIeyfA/5t4aW/K/XUb+58k38i/qGlV0sW91pcIsA3paeTuvxHUleswJyZCYDW3x/3US/j9OSTSJXUB42cFKXXUuoV8GkKw/4CXRnWVV3ZCcsLp9QO+RNq3NjQ+X4lAiZBEARBqCizGWbUgtxkGPoXBHYo8bQsy+y/nMLcnWHsC0sBlA+6ezfw5tUuwTSqptyMG9PSyD16lNzDR8g9dIiCS5duuJRVnTrYtmmNXevW2LZsidrZGdloJGHa56T99BMAjk8+gc/UqWVbyF1Gk/dP5tfQX3mx/ou83ertMh+XuflvYsaPB64FAr+G/sqUA1MwySba+LTh6y5f46C7u1N3DCYDS88tZf7p+RSYCtCpdIxoPIKXGr5UYt2NbDYT/drrZG/fjsbTk8B1a9F6et7RtRedWcTs47Ox0djw82M/E+QcVOp+RZlGla0tNf/efEfT/+4mU0YGVx5/4oaAxSRBeC0HOgx6C4ce3cudkZNlmZz9+0n7cQXZu3YpxRoAXWAgLoMG4dS3L2r7azfs5y7sZtaK16gRa6R7TiBOV5KKg6vrqR0dsG7UGOdnnsbxkUcq8IpvIjNOaStg0sOwTRBQOaXhTWaZuOxY+q7vQ4GpgBmdZ9ArsBfGlBRSly4l7aeVmHOVEua6oCDcR4/C8dFH
b8jg3hFDnhL0RB0Cp+owYis4eJX9+A3j4NhSpTDGK/vLFmjdB0TAJAiCIAh34peRcGaNsqC8++Sb7nYiMo25Oy+z5fy1Rd6darkzpmswbWq4lvgk2ZiaSu6Ro+QeOkTukcMUhIaVPJkkYVW3LpJOS/6p0wB4jB+P26iXK30dzD/h//DmrjcJcgrij75/lOvY5PkLSPr6a1CpOPHWo0xTbQbgiaAn+Lj9x2grWq2sAqIyo/j00Kfsi90HQKBjIB+1+4hW3q0ASPpmDslz5yLpdASs+BGbxo3v6HonE08ydPNQTLLptmt8ZLOZ8AHPk3/6NE7PPI3vJ5/c0bXvlth33iHjj/XogoKoNnsWWTv+3959h0dVbQ0c/k0y6b0XQhJ6rwFCxEBoIiCCCCJdQEQFaVa+q2K7guJVRBEQpAiCNFHpIB2E0HsPJYUUQnqbTDnfHweCkUwISSCg632eeZI5bfaEnXDW7L3X2k7qujUYzv4lwLe0xKFlS5w7P4lj+/bFBk+m7GzSf/+dlEU/kR8VVbDdoXU47gMH4tCq1R0ZBLPys+i9ujexWbF0COzAlxFfgqKQf+UqeRsWkLv+B3JTrNFl2KPobxedLU26+2LdCgyqd4QBK8p8ucSMPAbMiSQ5S0f9epEczVpGbaMXU+Pbkrn8F5S8PED94MTzlZdx6tixzCnp47LieGfnOzT1acrYpmPRKAosHwxnflfTgw/bDF73OFU2LwO+C4OMWAh9BTpPLlMbHxYSMAkhhBBlcexnWDUC/BrBiJ13PfxcQiYzd0Tx+7FrGE3qf8ENA1wIq+ZBk8puNA1yxdup8LQtw40b5OzfT/b+/eRE7if/0qWCfRpbW/w/+wznTk+U7/u6KV2XTuulrTEpJjY+uxF/R/8Sn6soCsfHvYT1ht3kWMN7gyzp3H4EoxqPqpAEB4qisOHKBj7b/xk38tQRv6erPc3I9BDSX1enDfpNmoTrMz3K9DrpunR6r+5NfHY8Xap0YXL45Lu+35wjR7jatx9oNFRZuQLbuqXL7ne/ZG7ZQuzIUWBhQfCSxYXSxn/129ukrFtDhyg7vGOzb5+k1RYET07t2xfUAMuPjSX1p8WkrViBKTMTAAt7e1x69sStfz9sqhSdwl5RFN7e+Tbrr6zH38GfZd2W3U7mkXwBvm8L+ZkQNgql7UTyzl8gdfFi0n/5RU13v2ghdg0alM8PJOUSfBOi1nYasQv8Sh9gp2Tn02fWXi4kqaNk3jmJ9E/8hrYn87AyqsfYNmiA5yuv4Ng2olx+d26luT9+Xf3A5Z0W79A/+rRaU8nSWs12F/x46S5+8Q9Y9CyggaEbILBlmdtb0SRgEkIIIcoiK0mdlgfwxgVwLNk0rpiUHGbtjGLZwVjyDaZC+yq52tEk0JWmgW40CXSlrr8zNtrbnyYbrl8ne/9+8k6fxuWpp7CtU6fc3k5RBq4byNHrR3k/7H161+xdonMupV/iy4Nfsvvqdt792Ui9aDC4OlJjwU/Y1ipbMoqyysjPYNrhaSw7t4yA6yb+u8CIrR7cBg7A9z//ufsFiqEoCuO3j+eP6D+o7FSZZU8tw9G6ZGto4l5/g4y1a7Fv3pzAHxc8NFnzDKmpXOr2NMbkZDyGv4j3668X2h+bGctTq57CqBhZ2ugrvPdFkbFhQ+EaQFotDmFhaKysyNq2rWDanVVQIO79B+DS8xmzGRVvuZXK3FJjyfwn59PYu7G6Iz8bZreH62cgqBUM+r0gQUGhdPdenlRZuhQr/5IH/cVaMRROrlQTS/SeV6pLZOTp6T87kvNXkwgxpjIy+zguu7dgaVIjpTMBFpxuM4IeQwZRP8C1fNrN7TT3WgstBpMBSzTMiU+gWZ4Oes6BhiX7PTfr15FwdBF4VFezCVrdv+K8D4IETEIIIURZzQyHhOPwzPfQ6N7Sgydl5rH97HWOxKRyJDqNc4mZ/P1/ZmutBfX9nWlyM4BqGuiGn4vtA7uhvlUYtENgB75q+1Wxx6bl
pTHj2AyWnVuGQTGg1WgZUKk7T399GMP5C1i4uBA4+/syT3krD8ej/iRz4Mu4p+g5EaRh3WvNeO/xiVRzrVbqay49u5RPIj9Ba6FlUedF1POsV+Jz9deuEdW5C4pOR6VpX+P8xP0ZNbxXtwI56+rVqLJyZZFr5N7e+TbrLq+jU3AnvmjzBQC6y5fJ3LiRjPUb0J07V+h4h8cfx33gABzCw0tUuPdi6kX6ru1LnjGPsU3HMqzBMHWHosDKF+HkCnD0VUd5/7bmxpiVzdV+/dCdP49NrVoE/fRToTVRpZZwAmY+DhoLGHUQPO7eb4xZWeRHRaG7GEX2+Qsc3HEI56RYfHNSCx2nb9yM/9VN5nBQLPqMhuTF9SO8hicvt6nGY9U8yvS7v+XKTsbuUNOsV1dexc1mGwfyz+BuNLK0yvP4tn2/1NcukJsG37VUMxU+Nhqe+Ljs16xAEjAJIYQQZfXHB7D7K3VaXmCYWrfEoCv6qz7vzu3WDlC7KzToRaZ3CMfjMjkSrQZQR2LSSMnOv+MlfZxtCAlyY0z7mtTyvb+JE04mn6Tv2r44Wjmy8/mdWFncufZIb9Sz5OwSZh6fSWa+Os0qIiCC8c3GU8WlCsb0dGJeGkHusWNY2NsTMGMGDqEt7mu7i6MYDMS8NILsP/9E5+3C+IEGrlvr0FpoGVJvCL1q9sKkmDAqRowmo/r15vcGxVCwzWAyFGxPz0/nwz8/JN+Uz5vN3mRQvUH33K7r06aR/N0MrAICqLpuLRbllfmslDI2bSJu9BiwtCT45yVmp7SdSzlHr9W9sNBY8HuP3wlyDiq0X3fpMpmbNmLKycWlR3dsqhadAKMouYZc+q7pS1R6FK38W/Fdh++w0NwMsiJnwfq3wEKrZmYLCivyGvpr17j8XB+Myck4RkQQMP3bMq8BAuCn3nBhEzQdDE9PK9hsTE9HFxWF7uLFggBJFxWFISHB7KUsPTywb9oEj2HDsGvcmHMp5+i9+jkUTORFj0CfrU5VbFDJhRFtqtK5vh+WFncPnBIz8jh4JZUDV1KIjL5CjN3HaLTZ5KeGUjuxAfNtPmG4vzvnbKxp6NmAeU/OL59CxOc2wJI+akA5bDMENCv7NSuIBExCCCFEWV39E+aVUxYu5wCo/wzU7wV+jVCAqzdyCkagjkSncTo+o2D9k6+zLevGhOPucP9urE2KiYilEaTqUpn/5HxCfEIK9imKwtborXx56EuiM6MBqOlWkzebv0lLv8JrF0zZ2cSMHEXOvn1obGyo9PXUMhXWLYvEzz4nZd48NHZ2BP+8hNRKzny6/1O2x2wv87VbB7Tm23bflmoUwJSdTVTnLhiSkvB6fTyew4eXuT2lZUhJ4dJT3TCmpOAxYgTe48YWe/yrf7zKrrhd9KrZi4lh5hOg3AuTYuKtnW+x8cpGPO08WdFtBR52N1PmR0fC/C43i9NOgrBXi71W7rFjXB00GEWnw33wYHwmvFP2Bl7dC/OeVNf9jDlO5qGzJH7yX/RxcWZPsfT25pKDF0c0biS4+fFC37Y0aNW4yAQZH+/9mGXnl1HVuQaNNB+w7FAceXp1Cm+guz3DW1eld0gAtlZq8GcyKURdz+LAlVQOXknh4NVUolNybl7NhF3gXLQOF7HU+/GmPpSuSfNxVTLY696cN7xyycjPKNd/v4KkOF611dE/bfll8HyQJGASQgghykpRYP9stWij1vbmw6bkX1Mvw4mVcHYN6DJuX9ejuho41X8WvG6v+8nNN3IiLp13fjnOpevZtK3lxQ+Dm2NRgk+bS+vWlKvhDYYzuqlaYPX0jdNMOTCFg4kH1ebaejC66Wi6V+tutoipSacjbtx4srZuBa2WSp9/hnOXLvet3UVJX72aa2+qKdL/nj1tS/QWvjz4JfHZ8WgttFhqLLG0sMRSY4lWo739/d/23fo+yDmIN5q9gZtt6Yvdpv36K/HvTMDCwYFqG9ZXWJrx2HHjyFy/AZsaNQheueKuo12HEg/xwoYX
sLKwYuOzG/GyL3u7Pz/wOQtPL0RroeX7jt8XZDUsbXHajPXriRs3HgDfDz7A7fl7m0JbpLlPolzZS3J6O5I33l63pfX3w6ZadWyqVcOmejWsq1XDqkpV3tp4mVVH4rDWWjDvhea0qu5p9tKpeal0XdWVzPxM3mv5Hh0CerDgzyss2HuFtBw9AB4O1jzd2J/oGzkcvJpKeq6+0DU0Gqjj64yT7w5O5y3FVqNlaXI2VTPUjJ0nTMEMt/iQj19w4/Udr6Gg8EHYBzxb89my/2xyUmB6KGQnQfjr0L4cpvtVAAmYhBBCiIeFPk+d3nNyBZzfqE7Zu8W3we3gybUyAGfiM+gxfQ86g4kJnWszok3p197cze9Rv/Of3f+hjnsdvmn3DdOOTGN11GoUFGwsbRhUdxDDGgzDwerua0MUvZ5rE/6PjDVrQKPB7+OPcO3V6761/a9yT57iav/+KDpdiUZNKoJiMnGlz/PknTiBa+9e+H384Nd/ZGzYQNzYcepUvGVLsat397VYiqIwcP1Ajl0/xtD6QxkXMq5Mbfjx1I9MOTgFgMnhk+latau6w2iAhT3gyi7wrAX3WJw2ecYMrn89DSwtCZz9PQ6Pla2OkvHQKuLeGE92vJrd0q1fP7zGj7sjiYWiKLz760l+iozG0kLDrAEhdKh79xpHP535icn7J+Nq48qaZ9bgYuNCTr6BZQdimL3rMnFpuYWOt7WyoEllN5oHuxES7E6TQFeiEvcwZPtojCh8dP0Gz2Rlg0tlTGGv0WVXMGdvGPhPlzrguoVpR6ZhZWHF/Cfn09CrHNYanv4dlg0EjaX6b+XfuOzXfMAkYBJCCCEeRnkZcG4dnFgBl7ap045uqdwSGvSCuj1YfCqX/1t1AksLDctGhBESVPqRjeIk5ybTdllbAOy0duQa1Ju0rlW7MqbJGPwc/e7peorRSMKHH5G2bBkAPhPewX3w4PJt9N8YkpO53Ks3hoQEHNu0IWDGdyVKOFARcg4f4Wq/m2nGf1l53zMh/pXhxg11Kl5qKp6vvoLX6NElPndb9DZGbxuNo5Ujm3ptKnVh4g1XNvDmjjcBGBcyjqH1h97eufl92PM1WDuqN+D3WCtIURSuvf02Gb+vxsLJieCfl2BTrXQfNuSdOUPsa6PRx8aisVTwG9gal3e+L/I1J68/y6ydl9BoYGqfxnRvXKlEr6E36en9e2+i0qPoV7sfE0In3N5nNLH2eDz7Lt2gurcjzYPdqevvjJXlzX6dcY303V/SK34dCVpLumZlM8nkhib8dWjQGyytWHogmrdXnsDX2ZYdb0bw9u7X2RK9BW97b5Y+tRRPO/MjYCW2/AU4tQp86sPwbaCt2LV590oCJiGEEOJhl30DzvymTtu7uge4+V+3xhKlWlsWZLdk8uVqeLi6snb047ja35+bkedWP8eZlDMANPZqzJvN3yzTJ9CKopA05QtS5s4FwPO1UXi++up9yf6n5OdzdchQcg8dwrpKFYKXLcXS6f4myygxRYGcG2DnDn8J4OLGv07GunUPNM24oijEjRlL5qZN2NSqRZXly9DcQ+IJk2Ki5289iUqPYkzTMbzY4MV7bsOBhAOM2DwCvUlP39p9mdBiwu33fmY1LB2gft97PtQzXxC42Hbm5xP9whByDx/GqnJlgpctLbbIblHSf/uN+Pcnouh0WHm7EdDkHLb+zjDupJrI5S++3XqBLzapBX4n92zA8y0C7+m19l7by0ubX8JSY8nybsup4Vaj+BNuRMGer1GOLmaspwtbHewJNFmwrNl/cKj3LPxlyqzOYKT159tIzNDxea+GdGnoRr91/bicfplmPs34/onvi0z0ck+yk2F6C7WfR0yAiHJYP/YAScAkhBBCPErS49RPak+ugGtHCjbnYMs6YwuuVOrG68OHorlZh6Y87Ynbw6Izi+hevTudgjqVyw28oijcmDlTnSIFuA8Zgvdbb5bt2iYT7JsOhxZA437w2GvEf/xf0n5eioWjI8HL
lmFTtejiqPfNraDoRhTcuAgpN7/euKR+r88BGxeo3Bwqh0LlUPQaf6J69FbTjH8zDeeOHe97M9PXruXa62+AVkuV5ctKNbJ1a/qmh60HG57dgK3W9u4n3XQx9SKDNgwiMz+T9oHt+V+b/91eD5d8Eb6PUIvTthwJT356z237K0NKClee64M+Nha7kBAC580tUVZCJT+fxMmfkbp4MQAOrcOpNGkSlovaq+sR/5aAYu7uy3y05jQA73atw4vhJc8Q+Fdjto5ha8xWQv1Cmd1xdtG/Iwkn1Yydp34BxcRPzo5M9nDHSmPJoi4/UddMmvtZO6KYtP4sVb0c+GNcG65kXqbf2n5k67MZUGcAb7d4u1RtLuTkSrV2lYUWXtoBvvXLfs0HRAImIYQQ4lGVfBGOL1UfaVcLNmfbeOPQrC80fB586lZgA0su5ccfSfx0EgCuvXvj+8HE0qV9To+FVS+r61tuSk2qQcLWbNBoqDxzBo5t2pRXs++Ul3EzGIq6HRzdCpDy0u/tWhpLki5V4UZkDlY+blRdvhALr6olSm5QGobr19WpeOnpeI4ahdeokaW6jt6kp8svXUjITuC9lu/xXK3nSnReYnYi/df1JzEnkSbeTfi+4/e3g638bJjTAZJOq6n7B68GyzKOegC6ixe58nxfTFlZuHTvjt/kScUG6/rEJOLGjiX3iPphheerr+I5aqQ6tfPgPFgzFpz8YcxR0Nqw7EAMb608DsC4DjUZ0+EuI0PFiMmMocevPcg35TM1Yirtg9r/Zed+2PU/OL+hYNOZ6m3ob4pBrxh4p8U79K/T3+y1M/P0PDZ5K5l5BmYNDKFTPV+2XN3C2O1jgb+tISstRVFHB8+ugcDHYOj6sl3vAZKASQghhHjUKQpE7+P85tn4xKzHRZNze59vAzVwatD7joKeD5u0lb8Q/957YDLh3KUL/p9NRmN1DzfFJ1fCmnFqYGJlD82GkrNxCVfXW4OiwatbAzw/XQBWduXf+Kt/wrZPCwVqRXIOUAucelS//dW9GrhUguvnICZSfURHQkYsJr2GqHXeGHIt8W6UgUcLZwgMLRiFwrdBuQQOiqIQO+o1srZswaZOHaosW3pvP/u/uZWoIMAxgNXPrEZrUfyIZ2Z+JoM3DOZC6gWCnYNZ2HkhrrautxoHvwyHE8vB0edmcVrfUrft77J27yFmxAgwGvEaOxbPl0cUeVzOgQPEjhuPMTkZCycn/D//DKe2bW8fYNDB1IaQlQDdvmaN1ROMXnIEkwLDw6vwf13qlHlUdtrhacw+MZtKjpX4rcdv2FhYw6Z3Ye+3N4/QQL1nyG75Mn0OfszVjKtEVI5gWttpd33tzzec5bvtUTSu7MqqVx9Do9EUvJ6tpS2Luiyilvu9rRe7Q2YirB2vFrJ1L91IW0WQgEkIIYT4h1AUhTGL9pF/ZgP9bP8knCNoTDdTDGssoGpbaPS8WiTX+u7Z7CpCxoYNxL35Fuj1OEZEUGnqV1jY3mVKV146rHtTHWkDqBQCPWejz7fn8rPPYkxJxalyLpUeS0XjUQ2e/gaCW5VPg2MPwbZPIGrr7W0OXrcDob8GR25VwNq+5NdOj4WYSNJ+WUn84iNYaE1UeyoJra3p9jFaO6jWDiLeVgsnl1JBqnUrK6qsWI5trbLdGOfoc+i0shNpujQ+b/05nauYr1OmN+p55Y9XiEyIxNPOk0VdFlHJ8S8JEY4ugV9fVrOsvbAGgsqW1a4oqUuWkPDhR8CdqeYVRSH1xx9J/HwKGI3Y1KxJwDfTsA4KuvNCf34Dm97FaGlLv7y3iTTWom+LQD59pn65TGHN0efQbVU3knKTGN1kNMNTU2Hbf9WdTQZAq3HgWZ3/2/V/rL60Gh97H1Z0W3E7+CzG9UwdrT7bSr7BxM8vtaRlVQ+MJiMjt4xkz7U9VHKsxNKnluJi41Lm9/GokYBJCCGE+AfJyNPTddouYlJyeba2HV/UjUJzfCnEHrh9kLUjNBkIHT96KLNV
Ze3cSexro1F0OuxDQwmYPh1LxzsDPMVgQLmwA9Oq0Sip1zCZLFEav4BSvy+mfCNJU6aQd/IkNrVrE/zBICy2TFDr9gA0GwodPgTbUt4PxB9TR5RuTYGy0Ko3rOGvg+u9Lei/G8Vk4spzfcg7eRLXJ8Px61lDnYIVE1l4ml+9ntD2P+BZ/Z6ur09M4tLTT2NKT8drzGg8X3mlXNo94+gMvjv2HbXda7PsqWVFBgwmxcSEXRNYd3kd9lp75j85nzoef1k3ZTLCt83VKY3t3oXWb5ZL24qS8OmnpP64EI2NDUGLFmLXoAGmnBzi33ufjLVrAXB+6in8PvoQC3szga8hn9S5vXC7toMMxY5ZVaYyflAfLMuxRtqaS2uYsGsCdhorVl+9go/RCJ0/h1B1ZOy3i7/x7p53sdBYMLfT3EKFpu/mP6tO8FNkNBG1vJg/pAUA6bp0+qzpQ1xWHK38WzG9/XSzddaKoygK525c4rMdaxke2orHAkreroomAZMQQgjxD3M8No1nZ/yJ3qjw4dP1GPxYsLqe5vhSOPbz7fVOweHQZyHY3Z9U5GWRvX8/sa+8iik7G62XFxYODph0OhSdDiUvD1N+PhgMd72OpasrwStWYB1QSQ0uNr8Ph+arO5384akvoZb50Y87JJ1RA6Uzv6vPNRbQqK96I+9+/xJJ5Bw+zNV+/QunGTeZIPGkmmL75Iqb7bGEJv2hzdvgEnDX6yqKQuwrr5K1fTu29eoR/POSMk3F+6u0vDSeWPkEuYZcZnaYSatKd47qfXnoS+adnIdWo2V6++k8Vulvo0e3suLZusK4U/dUb+leKUYjsa+OJGvHDiy9PKk05QsS//tfdBcugFaLz1tv4TZwgNmRIr3RxNIDMfxv3TFm8CktLc6g2LmjGbIOvMsvLbyiKAxa2ZWj2TF0zcpmct3h0FZNNX45/TJ91vQh15DLqMajGNGo6OmF5ly9kU3bL7ZjUmD9mHDq+Kn3y+dSzjFg3QDyjHmFilffrZ3RmdEcSDjAgYQDHEw4SFJuEgBuhgh2DL37NMGHhQRMQgghxD/Qrcxc1pYW/PLqY9SvdHMajaKo9Z1+GaFmG/OsCf2Xg1twhba3KLknThDz4nCM6XdPlqCxtkZjY4PG1gYLaxs0trZo3dzwen089k2aFD748i5YPRpSLqnP6/VUP6F39DL/AskXYfskdZ0UCqBRiwhHvAOepV/Ify/ixo8nY9167Fu0IHDB/MI3mwknYMvHcGGj+tzSBpq/COHjwcF8HZ20Vb8SP2ECGisrgleuwLZmzXJt82f7P2PRmUU0923O3E5zC+1bfGYxk/ariT7++/h/ebra04VPVhT4oaM6Ohr+OrR/v1zbVhRjVjZX+/VDd/58wTZLL08Cpk7FPqToERFFUdh4KpHPN5zlUnI2AB2r2TNL+QiLa4fVdVdD1qvTMsvDhc2cWjmQvr6eKBoNC5/8kcY+TdAZdfRb24/zqecJ9Q1lVsdZpRoJGrX4MGuOx9O9sT9fP3/7d2ftpbW8s0tNBz617VTaB7YvdF5xAVLBMSZLTHmBDGrYg3daDeVRIQGTEEII8Q+kKAojFh5i0+lEgjzsWfPa4zjZ/mXkIOEkLH4OMuLUNTd9f4aAZhXX4CLk5BuYuGgv2qhzjH+qAY6O9lhcWIMm8hs05GLh4IKm2xdoGvW89wK0+lw1APrzG1BM6ijbk5OhYZ/CWehSr8COz+HYEvU4gDpPq7VkHnAGQn1cHFFduhafZjx6H2z56Ga9LtTpl2GjIGzkHdMP9YmJXHqqG6bMTLzGjcNzxEvl3uaE7AQ6r+yMQTGwqMsiGnmp66z+uPoH47ePR0FR1+I0HH7nyVf3wrwn1eBv7IkHlrREHxfH5T7PY0xOxq5pUypN/Qorb+8ijz10NYVP153l0NVUADwcrBnToQZ9WwRipUuDBd3UUUCXyjB0Q4lG/YoVHQk/dgdDLhNrhPCL4Tp1PeqypOsSJkVO4udzP+Nu686K
bivwsi/mA4BinIxL56lvdmNpoWH7GxFUdr89/fBWAOxg5cDirovRarTsT9hvNkCysrCikVcjqjk1YvEOK3IyKzGufb0yZQusCBIwCSGEEP9Q6Tl6ukzbRVxaLl0b+vFt3yaFRyUyrqlBU8IJ0NpCz9lQ92nzF3yAsnQGhs47wP4rKQBMCHdnRNqXcGGTekDVttBjBjj7le2Frh2B316DxBPq82rtodtUdardzilwZBGYbk79q9lZnfpUhuQKZZX09dfcmDETq8qVqbp2TdF1gxQForaogVP8MXWbnbs62tT8RbCyQ1EUYkaMIHvnLmwbNCB4yWI02vKv3QXw7u53+S3qN9pVbsfX7b7mSNIRhm8ajs6o47maz/Fuy3eLnpq1pK86Gtp0MDw97b60zZz82DhyjxzBudMTRRbuvXQ9i883nGPDqQQAbK0sGB5elZdaVy38wURWEszrrKaW96iujjQ5Fh183VXiKfVaeelQvSM3npnOU7/1IEufxZPBT7LhirqebkaHGTxe6fHSvcZNA3+IZNeFZAaHBfFh99v1kvQmPS9teomDiQfRarQYlMLTYq0srGjo1ZDmvs1p7tNcLWytWPHMd39yJj6DsKoeLHoxtFzXdD0IEjAJIYQQ/2BHolPpPXMvBpPCJz3qM6Dl3zJ76TLVYpIXNgEaeOITdTSiAtcWpOfqeWHefo5Ep2FlqaGNcpDPrGbjoclQRxs6fgQtXoJ7HVUyx6iHP6fB9s/AqAMrBzDpwZiv7q/WTk2m8BCMwJmys4l6sjOG69fxfvMNPIYNK+Zgk7rWausncOMCAEYbf7JdupN5MZ+MdevRWFtT5ZeV2FQvYaIIo16tiWTrUuI+cintEt1/6w7A1IipvP/n+2TkZxBROYKpEVOLnjZ2/TxMbw5oYNSBBzbt8W6uZ+qYtuUCi/dHYzQpWGjguWaVGdexJj7OZrI5psfC3M6QHg3e9dRMf/bu9/bCKZdhbifISoTKLWHgKrC2Z8GpBXxx8IuCw4bUH8L4kPFleIeqPy8m029OJLZWFux5ux0ejjYF+5Jzk3l+zfMk5iQWGSD9vVDxe7+eZOG+q3g4WLNuTLj5n9NDTAImIYQQ4h9u9s5L/HfdGay1Fqx69THq+f8tLbDRABvehgNz1OfNX4QnPwPL+zPiUJy0nHwG/rCfE3Hp+NvqWV1rAx7nlgCQZF8d78EL799UuOQL8PtrEL1XfR70OLT7z31JY10Wt9YdWTg4UG3jBrSe5tcoKYqC7tQpspZPJ3vndnISFFBuBzregzrj0akR6DLU4Dnv5ldd+l+e/2WfIVc9sd4z0Ht+ids8eutotsVsK3je0Kshc56Yg53WTE2s30bBkYVQqyv0XYzRpFToqEROvoE5uy4za0cU2flGANrX9ubtzrWp6eN09wvciFJHh7IS1bT3g34DmxKcB2rtorlPqNNDvevBkLUFiVr0Rj09f+/JlYwrNPRsyPzO87GyKJ+6XN2n7+F4bDqj21Vn/BOF08wn5yYTnRFNHY865v8NgfUn4nnlp8MAzB/SnIhapRxdq2ASMAkhhBD/cIqi8OKCg2w5m0QVTwdWv/Y4jjbavx8Ee6erRTBRoEYn6DX3vmYl+7sbWTr6z4nkbEImT9qfZZr9D1hnxQEwy9CVbzXPs+WtJ/B2uo+fUN8alXHwhKBWFTrSZo5iMnGl93PknTqF63PP4ffRh4X2G9PTyf7zT7J27SZ71y4M168X2m/trODgm41TpTwcfPJL35B+y6BmpxIdeuz6MQasGwBAkHMQCzsvxM3WTHbGzASY2kAd4Ru6iZXXK/H2yuO0re3NhM61qer14PqkwWhi+aFYvtp8nqRMHQANA1yY0LkOYdU87u1iSWdgXhfITVGD8f7L716XKzcN5ndV10G5BcPQjXcU7T2Xco7l55czvMFwfBzKb53XuhPxvPrTYVzsrPjznXY4/P1vxl3EpOTQZdouMvMMvNymGu90rl1ubXvQJGASQggh
/gVSs/PpMm0X8el5dG/sz9Q+jYteN3L6d/hlOBjywLehelNc1nVCJZCUkUf/OZFcS7rOh3ZL6aXcXKvkGoTS/VueWWfJ0Zg0hrQKZmK3eve9PQ+7nEOHuNp/AFhYUGXlChSTiexdu8jauYvcY8fAaCw4VmNnh0NoKA6tw3EMD8fa2w32zYDz69W1azZOYOOsfrV1/sv3Ln/bfvO43V+qyTLcq8Kr+0BrU0xLb/vP7v9wMvkk37b/lspOlc0f+MeH6mtUDiX1+TVEfLGd9Fy1ALPWQsPAsCDGtK+Bq/39qyGmKApbziQxecNZLiZlAVDZ3Y63OtWmawM/LEo72nXtCCx4Wh21q94Rnl9svhZafg4sfAZi9qmZ9oZuvK+p6//OaFLo8OUOLidn827XOrwYXrXE5+qNJnrP3MvRmDSaBrqydEQYVpblNIW2AkjAJIQQQvxLHLySQp/v92E0KUzu2YDnW5gpsBp7EBb3gZxkcK6kfhLuc/+ClPj0XPrPjsQnZT9fWn+PHzdHRJoPhw4fgI0jey4m039OJNaWFmx/MwJ/V/PTgP4tbqUZx9KyUIAEYF2tGo7h4Ti2DscuJAQLm5IFNSWiy4RvmkFWArSfqCaTKM9rf1VPTWzQ5yfeOxfMwn1XqenjSICbPVvPqlnYXOysGN2+BgNbBmGtLb8b8Ty9kdXHrvHj3quciFPT2bvZW/Fauxr0bxmIjfbe03Tf4epeNRAy5KoZF3vNu3P6q1EPP/dT1xbausAL68C3ftHXu4+W7I9mwi8n8HOxZcebbUv8s5607gyzdl7C2VbLujHhBLjdZSTtIScBkxBCCPEv8t32i3y+4RxWlhqmPd+Ezg3MjB6lXFYz6CWfB2sneG4BVG9f9LFlEJuaw7Dvt9M/cy6DtJvVjS6B0P1bqNqm4DhFUeg7ex/7LqXQt0Ugk3o2KPe2PGr0cXFEPdUNJTcXC3t77MPC1CAp/HGsKlW6vy9+bCmseklNkPHaQXD2L5/r7p0OG/8PPKpz5tktdP1mDyYFlgxvSVg1D3ZduM5/157hbEImAMEe9kzoUocn6vqUqQhqTEoOiyKvsvRADGk56miWjdaCoY9X4eU21XCxK59ivgWitqofShjz1cLH3b+7ncTEZIJVI+DEMtDawaBfIbBl+b5+CekMRsI/20ZSpo4pvRrSu1kxI4M3bTuXxJB5BwCYOSCEJ+v73uWMh58ETEIIIcS/iMmkMPrnI6w5Ho+FBj7v1YheIWZqw+SmwtKBcGUXaCzhqS8h5IVya8vVG9l8PusH3s6bRqDFzVGlZkPVLHhFLIg/cCWF3jP3orXQsOX1NgR5OJRbWx5VukuXMN64gV2jRkWmv75vFEXN2hYTCfV7Qa8fyn5Nox6+bgwZsShPfU2/w7XZe+kGXRr48l3/20VjjSaF5Qdj+GLTeZKz1HVFLau6827XurcLNJeAyaSw+2IyP+69wpazSdy6I67kaseAlkH0aV4Zd4f7+DM9u1b9/VKMaqKVLjez3a1/G/bPAgstPL8Eaj5x/9pQAjN3RDF5/VmqezuyaWzrYqcjJqTn0WXaLlKy8+9ISf4ok4BJCCGE+JcxmhQm/HKcZQdjAfigW11eaGVmbYQhX80cd/xn9XmrMWoh1NLWkrnp0rUkDs4ZzXOm9WqbnAKw7PEtVGtb7HmD5+5nx/nr9GxaiS+fa1ymNjzq8g0mJq8/y7W0XBoEuNCgkgsNA1zu69qeQq4dhe8jAEWtL1TWbILHl6nr5xy82dhxMyN+PoWN1oI/xrcpVDz1liydgZnbo5i96xI6gwmNBp5tGsAbT9TC18V8YpCMPD0rDsayaN9VLiVnF2wPr+HJoLBg2tX2fnAZ+Y4vV98zCrQaC1Z2akFlNGpdtIa9H0w7ipGRp6fVpK1k6gzMHtSMjnWLTixhNCn0m72PyMsp1PN3ZuUrj2FrVQ5TGB8CEjAJIYQQ/0Imk8Ina88wd89lAF7vWJNR7aoXPa1JUWDHZzdv
5G5yDYKA5jcfzcC3QYkX/8cc2Yzmt5EEkAhAbsOB2HX5VE0scBfHY9N4+ts9WGhg07jWVPcuYWrmf5g8vZFRiw/zx5mkO/YFutvTMMDl5sOV+pVc7syKWF5Wj4VD88CnAYzYAUXVVCoJRYGZ4ZB4An2b/9B2fzNiU3OLTGn9d3FpuUzZcJZfj14DwM7KkhFt1CKy9ta33/fZhAx+3HuVX4/EkXMzNbiTjZZnQwIYGBZEtQeYfa+Qg/NgzdjC2zpPgdCXKqQ5Rflsw1lmbI+iaaArK195rMi/E19tPs/XWy7gYG3JmtHhVPH854wAS8AkhBBC/EspisLXWy4w9Q+1qOlLrasyoXNt82tBTqyAXf9T0yPzt9sHS2vwa3Q7gApoDi6VC6flzs/hxu/v4nZyLhYoJGm8sH32W5zrP3lP7X7px4NsOp1I14Z+TO/X9J7OvZ8MRhPZOiMu9uW83uVvcvONvLTwILsuJGOjtWB4eFWupuRwIjaNKzdy7jheo4Gqng40CnClwc0gqp6/c/l8+p99A75pCnlp0PV/6tSy0ri4BRb1BCsHvg/5nU+3J+LnYsuW19sUCnqKcyQ6lU/WnuHQ1VQAfJxteLNTbeysLFmw9wr7L6cUHFvTx5FBYcE806TSPafLvi/+/BY2/Uf9PmICRLxTse35m6TMPB7/bBv5BhPLRoTRokrhwrt7o27Qf84+TApM7dOYHk3u8xq6B0wCJiGEEOJf7ofdl/l4zWkA+rYI5JMe9YufkpSXAdcOQ+wBNaNe7AHIuXHncY4+twMol8roNn+MTcYVADbadCL05e9wdTNfdNWcswkZdP56F4oC60aHU9e/Yv9Pz9YZWLI/mh92XyYpU8fbT9ZieHjVMiUhMCdLZ2Do/APsv5yCvbUlcwY147Hqt3+G6Tl6TsSlczwujeMx6ZyISycuLfeO61haaKjh7Uhdf2fq+qmPOn7OuJVmzc7+2bDuDbWY6muHwd797uf83Y/d4dJ2shu/SLNDHcnVG/n6+cZ0b3xvN96KorDuRAKT1p8hNrXw+7a00NCpng+DwoIJreJ+X/59yuTUKjWVeON+D2X9rwm/nGDJ/mja1fZm7gvNC7bfyNLR+etdJGXqeK5ZAJ/3alSBrbw/JGASQgghBMsOxPDOL8cxKdCtkT9fPteo5HVTFAVSL98OnmIPQMIJMBnuOPSa4s5s13GMe+UVnG1LPxLz2pIjrD52jQ51fJgzuFmpr1MWyVk65u+5wsJ9VwvqBN3Sub4vU3o3KtepcOm5egbP3c/RmDScbLTMG9KcZsF3D06uZ+o4GZfO8dh0jsemcSw2vSBZwt/5udhSx8+ZOn5O1PVzoY6fE8EeDsXXHTIa4Ps2anHVZkPhqa/u7Y3FH4NZrUFjyYdVFjHvtELzYDeWjQgrdVCTpzey4M8rfLc9CitLC/q1qEzf0ED8XCQdfWldSc6m3f+2Y1Jgw9hwavs6YzIpDJl/gB3nr1Pd25HfR7Uq8Yjgo0QCJiGEEEIAsPZ4PGOXHkFvVGhX25vv+jct/bQtfS7EHyP13J/En96JTco59hnrsMn/VaYPiyhzIBF1PYuOX+7ApMCvI1vRuLJrma53L67eyGb2rkssPxiLzmACoIqnAy+1rkq+wcQna0+jNypU83Jg1sCQcllnlZKdz8AfIjl1LQMXOysWDmtBwwDXUl1LURQSMvI4EZvOmfhMzsRncDo+g+iUO6fzgbomqLaf081ASh2NahTggvavAfWV3TC/K6BR1zL53cMow8oX4cRyblR5mpAzz6PRwOpRj99TxrviKIry8I0mPaJG/nSYtSfieaZJJb7q05hZO6KYtP4sNloLfh/1OLV8/5lrCiVgEkIIIUSBbeeSeHnhIXQGEy2rujNncPN7Dm4URWHPxRvM//MKW84mFqRrfry6J98PCim3T6DfWH6MFYdiCa/hycJhoeVyzeKciE1n5s4o1p+Ix3TzPTWq7MorbarSsa5vwTTGw9GpvLro
MAkZedhbWzKlVyO6NjRT76oEkjLzGDAnkvOJWXg4WLPoxVDq+JX/fUxmnp6zCWoAdSY+g9PXMjibkFkQFP5VowAX5g9pUXgK34qhcHIlBIapWfNKEqSkRaupxBUjo12+5vdEL55vXpnJzzYsvzcmys2J2HS6fbsbSwsNU/s0ZtzSoxhMCpN6NqCvuULY/wASMAkhhBCikMhLNxi24CBZOkPRN8Zm5OQb+OVwHAv+vMKFpKyC7eE1PBnSKpiImt7FT+26RzEpObT733b0RoWlL7UktKpHuV37FkVRa/XM3BHFnou312lF1PLi5TbVzK6FSc7S8driI+y9pJ4zPLwKbz9Zu/CoTAlcS8ul/5xILidn4+Nsw08vtqS694PL5mY0KVxOzub0zSDqTHwGB6+kkqUzUNvXiUUvhuLpeDM7YnocfNsM9Dk3U2I/d/cXWP8ORM4gwSOUlnFjcLLRsu3NiNvXFA+dAXMi2X0xueD5Uw39+KZvk3/0KJ4ETEIIIYS4w/HYNAbP3U9qjp5aPk4sHNYCb+eia9vEpOTw494rLD0QQ0aeum7JwdqSZ0MCGBQWfF9v8N/99QSL9kXTItidpSNalttNm8FoYt3JBGbtiOLUtQxATRrwdCN/XmpdtUQjPAajiSmbzjFrxyUAQqu4802/Jng7ma8R9FcxKTn0nb2P2NRcKrnasXh46ENRrPdCYib95kRyPVNHNS8Hfnqx5e26Rzu/gK0fg6MvvHawyALEBXJT4ct6oM9mpMW7rM2py7td6/BieNUH80ZEqey+kMyAHyIBCPKwZ81rj+NUhvWIjwIJmIQQQghRpAuJmfSfE0lSpo4gD3sWDQstKCCqKAp/Rt1g3p7C0+6CPOwZHBZMr2YBZUrqUFIJ6Xm0nqKmO/5xaAta1/Qq0/WMJoWlB2KYuSOqYE2PnZUlfZpX5sXwKgS43VlA9W7Wn4jnzRXHydIZ8Hay4bv+Te+arOHS9Sz6z4kkPj2PIA97Fg9vSSXXhydhweXkbPrP3se19DwC3e1ZPDxU/dno8+C7lmoSkFZjoeOH5i9yM7hKtKtBaOoHVPVyZMOY1lhr720UTjxYiqLQb3Ykx2PTWPJSy1KvpXuUSMAkhBBCCLOib+Qw4IdIolNy8HW2ZfagZhyPS2P+nsLT7lrX9OKFx4LKfdpdSXy85jQ/7L5MowAXfh3ZqtSjTBcSM3lr5XGORKcB4O5gzeCwYAaFBZUu3fZfXEzK4uVFh7iYlIXWQsO7Xesw+LHgItt6LkENVJOzdFT3duSnF0PxMTO6V5FiUnLoP0ftG/4utiwe3pJgTwc4tx6WPA8WVvDqPvCsfufJ+jyY2gCyk3jd8CorDY8zf0hzImp5P/g3Iu6Z3mgiT2/8x48s3SIBkxBCCCGKlZihJh34a4AEYG9tSa8HMO3ubpKzdIR/to1cvZHZg5rRsa7PPZ2vN5qYuT2Kb7ZeJN9owslGy7iONenbIhA763Io7npTts7AWyuPs/Z4PAA9Gvvzac8GhZJgnIxLZ+APkaTm6Knj58zCYS0e6vU8Cel59Juzj0vXs/F2suGnF0Op4e0IP/WGi5uhxhPQf/mdJx6aD6vHcMPSi9Ds/9G6tn+h2j5CPEwkYBJCCCHEXaVm5zN43n6Ox6YT5GHPoLBgej+gaXcl8fmGs3y3PYravk6sGx1e4lGuk3HpvLXiOKfj1XVK7Wp7899n6t+3ej2KovDD7stMWn8Wo0mhlo8TMweGUMXTgcPRqQyeu5/MPDXZxoKhLXC1L9vI1oNwPVPHwB8iOZuQibuDNQuHtaCe9XV1ap5JD32XQq0nb59gMsH0FnDjAh/rB/AjXdk4tjVVvSou6BaiOBIwCSGEEKJEdAYjFxKzqOvn/MCn3d1NWk4+4Z9tI1Nn4Nt+TXiqoX+xx+fpjUzbcoFZOy9hNCm42VsxsVs9ujf2fyDZviIv3WDk4iMkZ+lwstHyckQ1vtt2kex8I82D3Zj7QvNHarpTanY+g+bu
50RcOs62Wn4cFkrjs1/Cnq/BrQqMjATtzZGys2vh535k4UBo3jT6t67H/3WpU7FvQIhiyKo6IYQQQpSIjdaS+pVcHrpgCcDV3rogu9qXm89jMN5ZO+iWQ1dT6DptF99tj8JoUuja0I/N49vQo0mlB5YaObSqB2tHP06zIDcydQambDxHdr6Rx6p5sGBoi0cqWAJwc7Dmp+GhhAS5kZFnYMCcSA4Gvahmy0u9DHu/vX3wnmkA/Ghoj52jC6+1K2KNkxAPEQmYhBBCCPGPMPTxYFztrbh0PZvfjl67Y39OvoEPfj9Fr5l7ibqejZeTDbMGhjC9X9MKWSfk42zLkpda8sJjwQC0r+3N3Beal1th3wfN2daKH4e2oGVVd7J0BgYuPM25hm+qO3d+odZpio6EmH3kK1rmGTrxVqfaj1xwKP59ZEqeEEIIIf4xZu6IYvL6s1R2t2Pr6xFY3SwUu+diMu/8cpyYlFwAeoUE8F7XurjYPxw366nZ+bjaW/0jin/m6Y2MWHiIHeevY63VsN93Cq7Jh6F+LzDkwdk1LDVE8JPvm/z6aquHcsRSiL96JEaYrly5wrBhw6hSpQp2dnZUq1aNiRMnkp+fX9FNE0IIIcRDZFBYEJ6ONsSk5LL8YCwZeXreWXmc/nMiiUlRC8AuGNqCL3o3emiCJVCntP0TgiUAWytLvh8UwhN1fcg3KAyO74WCBk6ugLNrAPje2JWJ3epJsCQeCY9EwHT27FlMJhOzZs3i1KlTfPXVV8ycOZP/+7//q+imCSGEEOIhYm+tZVTbagB89cd5On65g58PxABqMLVxXGvalLG4rbg7G60l0/s3pVsjf44Zg1libFewb7OxKQ0aNSckyK0CWyhEyT2yU/KmTJnCjBkzuHTpUonPkSl5QgghxD+fzmCk7ZTtXEvPA6CKpwOTezYgtKpHBbfs38doUnh75XG2HDrNVps3cNNk0d/4If9742V8XR6+YrxCFOXRXFUIpKen4+7uXuwxOp0OnU5X8DwjI+N+N0sIIYQQFcxGa8kHT9fj/d9O0b2xP+M61sTWqvwK0IqSs7TQ8PmzDXnfyoJekRPx0aTyWIeuEiyJR8ojOcJ08eJFQkJC+OKLLxg+fLjZ4z744AM+/PDDO7bLCJMQQgghxINzq2BvTEoOE7rUkQBWPFIqNGB65513+Oyzz4o95syZM9SuXbvgeVxcHG3atCEiIoI5c+YUe25RI0yVK1eWgEkIIYQQQghRIhUaMF2/fp0bN24Ue0zVqlWxtrYG4Nq1a0RERNCyZUvmz5+PhcW95ayQNUxCCCGEEEKIe1Gha5i8vLzw8ipZppq4uDjatm1LSEgI8+bNu+dgSQghhBBCCCHu1SOR9CEuLo6IiAiCgoL44osvuH79esE+X1/fCmyZEEIIIYQQ4p/skQiYNm/ezMWLF7l48SIBAQGF9j2COSuEEEIIIYQQj4hHMkteackaJiGEEEIIIcS9kIVAQgghhBBCCGGGBExCCCGEEEIIYYYETEIIIYQQQghhhgRMQgghhBBCCGGGBExCCCGEEEIIYYYETEIIIYQQQghhhgRMQgghhBBCCGGGBExCCCGEEEIIYYYETEIIIYQQQghhhgRMQgghhBBCCGGGBExCCCGEEEIIYYYETEIIIYQQQghhhgRMQgghhBBCCGGGtqIb8CApigJARkZGBbdECCGEEEII8TBwcnJCo9GY3f+vCpgyMzMBqFy5cgW3RAghhBBCCPEwSE9Px9nZ2ex+jXJr2OVfwGQyce3atbtGkQ9CRkYGlStXJiYmpth/ICGKIv1HlIX0H1Fa0ndEWUj/EWVxP/uPjDD9hYWFBQEBARXdjEKcnZ3lj4YoNek/oiyk/4jSkr4jykL6jyiLiug/kvRBCCGEEEIIIcyQgEkIIYQQQgghzJCAqYLY2NgwceJEbGxsKrop4hEk/UeUhfQfUVrSd0RZSP8RZVGR/edflfRBCCGEEEIIIe6FjDAJIYQQQgghhBkS
MAkhhBBCCCGEGRIwCSGEEEIIIYQZEjAJIYQQQgghhBkSMFWQ6dOnExwcjK2tLaGhoezfv7+imyQeQjt37qRbt274+/uj0Wj49ddfC+1XFIX3338fPz8/7Ozs6NChAxcuXKiYxoqHyqRJk2jevDlOTk54e3vTo0cPzp07V+iYvLw8Ro4ciYeHB46Ojjz77LMkJiZWUIvFw2TGjBk0bNiwoEBkWFgY69evL9gvfUeU1OTJk9FoNIwdO7Zgm/QfYc4HH3yARqMp9Khdu3bB/orqOxIwVYClS5cyfvx4Jk6cyOHDh2nUqBGdOnUiKSmpopsmHjLZ2dk0atSI6dOnF7n/888/Z9q0acycOZPIyEgcHBzo1KkTeXl5D7il4mGzY8cORo4cyb59+9i8eTN6vZ4nnniC7OzsgmPGjRvH6tWrWb58OTt27ODatWv07NmzAlstHhYBAQFMnjyZQ4cOcfDgQdq1a0f37t05deoUIH1HlMyBAweYNWsWDRs2LLRd+o8oTr169YiPjy947N69u2BfhfUdRTxwLVq0UEaOHFnw3Gg0Kv7+/sqkSZMqsFXiYQcoq1atKnhuMpkUX19fZcqUKQXb0tLSFBsbG2XJkiUV0ELxMEtKSlIAZceOHYqiqH3FyspKWb58ecExZ86cUQBl7969FdVM8RBzc3NT5syZI31HlEhmZqZSo0YNZfPmzUqbNm2UMWPGKIoif3tE8SZOnKg0atSoyH0V2XdkhOkBy8/P59ChQ3To0KFgm4WFBR06dGDv3r0V2DLxqLl8+TIJCQmF+pKLiwuhoaHSl8Qd0tPTAXB3dwfg0KFD6PX6Qv2ndu3aBAYGSv8RhRiNRn7++Weys7MJCwuTviNKZOTIkXTt2rVQPwH52yPu7sKFC/j7+1O1alX69+9PdHQ0ULF9R3tfry7ukJycjNFoxMfHp9B2Hx8fzp49W0GtEo+ihIQEgCL70q19QgCYTCbGjh1Lq1atqF+/PqD2H2tra1xdXQsdK/1H3HLixAnCwsLIy8vD0dGRVatWUbduXY4ePSp9RxTr559/5vDhwxw4cOCOffK3RxQnNDSU+fPnU6tWLeLj4/nwww8JDw/n5MmTFdp3JGASQoh/uJEjR3Ly5MlC88CFuJtatWpx9OhR0tPTWbFiBYMHD2bHjh0V3SzxkIuJiWHMmDFs3rwZW1vbim6OeMR07ty54PuGDRsSGhpKUFAQy5Ytw87OrsLaJVPyHjBPT08sLS3vyOiRmJiIr69vBbVKPIpu9RfpS6I4o0aNYs2aNWzbto2AgICC7b6+vuTn55OWllboeOk/4hZra2uqV69OSEgIkyZNolGjRnz99dfSd0SxDh06RFJSEk2bNkWr1aLVatmxYwfTpk1Dq9Xi4+Mj/UeUmKurKzVr1uTixYsV+rdHAqYHzNrampCQELZs2VKwzWQysWXLFsLCwiqwZeJRU6VKFXx9fQv1pYyMDCIjI6UvCRRFYdSoUaxatYqtW7dSpUqVQvtDQkKwsrIq1H/OnTtHdHS09B9RJJPJhE6nk74jitW+fXtOnDjB0aNHCx7NmjWjf//+Bd9L/xEllZWVRVRUFH5+fhX6t0em5FWA8ePHM3jwYJo1a0aLFi2YOnUq2dnZDBkypKKbJh4yWVlZXLx4seD55cuXOXr0KO7u7gQGBjJ27Fg++eQTatSoQZUqVXjvvffw9/enR48eFddo8VAYOXIkixcv5rfffsPJyalgfreLiwt2dna4uLgwbNgwxo8fj7u7O87Ozrz22muEhYXRsmXLCm69qGgTJkygc+fOBAYGkpmZyeLFi9m+fTsbN26UviOK5eTkVLBW8hYHBwc8PDwKtkv/Eea88cYbdOvWjaCgIK5du8bEiROxtLSkb9++Ffu3577m4BNmffPNN0pgYKBibW2ttGjRQtm3b19FN0k8hLZt26YAdzwGDx6sKIqaWvy9995TfHx8FBsbG6V9+/bKuXPnKrbR4qFQVL8BlHnz5hUck5ub
q7z66quKm5ubYm9vrzzzzDNKfHx8xTVaPDSGDh2qBAUFKdbW1oqXl5fSvn17ZdOmTQX7pe+Ie/HXtOKKIv1HmNenTx/Fz89Psba2VipVqqT06dNHuXjxYsH+iuo7GkVRlPsbkgkhhBBCCCHEo0nWMAkhhBBCCCGEGRIwCSGEEEIIIYQZEjAJIYQQQgghhBkSMAkhhBBCCCGEGRIwCSGEEEIIIYQZEjAJIYQQQgghhBkSMAkhhBBCCCGEGRIwCSGEEEIIIYQZEjAJIYQQQgghhBkSMAkhhPhXiYiIYOzYsRXdDCGEEI8ICZiEEEIIIYQQwgyNoihKRTdCCCGEeBBeeOEFFixYUGjb5cuXCQ4OrpgGCSGEeOhJwCSEEOJfIz09nc6dO1O/fn0++ugjALy8vLC0tKzglgkhhHhYaSu6AUIIIcSD4uLigrW1Nfb29vj6+lZ0c4QQQjwCZA2TEEIIIYQQQpghAZMQQgghhBBCmCEBkxBCiH8Va2trjEZjRTdDCCHEI0ICJiGEEP8qwcHBREZGcuXKFZKTkzGZTBXdJCGEEA8xCZiEEEL8q7zxxhtYWlpSt25dvLy8iI6OrugmCSGEeIhJWnEhhBBCCCGEMENGmIQQQgghhBDCDAmYhBBCCCGEEMIMCZiEEEIIIYQQwgwJmIQQQgghhBDCDAmYhBBCCCGEEMIMCZiEEEIIIYQQwgwJmIQQQgghhBDCDAmYhBBCCCGEEMIMCZiEEEIIIYQQwgwJmIQQQgghhBDCDAmYhBBCCCGEEMKM/weP8QwIHpXWZgAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "