diff --git a/.gitignore b/.gitignore
index 1ae7cfb7..a51fb346 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,8 @@ __pycache__
.DS_STORE
conf.cfg
.pytest_cache
+
+# Java related
+.classpath
+.project
+.settings/
diff --git a/.travis.yml b/.travis.yml
index 1723c6bc..e7163c60 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,16 +1,17 @@
# Set-up a python centric enviroment in order to easily choose py version:2.7
# bonus: Java 7 and mvn also included
language: python
-# Target py version 2.7
+# Target py version 3.6
python:
- - "2.7"
+ - "3.6"
script:
- pip install -r ./bin/requirements.txt
- pytest
- - cd flink_jobs/ams_ingest_metric/ && travis_wait mvn test
- - cd ../batch_ar && travis_wait mvn test
- - cd ../batch_status && travis_wait mvn test
- - cd ../stream_status && travis_wait mvn test
- - cd ../ams_ingest_sync && travis_wait mvn test
+ - cd flink_jobs/ams_ingest_metric/ && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test
+ - cd ../batch_ar && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test
+ - cd ../batch_status && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test
+ - cd ../stream_status && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test
+ - cd ../ams_ingest_sync && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test
+
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 00000000..c22985f4
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,79 @@
+
+pipeline {
+ agent none
+ options {
+ checkoutToSubdirectory('argo-streaming')
+ newContainerPerStage()
+ }
+ environment {
+ PROJECT_DIR='argo-streaming'
+ REQUIREMENTS="${PROJECT_DIR}/bin/requirements.txt"
+ }
+ stages {
+ stage('Configuration scripts Tests') {
+ agent {
+ docker {
+ image 'argo.registry:5000/epel-7-py36'
+ args '-u jenkins:jenkins'
+ }
+ }
+ steps {
+ echo 'Testing compute engine auto configuration scripts'
+ sh """
+ pip3 install -r ${REQUIREMENTS} --user
+ pytest --junit-xml=${PROJECT_DIR}/junit.xml --cov=${PROJECT_DIR} --cov-report=xml
+ """
+ junit '**/junit.xml'
+ cobertura coberturaReportFile: '**/coverage.xml'
+ }
+ post {
+ always {
+ cleanWs()
+ }
+ }
+ }
+ stage('Flink Jobs Testing & Packaging') {
+ agent {
+ docker {
+ image 'argo.registry:5000/epel-7-java18'
+ args '-u jenkins:jenkins'
+ }
+ }
+ steps {
+ echo 'Packaging & Testing Flink Jobs'
+ sh """
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/stream_status/pom.xml
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/batch_ar/pom.xml
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/batch_status/pom.xml
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/ams_ingest_metric/pom.xml
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/ams_ingest_sync/pom.xml
+ mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/status_trends/pom.xml
+ """
+ junit '**/target/surefire-reports/*.xml'
+ cobertura coberturaReportFile: '**/target/site/cobertura/coverage.xml'
+ archiveArtifacts artifacts: '**/target/*.jar'
+ }
+ post {
+ always {
+ cleanWs()
+ }
+ }
+ }
+ }
+ post {
+ success {
+ script{
+ if ( env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'devel' ) {
+ slackSend( message: ":rocket: New version for <$BUILD_URL|$PROJECT_DIR>:$BRANCH_NAME Job: $JOB_NAME !")
+ }
+ }
+ }
+ failure {
+ script{
+ if ( env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'devel' ) {
+ slackSend( message: ":rain_cloud: Build Failed for <$BUILD_URL|$PROJECT_DIR>:$BRANCH_NAME Job: $JOB_NAME")
+ }
+ }
+ }
+ }
+}
diff --git a/README.md b/README.md
index 2ea45779..15b095a1 100644
--- a/README.md
+++ b/README.md
@@ -368,3 +368,82 @@ Ingest Sync | Ingesting sync data from `{{ams-endpoint}}`/v1/projects/`{{project
Batch AR | Ar Batch job for tenant:`{{tenant}}` on day:`{{day}}` using report:`{{report}}`
Batch Status | Status Batch job for tenant:`{{tenant}}` on day:`{{day}}` using report:`{{report}}`
Streaming Status | Streaming status using data from `{{ams-endpoint}}`/v1/projects/`{{project}}`/subscriptions/`[`{{metric_subscription}}`,`{{sync_subscription}}`]
+
+## Status Trends
+Flink batch job that calculates status trends for the critical, warning and unknown statuses.
+The job requires the following parameters:
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+
+Flink batch job that calculates flip-flop trends for service endpoint metrics.
+The job requires the following parameters:
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+
+Flink batch job that calculates flip-flop trends for service endpoints.
+The job requires the following parameters:
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+
+Flink batch job that calculates flip-flop trends for services.
+The job requires the following parameters:
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+
+Flink batch job that calculates flip-flop trends for groups.
+The job requires the following parameters:
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+Flink batch job that calculates flip-flop trends for all levels of groups.
+The job requires the following parameters (an example invocation is shown below):
+
+`--yesterdayData` : file location of the previous day's data
+`--todayData` : file location of today's data
+`--N` : (optional) number of top results to display
+`--mongoUri` : URI of the MongoDB used to store the results
+`--apiUri` : URI of the web-api
+`--key` : user's token, used for authentication
+`--proxy` : (optional) proxy URL
+`--clearMongo` : (optional) if true, the mongo collections are cleared of previous documents; if false or missing, the collections remain as they are
+
+
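+A minimal, hypothetical submission example for one of the jobs above (the jar name, main class and
+input paths are placeholders; use the actual artifacts built from `flink_jobs/status_trends`):
+
+```
+flink run -c <MainClass> status-trends.jar \
+  --yesterdayData /path/to/previous_day_data \
+  --todayData /path/to/today_data \
+  --N 10 \
+  --mongoUri mongodb://localhost:27017/argo_TENANT \
+  --apiUri https://api.example.org \
+  --key <ACCESS_TOKEN> \
+  --clearMongo true
+```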
diff --git a/bin/ar_job_submit.py b/bin/ar_job_submit.py
index b643d0e5..5743bc36 100755
--- a/bin/ar_job_submit.py
+++ b/bin/ar_job_submit.py
@@ -6,18 +6,31 @@
import datetime
from snakebite.client import Client
import logging
-from urlparse import urlparse
+from urllib.parse import urlparse
from utils.argo_mongo import ArgoMongoClient
from utils.common import cmd_to_string, date_rollback, flink_job_submit, hdfs_check_path, get_log_conf, get_config_paths
from utils.update_profiles import ArgoProfileManager
from utils.argo_config import ArgoConfig
from utils.recomputations import upload_recomputations
+from datetime import datetime as dt  # aliased to avoid shadowing the 'datetime' module imported above
log = logging.getLogger(__name__)
def compose_hdfs_commands(year, month, day, args, config):
+ """Checks hdfs for available files back in time and prepares the correct hdfs arguments
+
+ Args:
+ year (int): year part of the date to check for hdfs files
+ month (int): month part of the date to check for hdfs files
+ day (int): day part of the date to check for hdfs files
+ args (dict): command line arguments of this script
+ config (obj.): argo configuration object
+
+ Returns:
+ dict: A dictionary of all hdfs arguments to be used in the flink job submission
+ """
# set up the hdfs client to be used in order to check the files
namenode = config.get("HDFS", "namenode")
@@ -27,12 +40,12 @@ def compose_hdfs_commands(year, month, day, args, config):
hdfs_user = config.get("HDFS", "user")
tenant = args.tenant
- hdfs_sync = config.get("HDFS", "path_sync")
- hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
+
hdfs_metric = config.get("HDFS", "path_metric")
- hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
+ hdfs_metric = hdfs_metric.fill(
+ namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
# dictionary holding all the commands with their respective arguments' name
hdfs_commands = dict()
@@ -42,57 +55,26 @@ def compose_hdfs_commands(year, month, day, args, config):
hdfs_metric + "/" + str(datetime.date(year, month, day) - datetime.timedelta(1)), client)
# file location of target day's metric data (local or hdfs)
- hdfs_commands["--mdata"] = hdfs_check_path(hdfs_metric + "/" + args.date, client)
-
- # file location of report configuration json file (local or hdfs)
- hdfs_commands["--conf"] = hdfs_check_path(hdfs_sync + "/" + args.tenant+"_"+args.report+"_cfg.json", client)
-
- # file location of metric profile (local or hdfs)
- hdfs_commands["--mps"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config,
- client)
-
- # file location of operations profile (local or hdfs)
- hdfs_commands["--ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client)
-
- # file location of aggregations profile (local or hdfs)
- hdfs_commands["--apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client)
-
- if args.thresholds:
- # file location of thresholds rules file (local or hdfs)
- hdfs_commands["--thr"] = hdfs_check_path(
- os.path.join(hdfs_sync, "".join([args.tenant, "_", args.report, "_thresholds.json"])), client)
-
- # file location of endpoint group topology file (local or hdfs)
- hdfs_commands["-egp"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config,
- client)
-
- # file location of group of groups topology file (local or hdfs)
- hdfs_commands["-ggp"] = date_rollback(hdfs_sync + "/" + args.report + "/" + "group_groups_" + "{{date}}" + ".avro",
- year, month, day, config, client)
-
- # file location of weights file (local or hdfs)
- hdfs_commands["--weights"] = date_rollback(hdfs_sync + "/" + args.report + "/weights_" + "{{date}}" + ".avro", year,
- month, day, config, client)
+ hdfs_commands["--mdata"] = hdfs_check_path(
+ hdfs_metric + "/" + args.date, client)
- # file location of downtimes file (local or hdfs)
- hdfs_commands["--downtimes"] = hdfs_check_path(
- hdfs_sync + "/" + args.report + "/downtimes_" + str(datetime.date(year, month, day)) + ".avro", client)
+ return hdfs_commands
- # file location of recomputations file (local or hdfs)
- # first check if there is a recomputations file for the given date
- # recomputation lies in the hdfs in the form of
- # /sync/recomp_TENANTNAME_ReportName_2018-08-02.json
- if client.test(urlparse(hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json").path, exists=True):
- hdfs_commands["--rec"] = hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json"
- else:
- hdfs_commands["--rec"] = hdfs_check_path(hdfs_sync+"/recomp.json", client)
- return hdfs_commands
+def compose_command(config, args, hdfs_commands, dry_run=False):
+ """Composes a command line execution string for submitting a flink job. Also calls mongodb
+ clean up procedure before composing the command
+ Args:
+ config (obj.): argo configuration object
+ args (dict): command line arguments of this script
+ hdfs_commands (list): a list of hdfs related arguments to be passed in flink job
+ dry_run (bool, optional): signifies a dry-run execution context, if yes no mongodb clean-up is perfomed.
+ Defaults to False.
-def compose_command(config, args, hdfs_commands):
+ Returns:
+ list: A list of all command line arguments for performing the flink job submission
+ """
# job submission command
cmd_command = []
@@ -120,13 +102,16 @@ def compose_command(config, args, hdfs_commands):
# MongoDB uri for outputting the results to (e.g. mongodb://localhost:21017/example_db)
cmd_command.append("--mongo.uri")
group_tenant = "TENANTS:"+args.tenant
- mongo_endpoint = config.get("MONGO","endpoint").geturl()
- mongo_uri = config.get(group_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint, tenant=args.tenant)
+ mongo_endpoint = config.get("MONGO", "endpoint").geturl()
+ mongo_uri = config.get(group_tenant, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=args.tenant)
cmd_command.append(mongo_uri.geturl())
- if args.method == "insert":
- argo_mongo_client = ArgoMongoClient(args, config, ["service_ar", "endpoint_group_ar"])
- argo_mongo_client.mongo_clean_ar(mongo_uri)
+ # do action if method is insert and not dry run
+ if args.method == "insert" and not dry_run:
+ argo_mongo_client = ArgoMongoClient(
+ args, config, ["endpoint_ar", "service_ar", "endpoint_group_ar"])
+ argo_mongo_client.mongo_clean_ar(mongo_uri, dry_run)
# MongoDB method to be used when storing the results, either insert or upsert
cmd_command.append("--mongo.method")
@@ -137,21 +122,28 @@ def compose_command(config, args, hdfs_commands):
cmd_command.append(command)
cmd_command.append(hdfs_commands[command])
- # get optional ams proxy
- proxy = config.get("AMS", "proxy")
+ # get the api endpoint
+ api_endpoint = config.get("API", "endpoint")
+ if api_endpoint:
+ cmd_command.append("--api.endpoint")
+ cmd_command.append(api_endpoint.hostname)
+
+ # get the api token
+ cmd_command.append("--api.token")
+ cmd_command.append(config.get("API", "access_token"))
+
+ # get report id
+ cmd_command.append("--report.id")
+ cmd_command.append(config.get("TENANTS:" + args.tenant, "report_" + args.report))
+
+ # get optional api proxy
+ proxy = config.get("API", "proxy")
if proxy is not None:
- cmd_command.append("--ams.proxy")
+ cmd_command.append("--api.proxy")
cmd_command.append(proxy.geturl())
- # ssl verify
- cmd_command.append("--ams.verify")
- ams_verify = config.get("AMS", "verify")
- if ams_verify is not None:
- cmd_command.append(str(ams_verify).lower())
- else:
- # by default assume ams verify is always true
- cmd_command.append("true")
-
return cmd_command
@@ -173,47 +165,41 @@ def main(args=None):
log.info("Tenant: "+args.tenant+" doesn't exist.")
sys.exit(1)
- # check and upload recomputations
- upload_recomputations(args.tenant, args.report, args.date, config)
-
- # optional call to update profiles
- if args.profile_check:
- profile_mgr = ArgoProfileManager(config)
- profile_type_checklist = ["operations", "aggregations", "reports", "thresholds"]
- for profile_type in profile_type_checklist:
- profile_mgr.profile_update_check(args.tenant, args.report, profile_type)
# dictionary containing the argument's name and the command assosciated with each name
hdfs_commands = compose_hdfs_commands(year, month, day, args, config)
- cmd_command = compose_command(config, args, hdfs_commands)
-
- log.info("Getting ready to submit job")
- log.info(cmd_to_string(cmd_command)+"\n")
+ cmd_command = compose_command(config, args, hdfs_commands, args.dry_run)
# submit the script's command
- flink_job_submit(config, cmd_command)
+ flink_job_submit(config, cmd_command, None, args.dry_run)
if __name__ == "__main__":
+ today = dt.today().strftime('%Y-%m-%d')
+
parser = argparse.ArgumentParser(description="Batch A/R Job submit script")
parser.add_argument(
"-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant")
parser.add_argument(
- "-r", "--report", metavar="STRING", help="Report status", required=True, dest="report")
+ "-r", "--report", metavar="STRING", help="Name of the report", required=True, dest="report")
parser.add_argument(
- "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date")
+ "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=False, dest="date", default=today)
parser.add_argument(
- "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method")
+ "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=False, dest="method", default="insert")
parser.add_argument(
- "-c", "--config", metavar="PATH", help="Path for the config file", dest="config")
+ "-c", "--config", metavar="PATH", help="Path for the config file", dest="config", required=True)
parser.add_argument(
"-u", "--sudo", help="Run the submition as superuser", action="store_true")
parser.add_argument("--profile-check", help="check if profiles are up to date before running job",
dest="profile_check", action="store_true")
+ parser.add_argument("--historic-profiles", help="use historic profiles",
+ dest="historic", action="store_true")
parser.add_argument("--thresholds", help="check and use threshold rule file if exists",
dest="thresholds", action="store_true")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
# Pass the arguments to main method
sys.exit(main(parser.parse_args()))
diff --git a/bin/metric_ingestion_submit.py b/bin/metric_ingestion_submit.py
index 47ec738b..2d67e42d 100755
--- a/bin/metric_ingestion_submit.py
+++ b/bin/metric_ingestion_submit.py
@@ -10,6 +10,15 @@
def compose_command(config, args):
+ """Composes a command line execution string for submitting a flink job.
+
+ Args:
+ config (obj.): argo configuration object
+ args (dict): command line arguments of this script
+
+ Returns:
+ tuple: a list of all command line arguments for the flink job submission, and the job namespace
+ """
# job submission command
cmd_command = []
@@ -48,7 +57,7 @@ def compose_command(config, args):
# ams port
ams_port = 443
if ams_endpoint.port is not None:
- ams_port = ams_endpoint.port
+ ams_port = ams_endpoint.port
cmd_command.append("--ams.port")
cmd_command.append(str(ams_port))
@@ -73,9 +82,11 @@ def compose_command(config, args):
hdfs_user = config.get("HDFS", "user")
hdfs_metric = config.get("HDFS", "path_metric")
- hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant)
+ hdfs_metric.fill(namenode=namenode.geturl(),
+ hdfs_user=hdfs_user, tenant=args.tenant)
- hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant).geturl()
+ hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(
+ ), hdfs_user=hdfs_user, tenant=args.tenant).geturl()
cmd_command.append("--hdfs.path")
cmd_command.append(hdfs_metric)
@@ -86,7 +97,8 @@ def compose_command(config, args):
# interval for checkpont in ms
cmd_command.append("--check.interval")
- cmd_command.append(str(config.get(section_tenant_job, "checkpoint_interval")))
+ cmd_command.append(
+ str(config.get(section_tenant_job, "checkpoint_interval")))
# num of messages to be retrieved from AMS per request
cmd_command.append("--ams.batch")
@@ -110,7 +122,7 @@ def compose_command(config, args):
else:
# by default assume ams verify is always true
cmd_command.append("true")
-
+
return cmd_command, job_namespace
@@ -131,20 +143,20 @@ def main(args=None):
cmd_command, job_namespace = compose_command(config, args)
- log.info("Getting ready to submit job")
- log.info(cmd_to_string(cmd_command)+"\n")
-
# submit script's command
- flink_job_submit(config, cmd_command, job_namespace)
+ flink_job_submit(config, cmd_command, job_namespace, args.dry_run)
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="AMS Metric Ingestion submission script")
+ parser = argparse.ArgumentParser(
+ description="AMS Metric Ingestion submission script")
parser.add_argument(
"-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True)
parser.add_argument(
"-c", "--config", metavar="PATH", help="Path for the config file")
parser.add_argument(
"-u", "--sudo", help="Run the submition as superuser", action="store_true")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
sys.exit(main(parser.parse_args()))
diff --git a/bin/requirements.txt b/bin/requirements.txt
index 64fb15a3..eb5cbfef 100644
--- a/bin/requirements.txt
+++ b/bin/requirements.txt
@@ -1,5 +1,5 @@
-requests==2.20.0
-responses==0.6.0
-pytest==3.4.0
-snakebite==2.11.0
-pymongo==3.6.1
+requests==2.22.0
+responses==0.10.7
+pytest==5.3.1
+snakebite-py3==3.0.5
+pymongo==3.10.0
diff --git a/bin/status_job_submit.py b/bin/status_job_submit.py
index 57acdda4..38c2803c 100755
--- a/bin/status_job_submit.py
+++ b/bin/status_job_submit.py
@@ -6,16 +6,30 @@
from snakebite.client import Client
import logging
-from urlparse import urlparse
+from urllib.parse import urlparse
from utils.argo_mongo import ArgoMongoClient
from utils.common import cmd_to_string, date_rollback, flink_job_submit, hdfs_check_path, get_log_conf, get_config_paths
from utils.update_profiles import ArgoProfileManager
from utils.argo_config import ArgoConfig
+from datetime import datetime as dt  # aliased to avoid shadowing the 'datetime' module used elsewhere in this script
+
log = logging.getLogger(__name__)
def compose_hdfs_commands(year, month, day, args, config):
+ """Checks hdfs for available files back in time and prepares the correct hdfs arguments
+
+ Args:
+ year (int): year part of the date to check for hdfs files
+ month (int): month part of the date to check for hdfs files
+ day (int): day part of the date to check for hdfs files
+ args (dict): command line arguments of this script
+ config (obj.): argo configuration object
+
+ Returns:
+ dict: A dictionary of all hdfs arguments to be used in the flink job submission
+ """
# set up the hdfs client to be used in order to check the files
namenode = config.get("HDFS", "namenode")
@@ -26,11 +40,13 @@ def compose_hdfs_commands(year, month, day, args, config):
hdfs_user = config.get("HDFS", "user")
tenant = args.tenant
hdfs_sync = config.get("HDFS", "path_sync")
- hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
+ hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(
+ ), hdfs_user=hdfs_user, tenant=tenant).geturl()
hdfs_metric = config.get("HDFS", "path_metric")
- hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
+ hdfs_metric = hdfs_metric.fill(
+ namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
# dictionary holding all the commands with their respective arguments' name
hdfs_commands = dict()
@@ -40,49 +56,26 @@ def compose_hdfs_commands(year, month, day, args, config):
hdfs_metric + "/" + str(datetime.date(year, month, day) - datetime.timedelta(1)), client)
# file location of target day's metric data (local or hdfs)
- hdfs_commands["--mdata"] = hdfs_check_path(hdfs_metric+"/"+args.date, client)
-
- # file location of report configuration json file (local or hdfs)
- hdfs_commands["--conf"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_cfg.json", client)
-
- # file location of metric profile (local or hdfs)
- hdfs_commands["--mps"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config,
- client)
-
- # file location of operations profile (local or hdfs)
- hdfs_commands["--ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client)
-
- # file location of aggregations profile (local or hdfs)
- hdfs_commands["--apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client)
-
- if args.thresholds:
- # file location of thresholds rules file (local or hdfs)
- hdfs_commands["--thr"] = hdfs_check_path(
- os.path.join(hdfs_sync, "".join([args.tenant, "_", args.report, "_thresholds.json"])), client)
-
- # file location of endpoint group topology file (local or hdfs)
- hdfs_commands["-egp"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config,
- client)
+ hdfs_commands["--mdata"] = hdfs_check_path(
+ hdfs_metric+"/"+args.date, client)
- # file location of group of groups topology file (local or hdfs)
- hdfs_commands["-ggp"] = date_rollback(hdfs_sync + "/" + args.report + "/" + "group_groups_" + "{{date}}" + ".avro",
- year, month, day, config, client)
+ return hdfs_commands
- # file location of recomputations file (local or hdfs)
- # first check if there is a recomputations file for the given date
- if client.test(urlparse(hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json").path, exists=True):
- hdfs_commands["--rec"] = hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json"
- log.info("Using recomputations file for the given date")
- else:
- hdfs_commands["--rec"] = hdfs_check_path(hdfs_sync+"/recomp.json", client)
- log.info("Recomputations file for the given date was not found. Using default.")
- return hdfs_commands
+def compose_command(config, args, hdfs_commands, dry_run=False):
+ """Composes a command line execution string for submitting a flink job. Also calls mongodb
+ clean up procedure before composing the command
+ Args:
+ config (obj.): argo configuration object
+ args (dict): command line arguments of this script
+ hdfs_commands (list): a list of hdfs related arguments to be passed in flink job
+ dry_run (bool, optional): signifies a dry-run execution context, if yes no mongodb clean-up is perfomed.
+ Defaults to False.
-def compose_command(config, args, hdfs_commands):
+ Returns:
+ list: A list of all command line arguments for performing the flink job submission
+ """
# job sumbission command
cmd_command = []
@@ -110,15 +103,16 @@ def compose_command(config, args, hdfs_commands):
# MongoDB uri for outputting the results to (e.g. mongodb://localhost:21017/example_db)
cmd_command.append("--mongo.uri")
group_tenant = "TENANTS:" + args.tenant
- mongo_endpoint = config.get("MONGO","endpoint").geturl()
- mongo_uri = config.get(group_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant)
+ mongo_endpoint = config.get("MONGO", "endpoint").geturl()
+ mongo_uri = config.get(group_tenant, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=args.tenant)
cmd_command.append(mongo_uri.geturl())
- if args.method == "insert":
+ if args.method == "insert" and not dry_run:
argo_mongo_client = ArgoMongoClient(args, config, ["status_metrics", "status_endpoints", "status_services",
"status_endpoint_groups"])
- argo_mongo_client.mongo_clean_status(mongo_uri)
+ argo_mongo_client.mongo_clean_status(mongo_uri, dry_run)
# MongoDB method to be used when storing the results, either insert or upsert
cmd_command.append("--mongo.method")
@@ -129,21 +123,27 @@ def compose_command(config, args, hdfs_commands):
cmd_command.append(command)
cmd_command.append(hdfs_commands[command])
- # get optional ams proxy
- proxy = config.get("AMS", "proxy")
+ # get the api endpoint
+ api_endpoint = config.get("API", "endpoint")
+ if api_endpoint:
+ cmd_command.append("--api.endpoint")
+ cmd_command.append(api_endpoint.hostname)
+
+ # get the api token
+ cmd_command.append("--api.token")
+ cmd_command.append(config.get("API", "access_token"))
+
+ # get report id
+ cmd_command.append("--report.id")
+ cmd_command.append(config.get("TENANTS:" + args.tenant, "report_" + args.report))
+
+ # get optional api proxy
+ proxy = config.get("API", "proxy")
if proxy is not None:
- cmd_command.append("--ams.proxy")
+ cmd_command.append("--api.proxy")
cmd_command.append(proxy.geturl())
- # ssl verify
- cmd_command.append("--ams.verify")
- ams_verify = config.get("AMS", "verify")
- if ams_verify is not None:
- cmd_command.append(str(ams_verify).lower())
- else:
- # by default assume ams verify is always true
- cmd_command.append("true")
-
return cmd_command
@@ -165,44 +165,41 @@ def main(args=None):
year, month, day = [int(x) for x in args.date.split("-")]
- # optional call to update profiles
- if args.profile_check:
- profile_mgr = ArgoProfileManager(config)
- profile_type_checklist = ["operations", "aggregations", "reports", "thresholds"]
- for profile_type in profile_type_checklist:
- profile_mgr.profile_update_check(args.tenant, args.report, profile_type)
-
# dictionary containing the argument's name and the command associated with each name
hdfs_commands = compose_hdfs_commands(year, month, day, args, config)
- cmd_command = compose_command(config, args, hdfs_commands)
-
- log.info("Getting ready to submit job")
- log.info(cmd_to_string(cmd_command)+"\n")
+ cmd_command = compose_command(config, args, hdfs_commands, args.dry_run)
# submit the script's command
- flink_job_submit(config, cmd_command)
+ flink_job_submit(config, cmd_command, None, args.dry_run)
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Batch Status Job submit script")
+ today = dt.today().strftime('%Y-%m-%d')
+
+ parser = argparse.ArgumentParser(
+ description="Batch Status Job submit script")
parser.add_argument(
"-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant")
parser.add_argument(
"-r", "--report", metavar="STRING", help="Report status", required=True, dest="report")
parser.add_argument(
- "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date")
+ "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date", default=today)
parser.add_argument(
- "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method")
+ "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method", default="insert")
parser.add_argument(
"-c", "--config", metavar="PATH", help="Path for the config file", dest="config")
parser.add_argument(
"-u", "--sudo", help="Run the submit job as superuser", action="store_true")
+ parser.add_argument("--historic-profiles", help="use historic profiles",
+ dest="historic", action="store_true")
parser.add_argument("--profile-check", help="check if profiles are up to date before running job",
dest="profile_check", action="store_true")
parser.add_argument("--thresholds", help="check and use threshold rule file if exists",
dest="thresholds", action="store_true")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
# Pass the arguments to main method
sys.exit(main(parser.parse_args()))
diff --git a/bin/stream_status_job_submit.py b/bin/stream_status_job_submit.py
index 144dae44..c7042332 100755
--- a/bin/stream_status_job_submit.py
+++ b/bin/stream_status_job_submit.py
@@ -11,6 +11,19 @@
def compose_hdfs_commands(year, month, day, args, config):
+ """Checks hdfs for available files back in time and prepares the correct hdfs arguments
+
+ Args:
+ year (int): year part of the date to check for hdfs files
+ month (int): month part of the date to check for hdfs files
+ day (int): day part of the date to check for hdfs files
+ args (dict): command line arguments of this script
+ config (obj.): argo configuration object
+
+ Returns:
+ dict: A dictionary of all hdfs arguments to be used in the flink job submission
+ """
+
# set up the hdfs client to be used in order to check the files
namenode = config.get("HDFS", "namenode")
client = Client(namenode.hostname, namenode.port, use_trash=False)
@@ -20,31 +33,77 @@ def compose_hdfs_commands(year, month, day, args, config):
hdfs_user = config.get("HDFS", "user")
tenant = args.tenant
hdfs_sync = config.get("HDFS", "path_sync")
- hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl()
+ hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(
+ ), hdfs_user=hdfs_user, tenant=tenant).geturl()
# dictionary holding all the commands with their respective arguments' name
hdfs_commands = dict()
- # file location of metric profile (local or hdfs)
- hdfs_commands["--sync.mps"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config,
- client)
+ # if profile historic mode is used reference profiles by date
+ if args.historic:
+ # file location of historic operations profile (local or hdfs)
+ hdfs_commands["--ops"] = hdfs_check_path(
+ hdfs_sync+"/"+args.tenant+"_ops_" + args.date + ".json", client)
+
+ # file location of historic aggregations profile (local or hdfs)
+ hdfs_commands["--apr"] = hdfs_check_path(
+ hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap_" + args.date + ".json", client)
+
+ # TODO: Don't Use YET metric profiles from api in json form until status computation jobs are updated
+ # accordingly - After that uncomment the following
+ # #file location of historic metric profile (local or hdfs) which is in json format
+ # hdfs_commands["--mps"] = hdfs_check_path(
+ # hdfs_sync+"/"+args.tenant+"_"+args.report+"_metric_" + args.date + ".json", client)
+
+ # TODO: when compute jobs are updated to use metric profiles in json format comment the following:
+ # file location of metric profile (local or hdfs)
+ hdfs_commands["--mps"] = date_rollback(
+ hdfs_sync + "/" + args.report + "/" + "metric_profile_" +
+ "{{date}}" + ".avro", year, month, day, config,
+ client)
+ else:
+
+ # file location of operations profile (local or hdfs)
+ hdfs_commands["--ops"] = hdfs_check_path(
+ hdfs_sync+"/"+args.tenant+"_ops.json", client)
- # file location of operations profile (local or hdfs)
- hdfs_commands["--sync.ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client)
+ # file location of aggregations profile (local or hdfs)
+ hdfs_commands["--apr"] = hdfs_check_path(
+ hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client)
- # file location of aggregations profile (local or hdfs)
- hdfs_commands["--sync.apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client)
+ # file location of metric profile (local or hdfs)
+ hdfs_commands["--mps"] = date_rollback(
+ hdfs_sync + "/" + args.report + "/" + "metric_profile_" +
+ "{{date}}" + ".avro", year, month, day, config,
+ client)
+
+ # get downtime
+ # file location of metric profile (local or hdfs)
+ hdfs_commands["--sync.downtime"] = date_rollback(
+ hdfs_sync + "/" + args.report + "/" + "downtimes_" +
+ "{{date}}" + ".avro", year, month, day, config,
+ client)
# file location of endpoint group topology file (local or hdfs)
hdfs_commands["-sync.egp"] = date_rollback(
- hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config,
+ hdfs_sync + "/" + args.report + "/" + "group_endpoints_" +
+ "{{date}}" + ".avro", year, month, day, config,
client)
return hdfs_commands
def compose_command(config, args, hdfs_commands):
+ """Composes a command line execution string for submitting a flink job.
+
+ Args:
+ config (obj.): argo configuration object
+ args (dict): command line arguments of this script
+ hdfs_commands (dict): a dictionary of hdfs related arguments to be passed to the flink job
+
+ Returns:
+ tuple: the list of all command line arguments for the flink job submission, and the job namespace
+ """
# job submission command
cmd_command = []
@@ -55,12 +114,16 @@ def compose_command(config, args, hdfs_commands):
# get needed config params
section_tenant = "TENANTS:" + args.tenant
section_tenant_job = "TENANTS:" + args.tenant + ":stream-status"
- job_namespace = config.get("JOB-NAMESPACE", "stream-status-namespace")
+
ams_endpoint = config.get("AMS", "endpoint")
ams_project = config.get(section_tenant, "ams_project")
- ams_sub_metric = config.get(section_tenant_job, "ams_sub_metric")
- ams_sub_sync = config.get(section_tenant_job, "ams_sub_sync")
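+ # the default "critical" report uses the configured subscriptions; any other report derives its subscription names from the report name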
+ if args.report.lower() == "critical":
+ ams_sub_metric = config.get(section_tenant_job, "ams_sub_metric")
+ ams_sub_sync = config.get(section_tenant_job, "ams_sub_sync")
+ else:
+ ams_sub_metric = "stream_metric_" + report.lower()
+ ams_sub_sync = "stream_sync_" + report.lower()
# flink executable
cmd_command.append(config.get("FLINK", "path"))
@@ -80,8 +143,10 @@ def compose_command(config, args, hdfs_commands):
cmd_command.append(ams_endpoint.hostname)
# ams port
- cmd_command.append("--ams.port")
- cmd_command.append(ams_endpoint.port)
+ ams_port = ams_endpoint.port
+ if not ams_port:
+ ams_port = 443
+ cmd_command.append("--ams.port")
+ cmd_command.append(str(ams_port))
# tenant's token for ams
cmd_command.append("--ams.token")
@@ -100,8 +165,9 @@ def compose_command(config, args, hdfs_commands):
cmd_command.append(ams_sub_sync)
# fill job namespace template with the required arguments
- job_namespace.fill(ams_endpoint=ams_endpoint.hostname, ams_port=ams_endpoint.port, ams_project=ams_project,
- ams_sub_metric=ams_sub_metric, ams_sub_sync=ams_sub_sync)
+ job_namespace = config.get("JOB-NAMESPACE", "stream-status-namespace")
+ job_namespace = job_namespace.fill(ams_endpoint=ams_endpoint.hostname, ams_port=ams_port, ams_project=ams_project,
+ ams_sub_metric=ams_sub_metric, ams_sub_sync=ams_sub_sync)
# add the hdfs commands
for command in hdfs_commands:
@@ -112,9 +178,16 @@ def compose_command(config, args, hdfs_commands):
cmd_command.append("--run.date")
cmd_command.append(args.date)
+ # report
+ cmd_command.append("--report")
+ cmd_command.append(args.report)
+
# flink parallelism
cmd_command.append("--p")
- cmd_command.append(config.get(section_tenant_job, "flink_parallelism"))
+ flink_parallelism = config.get(section_tenant_job, "flink_parallelism")
+ if not flink_parallelism:
+ flink_parallelism = "1"
+ cmd_command.append(flink_parallelism)
# grab tenant configuration section for stream-status
@@ -128,63 +201,80 @@ def compose_command(config, args, hdfs_commands):
# hbase endpoint
if config.has(section_tenant_job, "hbase_master"):
cmd_command.append("--hbase.master")
- cmd_command.append(config.get(section_tenant_job, "hbase_master").hostname)
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_master").hostname)
# hbase endpoint port
if config.has(section_tenant_job, "hbase_master"):
cmd_command.append("--hbase.port")
- cmd_command.append(config.get(section_tenant_job, "hbase_master").port)
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_master").port)
# comma separate list of zookeeper servers
if config.has(section_tenant_job, "hbase_zk_quorum"):
cmd_command.append("--hbase.zk.quorum")
- cmd_command.append(config.get(section_tenant_job, "hbase_zk_quorum"))
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_zk_quorum"))
# port used by zookeeper servers
if config.has(section_tenant_job, "hbase_zk_port"):
cmd_command.append("--hbase.zk.port")
- cmd_command.append(config.get(section_tenant_job, "hbase_zk_port"))
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_zk_port"))
# table namespace, usually tenant
if config.has(section_tenant_job, "hbase_namespace"):
cmd_command.append("--hbase.namespace")
- cmd_command.append(config.get(section_tenant_job, "hbase_namespace"))
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_namespace"))
# table name, usually metric data
if config.has(section_tenant_job, "hbase_table"):
cmd_command.append("--hbase.table")
- cmd_command.append(config.get(section_tenant_job, "hbase_table"))
+ cmd_command.append(config.get(
+ section_tenant_job, "hbase_table"))
elif output == "kafka":
# kafka list of servers
if config.has(section_tenant_job, "kafka_servers"):
cmd_command.append("--kafka.servers")
- kafka_servers = ','.join(config.get(section_tenant_job, "kafka_servers"))
+ kafka_servers = ','.join(config.get(
+ section_tenant_job, "kafka_servers"))
cmd_command.append(kafka_servers)
# kafka topic to send status events to
if config.has(section_tenant_job, "kafka_topic"):
cmd_command.append("--kafka.topic")
- cmd_command.append(config.get(section_tenant_job, "kafka_topic"))
+ cmd_command.append(config.get(
+ section_tenant_job, "kafka_topic"))
elif output == "fs":
# filesystem path for output(use "hdfs://" for hdfs path)
if config.has(section_tenant_job, "fs_output"):
cmd_command.append("--fs.output")
- cmd_command.append(config.get(section_tenant_job, "fs_output"))
+ cmd_command.append(config.get(
+ section_tenant_job, "fs_output"))
elif output == "mongo":
cmd_command.append("--mongo.uri")
- mongo_endpoint = config.get("MONGO","endpoint").geturl()
- mongo_uri = config.get(section_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant)
+ mongo_endpoint = config.get("MONGO", "endpoint").geturl()
+ mongo_uri = config.get(section_tenant, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=args.tenant)
cmd_command.append(mongo_uri.geturl())
# mongo method
+ mongo_method = config.get("MONGO", "mongo_method")
+ if not mongo_method:
+ mongo_method = "insert"
cmd_command.append("--mongo.method")
- cmd_command.append(config.get(section_tenant_job, "mongo_method"))
+ cmd_command.append(mongo_method)
+ # report id
+ report_id = config.get(section_tenant, "report_" + args.report)
+ cmd_command.append("--report-id")
+ cmd_command.append(report_id)
# num of messages to be retrieved from AMS per request
cmd_command.append("--ams.batch")
- cmd_command.append(config.get(section_tenant_job, "ams_batch"))
+ cmd_command.append(str(config.get(section_tenant_job, "ams_batch")))
# interval in ms betweeb AMS service requests
cmd_command.append("--ams.interval")
- cmd_command.append(config.get(section_tenant_job, "ams_interval"))
+ cmd_command.append(str(config.get(section_tenant_job, "ams_interval")))
# get optional ams proxy
proxy = config.get("AMS", "proxy")
@@ -226,33 +316,50 @@ def main(args=None):
year, month, day = [int(x) for x in args.date.split("T")[0].split("-")]
+ # optional call to update profiles
+ if args.profile_check:
+ dateParam = None
+ if args.historic:
+ dateParam = args.date
+ profile_mgr = ArgoProfileManager(config)
+ profile_type_checklist = [
+ "operations", "aggregations", "reports", "thresholds", "metrics"]
+ for profile_type in profile_type_checklist:
+ profile_mgr.profile_update_check(
+ args.tenant, args.report, profile_type, dateParam)
+
# dictionary containing the argument's name and the command assosciated with each name
hdfs_commands = compose_hdfs_commands(year, month, day, args, config)
cmd_command, job_namespace = compose_command(config, args, hdfs_commands)
- log.info("Getting ready to submit job")
- log.info(cmd_to_string(cmd_command)+"\n")
-
# submit the script's command
- flink_job_submit(config, cmd_command, job_namespace)
+ flink_job_submit(config, cmd_command, job_namespace, args.dry_run)
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Stream Status Job submit script")
+ parser = argparse.ArgumentParser(
+ description="Stream Status Job submit script")
parser.add_argument(
"-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant")
parser.add_argument(
"-d", "--date", metavar="DATE(ISO-8601)",
- default=str(datetime.datetime.utcnow().replace(microsecond=0).isoformat()) + "Z",
+ default=str(datetime.datetime.utcnow().replace(
+ microsecond=0).isoformat()) + "Z",
help="Date in ISO-8601 format", dest="date")
parser.add_argument(
"-r", "--report", metavar="STRING", help="Report status", required=True, dest="report")
parser.add_argument(
"-c", "--config", metavar="PATH", help="Path for the config file", dest="config")
parser.add_argument(
- "-u", "--sudo", help="Run the submition as superuser", action="store_true", dest="sudo")
+ "-u", "--sudo", help="Run the submission as superuser", action="store_true", dest="sudo")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
+ parser.add_argument("--historic-profiles", help="use historic profiles",
+ dest="historic", action="store_true")
+ parser.add_argument("--profile-check", help="check if profiles are up to date before running job",
+ dest="profile_check", action="store_true")
parser.add_argument(
"-timeout", "--timeout", metavar="INT",
help="Controls default timeout for event regeneration (used in notifications)", dest="timeout")
diff --git a/bin/sync_ingestion_submit.py b/bin/sync_ingestion_submit.py
index 48b52b78..6146c9c6 100755
--- a/bin/sync_ingestion_submit.py
+++ b/bin/sync_ingestion_submit.py
@@ -10,6 +10,15 @@
def compose_command(config, args):
+ """Composes a command line execution string for submitting a flink job.
+
+ Args:
+ config (obj.): argo configuration object
+ args (dict): command line arguments of this script
+
+ Returns:
+ tuple: a list of all command line arguments for the flink job submission, and the job namespace
+ """
# job submission command
cmd_command = []
@@ -49,7 +58,7 @@ def compose_command(config, args):
# ams port
ams_port = 443
if ams_endpoint.port is not None:
- ams_port = ams_endpoint.port
+ ams_port = ams_endpoint.port
cmd_command.append("--ams.port")
cmd_command.append(str(ams_port))
@@ -74,9 +83,11 @@ def compose_command(config, args):
hdfs_user = config.get("HDFS", "user")
hdfs_sync = config.get("HDFS", "path_sync")
- hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant)
+ hdfs_sync.fill(namenode=namenode.geturl(),
+ hdfs_user=hdfs_user, tenant=args.tenant)
- hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant).geturl()
+ hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(
+ ), hdfs_user=hdfs_user, tenant=args.tenant).geturl()
# append hdfs sync base path to the submit command
cmd_command.append("--hdfs.path")
@@ -125,21 +136,21 @@ def main(args=None):
cmd_command, job_namespace = compose_command(config, args)
- log.info("Getting ready to submit job")
- log.info(cmd_to_string(cmd_command)+"\n")
-
# submit the job
-
- flink_job_submit(config, cmd_command, job_namespace)
+
+ flink_job_submit(config, cmd_command, job_namespace, args.dry_run)
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="AMS Sync Ingestion submission script")
+ parser = argparse.ArgumentParser(
+ description="AMS Sync Ingestion submission script")
parser.add_argument(
"-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True)
parser.add_argument(
"-c", "--config", metavar="PATH", help="Path for the config file")
parser.add_argument(
"-u", "--sudo", help="Run the submission as superuser", action="store_true")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
sys.exit(main(parser.parse_args()))
diff --git a/bin/test_ar_job_submit.py b/bin/test_ar_job_submit.py
index 7a1d6dcf..0cf4d1a6 100644
--- a/bin/test_ar_job_submit.py
+++ b/bin/test_ar_job_submit.py
@@ -5,32 +5,22 @@
from utils.common import cmd_to_string
from utils.argo_config import ArgoConfig
-CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template')
-CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json')
+CONF_TEMPLATE = os.path.join(
+ os.path.dirname(__file__), '../conf/conf.template')
+CONF_SCHEMA = os.path.join(os.path.dirname(
+ __file__), '../conf/config.schema.json')
+
# This is the command that the submission script is expected to compose based on given args and config
-expected_result = """flink_path run -c test_class test.jar --run.date 2018-02-11 \
---mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert \
+expected_result = """flink_path run -c test_class test.jar --run.date 2018-02-11 --mongo.uri mongodb://localhost:21017/argo_TENANTA \
+--mongo.method upsert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \
--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-11 \
---rec hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/recomp.json \
---downtimes hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/downtimes_2018-02-11.avro \
---mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/metric_profile_2018-02-11.avro \
---apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/TENANTA_Critical_ap.json \
---ggp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/group_groups_2018-02-11.avro \
---conf hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/TENANTA_Critical_cfg.json \
---egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/group_endpoints_2018-02-11.avro \
---pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/mdata/2018-02-10 --weights hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/weights_2018-02-11.avro \
---ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \
---ams.proxy test_proxy --ams.verify true"""
+--api.endpoint api.foo --api.token key0 --report.id report_uuid"""
+expected_result2 = """flink_path run -c test_class test.jar --run.date 2021-01-01 --mongo.uri mongodb://localhost:21017/argo_TENANTA \
+--mongo.method insert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2020-12-31 \
+--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2021-01-01 \
+--api.endpoint api.foo --api.token key0 --report.id report_uuid"""
class TestClass(unittest.TestCase):
@@ -46,23 +36,39 @@ def test_compose_command(self):
parser.add_argument('--sudo', action='store_true')
parser.add_argument('--method')
args = parser.parse_args(
- ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'Critical', '--method', 'upsert'])
+ ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'report_name', '--method', 'upsert'])
hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata"
- hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync"
-
+
test_hdfs_commands = dict()
test_hdfs_commands["--pdata"] = hdfs_metric+"/2018-02-10"
test_hdfs_commands["--mdata"] = hdfs_metric+"/2018-02-11"
- test_hdfs_commands["--conf"] = hdfs_sync+"/TENANTA_Critical_cfg.json"
- test_hdfs_commands["--mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-02-11.avro"
- test_hdfs_commands["--ops"] = hdfs_sync+"/TENANTA_ops.json"
- test_hdfs_commands["--apr"] = hdfs_sync+"/TENANTA_Critical_ap.json"
- test_hdfs_commands["--egp"] = hdfs_sync+"/Critical/group_endpoints_2018-02-11.avro"
- test_hdfs_commands["--ggp"] = hdfs_sync+"/Critical/group_groups_2018-02-11.avro"
- test_hdfs_commands["--weights"] = hdfs_sync+"/Critical/weights_2018-02-11.avro"
- test_hdfs_commands["--downtimes"] = hdfs_sync+"/Critical/downtimes_2018-02-11.avro"
- test_hdfs_commands["--rec"] = hdfs_sync+"/recomp.json"
-
- self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands)))
+
+ self.assertEqual(expected_result, cmd_to_string(
+ compose_command(config, args, test_hdfs_commands)))
+
+ def test_compose_second_command(self):
+
+ # set up the config parser
+ config = ArgoConfig(CONF_TEMPLATE, CONF_SCHEMA)
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--tenant')
+ parser.add_argument('--date', required=False, default="2021-01-01")
+ parser.add_argument('--report')
+ parser.add_argument('--sudo', action='store_true')
+ parser.add_argument('--method', required=False, default="insert")
+
+ args = parser.parse_args(
+ ['--tenant', 'TENANTA', '--report', 'report_name'])
+
+ hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata"
+
+ test_hdfs_commands = dict()
+
+ test_hdfs_commands["--pdata"] = hdfs_metric+"/2020-12-31"
+ test_hdfs_commands["--mdata"] = hdfs_metric+"/2021-01-01"
+
+ self.assertEqual(expected_result2, cmd_to_string(
+ compose_command(config, args, test_hdfs_commands, True)))
diff --git a/bin/test_metric_ingestion_submit.py b/bin/test_metric_ingestion_submit.py
index e6928bb1..4726bb95 100644
--- a/bin/test_metric_ingestion_submit.py
+++ b/bin/test_metric_ingestion_submit.py
@@ -5,8 +5,10 @@
import argparse
import os
-CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template')
-CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json')
+CONF_TEMPLATE = os.path.join(
+ os.path.dirname(__file__), '../conf/conf.template')
+CONF_SCHEMA = os.path.join(os.path.dirname(
+ __file__), '../conf/config.schema.json')
# This is the command that the submission script is expected to compose based on given args and config
expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 8080 \
@@ -27,6 +29,5 @@ def test_compose_command(self):
parser.add_argument('--sudo', action='store_true')
args = parser.parse_args(['--tenant', 'TENANTA', '--sudo'])
- print cmd_to_string(compose_command(config, args)[0])
-
- self.assertEquals(expected_result, cmd_to_string(compose_command(config, args)[0]))
+ self.assertEqual(expected_result, cmd_to_string(
+ compose_command(config, args)[0]))
diff --git a/bin/test_status_job_submit.py b/bin/test_status_job_submit.py
index a0029abd..e90a2935 100644
--- a/bin/test_status_job_submit.py
+++ b/bin/test_status_job_submit.py
@@ -5,25 +5,21 @@
from status_job_submit import compose_command
from utils.common import cmd_to_string
-CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template')
-CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json')
+CONF_TEMPLATE = os.path.join(
+ os.path.dirname(__file__), '../conf/conf.template')
+CONF_SCHEMA = os.path.join(os.path.dirname(
+ __file__), '../conf/config.schema.json')
# This is the command that the submission script is expected to compose based on given args and config
-expected_result = """sudo flink_path run -c test_class test.jar --run.date 2018-02-11 \
---mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert \
+expected_result = """sudo flink_path run -c test_class test.jar --run.date 2018-02-11 --mongo.uri mongodb://localhost:21017/argo_TENANTA \
+--mongo.method upsert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \
--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-11 \
---mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/metric_profile_2018-02-11.avro \
---apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/TENANTA_Critical_ap.json \
---ggp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/group_groups_2018-02-11.avro \
---conf hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_Critical_cfg.json \
---egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/group_endpoints_2018-02-11.avro \
---pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \
---ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \
---ams.proxy test_proxy --ams.verify true"""
+--api.endpoint api.foo --api.token key0 --report.id report_uuid"""
+
+expected_result2 = """sudo flink_path run -c test_class test.jar --run.date 2021-01-01 --mongo.uri mongodb://localhost:21017/argo_TENANTA \
+--mongo.method insert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2020-12-31 \
+--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2021-01-01 \
+--api.endpoint api.foo --api.token key0 --report.id report_uuid"""
class TestClass(unittest.TestCase):
@@ -39,20 +35,38 @@ def test_compose_command(self):
parser.add_argument('--sudo', action='store_true')
parser.add_argument('--method')
args = parser.parse_args(
- ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'Critical', '--method', 'upsert', '--sudo'])
+ ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'report_name', '--method', 'upsert', '--sudo'])
hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata"
- hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync"
-
+
test_hdfs_commands = dict()
test_hdfs_commands["--pdata"] = hdfs_metric+"/2018-02-10"
test_hdfs_commands["--mdata"] = hdfs_metric+"/2018-02-11"
- test_hdfs_commands["--conf"] = hdfs_sync+"/TENANTA_Critical_cfg.json"
- test_hdfs_commands["--mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-02-11.avro"
- test_hdfs_commands["--ops"] = hdfs_sync+"/TENANTA_ops.json"
- test_hdfs_commands["--apr"] = hdfs_sync+"/TENANTA_Critical_ap.json"
- test_hdfs_commands["--egp"] = hdfs_sync+"/Critical/group_endpoints_2018-02-11.avro"
- test_hdfs_commands["--ggp"] = hdfs_sync+"/Critical/group_groups_2018-02-11.avro"
-
- self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands)))
+
+ self.assertEqual(expected_result, cmd_to_string(
+ compose_command(config, args, test_hdfs_commands)))
+
+ def test_compose_command2(self):
+
+ config = ArgoConfig(CONF_TEMPLATE, CONF_SCHEMA)
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--tenant')
+ parser.add_argument('--date', default="2021-01-01")
+ parser.add_argument('--report')
+ parser.add_argument('--sudo', action='store_true')
+ parser.add_argument('--method', default="insert")
+ args = parser.parse_args(
+ ['--tenant', 'TENANTA', '--report', 'report_name', '--sudo'])
+
+ hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata"
+
+
+ test_hdfs_commands = dict()
+
+ test_hdfs_commands["--pdata"] = hdfs_metric+"/2020-12-31"
+ test_hdfs_commands["--mdata"] = hdfs_metric+"/2021-01-01"
+
+ self.assertEqual(expected_result2, cmd_to_string(
+ compose_command(config, args, test_hdfs_commands, True)))
diff --git a/bin/test_stream_status_job_submit.py b/bin/test_stream_status_job_submit.py
index b2f1fd12..4fefa573 100644
--- a/bin/test_stream_status_job_submit.py
+++ b/bin/test_stream_status_job_submit.py
@@ -5,22 +5,23 @@
from stream_status_job_submit import compose_command
from utils.common import cmd_to_string
-CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template')
-CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json')
+CONF_TEMPLATE = os.path.join(
+ os.path.dirname(__file__), '../conf/conf.template')
+CONF_SCHEMA = os.path.join(os.path.dirname(
+ __file__), '../conf/config.schema.json')
-expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 8080 \
+
+expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 443 \
--ams.token test_token --ams.project test_project --ams.sub.metric metric_status --ams.sub.sync sync_status \
+--sync.mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/Critical/metric_profile_2018-03-01.avro \
+--sync.ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \
--sync.apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_Critical_ap.json \
---sync.egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/group_endpoints_2018-03-01.avro \
---sync.mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\
-TENANTA/sync/Critical/metric_profile_2018-03-01.avro \
---sync.ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/\
-sync/TENANTA_ops.json --run.date 2018-03-05T00:00:00Z --p 1 \
---hbase.master hbase.devel --hbase.port 8080 --hbase.zk.quorum ['test_zk_servers'] \
+--sync.egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/Critical/group_endpoints_2018-03-01.avro \
+--run.date 2018-03-05T00:00:00Z --report Critical --p 1 --hbase.master hbase.devel --hbase.port 8080 --hbase.zk.quorum ['test_zk_servers'] \
--hbase.zk.port 8080 --hbase.namespace test_hbase_namespace --hbase.table metric_data \
---kafka.servers kafka_server:9090,kafka_server2:9092 --kafka.topic test_kafka_topic --fs.output None --mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert --ams.batch 10 --ams.interval 300 --ams.proxy test_proxy --ams.verify true --timeout 500"""
-
+--kafka.servers kafka_server:9090,kafka_server2:9092 --kafka.topic test_kafka_topic --fs.output None \
+--mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method insert --report-id None \
+--ams.batch 10 --ams.interval 300 --ams.proxy test_proxy --ams.verify true --timeout 500"""
class TestClass(unittest.TestCase):
@@ -42,9 +43,13 @@ def test_compose_command(self):
hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync"
test_hdfs_commands = dict()
- test_hdfs_commands["--sync.mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-03-01.avro"
+ test_hdfs_commands["--sync.mps"] = hdfs_sync + \
+ "/Critical/"+"metric_profile_2018-03-01.avro"
test_hdfs_commands["--sync.ops"] = hdfs_sync+"/TENANTA_ops.json"
- test_hdfs_commands["--sync.apr"] = hdfs_sync+"/TENANTA_Critical_ap.json"
- test_hdfs_commands["--sync.egp"] = hdfs_sync+"/Critical/group_endpoints_2018-03-01.avro"
+ test_hdfs_commands["--sync.apr"] = hdfs_sync + \
+ "/TENANTA_Critical_ap.json"
+ test_hdfs_commands["--sync.egp"] = hdfs_sync + \
+ "/Critical/group_endpoints_2018-03-01.avro"
- self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands)[0]))
+ self.assertEqual(expected_result, cmd_to_string(
+ compose_command(config, args, test_hdfs_commands)[0]))
diff --git a/bin/test_sync_ingestion_submit.py b/bin/test_sync_ingestion_submit.py
index d82c8d6e..2ec53b73 100644
--- a/bin/test_sync_ingestion_submit.py
+++ b/bin/test_sync_ingestion_submit.py
@@ -27,4 +27,4 @@ def test_compose_command(self):
parser.add_argument('--sudo', action='store_true')
args = parser.parse_args(['--tenant', 'TENANTA', '--sudo'])
- self.assertEquals(expected_result, cmd_to_string(compose_command(config, args)[0]))
+ self.assertEqual(expected_result, cmd_to_string(compose_command(config, args)[0]))
diff --git a/bin/update_engine.py b/bin/update_engine.py
index d5308e7d..333f8e4e 100755
--- a/bin/update_engine.py
+++ b/bin/update_engine.py
@@ -52,6 +52,8 @@ def main(args):
tenants = config.get("API","tenants")
profile_type_checklist = ["operations", "aggregations", "reports", "thresholds", "recomputations"]
for tenant in tenants:
+ if args.tenant and tenant != args.tenant:
+ continue
reports = config.get("TENANTS:"+tenant,"reports")
for report in reports:
for profile_type in profile_type_checklist:
@@ -65,6 +67,8 @@ def main(args):
ams = ArgoAmsClient(ams_host, ams_token)
for tenant in tenants:
+ if args.tenant and tenant != args.tenant:
+ continue
ams.check_project_exists(tenant)
missing = ams.check_tenant(tenant)
if is_tenant_complete(missing):
@@ -99,7 +103,7 @@ def tenant_ok_reports(status):
for report_name in status["hdfs"]["sync_data"]:
result = 1
report = status["hdfs"]["sync_data"][report_name]
- for key in report.keys():
+ for key in list(report.keys()):
result = result * report[key]
if result > 0:
rep_list.append(report_name)
@@ -109,6 +113,8 @@ def tenant_ok_reports(status):
if __name__ == "__main__":
parser = ArgumentParser(description="Update engine")
+ parser.add_argument(
+ "-t", "--tenant", help="tenant owner ", dest="tenant", metavar="STRING", required=False, default=None)
parser.add_argument(
"-b", "--backup-conf", help="backup current configuration", action="store_true", dest="backup")
parser.add_argument(
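
The new --tenant option above narrows both the profile check and the AMS check to a single tenant. A minimal sketch of that filter pattern, with a hypothetical tenants list and update_tenant helper standing in for the real config lookup and per-tenant work:

from argparse import ArgumentParser

def run(args, tenants):
    for tenant in tenants:
        # when --tenant is given, skip every other tenant
        if args.tenant and tenant != args.tenant:
            continue
        update_tenant(tenant)

def update_tenant(tenant):
    # placeholder for the real per-tenant update logic
    print("updating", tenant)

if __name__ == "__main__":
    parser = ArgumentParser(description="Update engine (sketch)")
    parser.add_argument("-t", "--tenant", dest="tenant", default=None)
    run(parser.parse_args(), ["TENANT_A", "TENANT_B"])
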
diff --git a/bin/utils/argo_config.py b/bin/utils/argo_config.py
index 8a209d0d..6a79aebd 100755
--- a/bin/utils/argo_config.py
+++ b/bin/utils/argo_config.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-from ConfigParser import SafeConfigParser
-from urlparse import urlparse
+from configparser import ConfigParser
+from urllib.parse import urlparse
import json
import re
import logging
@@ -26,7 +26,6 @@ def __repr__(self):
return self.tmpl
def get_args(self):
-
"""
Get arguments used in template
@@ -49,7 +48,8 @@ def fill(self, **args_new):
# If provided arguments fill the needed ones we are ok (extra arguments will be ignored)
if not set(args).issubset(set(args_new.keys())):
- raise RuntimeError("Argument mismatch, needed arguments:"+str(args))
+ raise RuntimeError(
+ "Argument mismatch, needed arguments:"+str(args))
for arg in args:
txt = re.sub(r"{{\s*"+str(arg)+r"\s*}}", str(args_new[arg]), txt)
@@ -68,9 +68,10 @@ def partial_fill(self, **args_new):
args = self.get_args()
for arg in args:
- if arg not in args_new.keys():
+ if arg not in list(args_new.keys()):
continue
- txt = re.sub(r"{{\s*" + str(arg) + r"\s*}}", str(args_new[arg]), txt)
+ txt = re.sub(r"{{\s*" + str(arg) + r"\s*}}",
+ str(args_new[arg]), txt)
return txt
@@ -109,7 +110,7 @@ def __init__(self, config=None, schema=None):
self.log_changes = True
self.conf_path = None
self.schema_path = None
- self.conf = SafeConfigParser()
+ self.conf = ConfigParser()
self.schema = dict()
self.fix = dict()
self.var = dict()
@@ -131,19 +132,20 @@ def set(self, group, item, value):
log.info("config section added [{}]".format(group))
if self.conf.has_option(group, item):
old_val = self.conf.get(group, item)
- else:
+ else:
old_val = None
if old_val != value:
self.conf.set(group, item, value)
if self.log_changes:
- log.info("config option changed [{}]{}={} (from:{})".format(group, item, value, old_val))
+ log.info("config option changed [{}]{}={} (from:{})".format(
+ group, item, value, old_val))
def set_default(self, group, item_name):
- self.set(group,item_name,str(self.get_default(group, item_name)))
+ self.set(group, item_name, str(self.get_default(group, item_name)))
def get_var_origin(self, group_name, ):
- # reverse keys alphabetically
- keys = sorted(self.schema.keys(), reverse=True)
+ # reverse keys alphabetically
+ keys = sorted(list(self.schema.keys()), reverse=True)
for item in keys:
if "~" in item:
@@ -158,18 +160,18 @@ def get_var_origin(self, group_name, ):
return ""
if group_name.startswith(item_prefix) and group_name.endswith(item_postfix):
- return item
+ return item
return ""
def get_default(self, group, item_name):
group_og = self.get_var_origin(group)
-
+
item = self.schema[group_og][item_name]
- if "default" not in item.keys():
+ if "default" not in list(item.keys()):
return ""
item_type = item["type"]
if item_type == "string":
- result =item["default"]
+ result = item["default"]
elif item_type == "int" or item_type == "long":
result = int(item["default"])
elif item_type == "bool":
@@ -190,8 +192,8 @@ def get_default(self, group, item_name):
sub_type = "string"
result = Template(item["default"], sub_type)
-
return result
+
def get(self, group, item=None):
"""
Given a group and an item return its value
@@ -221,11 +223,11 @@ def get(self, group, item=None):
r = re.compile(item.replace('*', '.*'))
results = {}
if group in self.fix:
- items = filter(r.match, self.fix[group].keys())
+ items = list(filter(r.match, list(self.fix[group].keys())))
for item in items:
results[item] = self.fix[group][item]
if group in self.var:
- items = filter(r.match, self.var[group].keys())
+ items = list(filter(r.match, list(self.var[group].keys())))
for item in items:
results[item] = self.var[group][item]
return results
@@ -236,13 +238,13 @@ def get(self, group, item=None):
return self.fix[group][item]["value"]
if group in self.var:
if item in self.var[group]:
- if self.var[group][item] is not None:
- return self.var[group][item]["value"]
+ if self.var[group][item] is not None:
+ return self.var[group][item]["value"]
return None
def load_conf(self, conf_path):
"""
- Load configuration from file using a SafeConfigParser
+ Load configuration from file using a ConfigParser
"""
self.conf.read(conf_path)
self.conf_path = conf_path
@@ -274,8 +276,8 @@ def get_as(self, group, item, item_type, og_item):
dict: result dictionary with value and optional reference to original item in schema
"""
pack = dict()
-
- try:
+
+ try:
result = None
if item_type == "string":
result = self.conf.get(group, item)
@@ -303,10 +305,10 @@ def get_as(self, group, item, item_type, og_item):
if og_item != item:
pack["og_item"] = og_item
- except Exception, e:
- log.error("Not found [{}][{}]".format(group,item))
+ except Exception as e:
+ log.error("Not found [{}][{}]".format(group, item))
self.valid = False
- return
+ return
return pack
def add_config_item(self, group, item, og_item, dest, og_group):
@@ -325,7 +327,7 @@ def add_config_item(self, group, item, og_item, dest, og_group):
else:
schema_group = group
- if "optional" in self.schema[schema_group][og_item].keys():
+ if "optional" in list(self.schema[schema_group][og_item].keys()):
if self.schema[schema_group][og_item]["optional"]:
if not self.conf.has_option(group, item):
return
@@ -335,7 +337,8 @@ def add_config_item(self, group, item, og_item, dest, og_group):
if og_group is not None:
dest[group]["og_group"] = og_group
- dest[group][item] = self.get_as(group, item, self.schema[schema_group][og_item]["type"], og_item)
+ dest[group][item] = self.get_as(
+ group, item, self.schema[schema_group][og_item]["type"], og_item)
def add_group_items(self, group, items, var, og_group):
"""
@@ -358,7 +361,8 @@ def add_group_items(self, group, items, var, og_group):
self.add_config_item(group, item, item, dest, og_group)
else:
for sub_item in item["vars"]:
- self.add_config_item(group, sub_item, item["item"], dest, og_group)
+ self.add_config_item(
+ group, sub_item, item["item"], dest, og_group)
@staticmethod
def is_var(name):
@@ -398,9 +402,9 @@ def get_item_variations(self, group, item, ogroup):
name_pool = self.conf.get(map_pool[0], map_pool[1]).split(",")
if name_pool == [""]:
return None
- except Exception, e:
- log.error("Not found [{}]{}".format(map_pool[0],map_pool[1]))
- self.valid=False
+ except Exception as e:
+ log.error("Not found [{}]{}".format(map_pool[0], map_pool[1]))
+ self.valid = False
return None
for name in name_pool:
variations["vars"].append(item.replace("~", name))
@@ -435,56 +439,55 @@ def get_group_variations(self, group):
return variations
def check_conf(self):
-
"""
Validate schema and configuration file. Iterate and extract
all configuration parameters
"""
self.valid = True
- fix_groups = self.schema.keys()
+ fix_groups = list(self.schema.keys())
var_groups = list()
-
for group in fix_groups:
if self.is_var(group):
-
+
var_group = self.get_group_variations(group)
if var_group is not None:
var_groups.append(var_group)
-
+
continue
fix_items = list()
var_items = list()
- for item in self.schema[group].keys():
+ for item in list(self.schema[group].keys()):
if self.is_var(item):
- group_vars = self.get_item_variations(group,item,None)
+ group_vars = self.get_item_variations(group, item, None)
if group_vars is not None:
- var_items.append(self.get_item_variations(group, item, None))
+ var_items.append(
+ self.get_item_variations(group, item, None))
continue
fix_items.append(item)
self.add_group_items(group, fix_items, False, None)
self.add_group_items(group, var_items, True, None)
-
for group in var_groups:
-
+
for sub_group in group["vars"]:
fix_items = list()
var_items = list()
- for item in self.schema[group["group"]].keys():
+ for item in list(self.schema[group["group"]].keys()):
if item == "~":
continue
if self.is_var(item):
- item_vars = self.get_item_variations(sub_group, item, group["group"])
+ item_vars = self.get_item_variations(
+ sub_group, item, group["group"])
if item_vars is not None:
var_items.append(item_vars)
continue
fix_items.append(item)
# Both fix and var items are in a var group so are considered var
- self.add_group_items(sub_group, fix_items, True, group["group"])
- self.add_group_items(sub_group, var_items, True, group["group"])
-
-
+ self.add_group_items(sub_group, fix_items,
+ True, group["group"])
+ self.add_group_items(sub_group, var_items,
+ True, group["group"])
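
The Template.fill / partial_fill logic touched above substitutes {{ var }} placeholders with re.sub. A minimal, self-contained sketch of that substitution; fill_template is an illustrative name, not the real class:

import re

def fill_template(tmpl, **args):
    txt = tmpl
    for arg, value in args.items():
        # replace every {{ arg }} occurrence, tolerating whitespace inside the braces
        txt = re.sub(r"{{\s*" + re.escape(arg) + r"\s*}}", str(value), txt)
    return txt

print(fill_template("mongodb://{{ mongo_endpoint }}/argo_{{ tenant }}",
                    mongo_endpoint="localhost:27017", tenant="TENANTA"))
# mongodb://localhost:27017/argo_TENANTA
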
diff --git a/bin/utils/argo_mongo.py b/bin/utils/argo_mongo.py
index 1f143d49..81482f1a 100755
--- a/bin/utils/argo_mongo.py
+++ b/bin/utils/argo_mongo.py
@@ -5,9 +5,9 @@
import pymongo
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
-from argo_config import ArgoConfig
-from common import get_config_paths
-from common import get_log_conf
+from .argo_config import ArgoConfig
+from .common import get_config_paths
+from .common import get_log_conf
log = logging.getLogger(__name__)
@@ -19,55 +19,94 @@ def __init__(self, args, config, cols):
self.config = config
self.cols = cols
-
def ensure_status_indexes(self, db):
"""Checks if required indexes exist in specific argo status-related collections
in mongodb
-
+
Args:
db (obj): pymongo database object
-
+
"""
log.info("Checking required indexes in status collections...")
+
def is_index_included(index_set, index):
"""gets a set of mongodb indexes and checks if specified
mongodb index exists in this set
-
+
Args:
index_set (dict): pymongo mongodb index object
index (obj): pymongo index object
-
+
Returns:
bool: If index exists in set return true
"""
- for name in index_set.keys():
+ for name in list(index_set.keys()):
if index_set[name]['key'] == index:
- return True
+ return True
return False
# Used in all status collections
- index_report_date = [("report",pymongo.ASCENDING), ("date_integer",pymongo.ASCENDING)]
+ index_report_date = [("report", pymongo.ASCENDING),
+ ("date_integer", pymongo.ASCENDING)]
# Used only in status_metrics collection
- index_date_host = [("date_integer",pymongo.ASCENDING), ("report",pymongo.ASCENDING)]
- status_collections = ["status_metrics","status_endpoints","status_services","status_endpoint_groups"]
+ index_date_host = [("date_integer", pymongo.ASCENDING),
+ ("report", pymongo.ASCENDING)]
+ status_collections = ["status_metrics", "status_endpoints",
+ "status_services", "status_endpoint_groups"]
+
+ index_history = [("date_integer", pymongo.DESCENDING),
+ ("id", pymongo.ASCENDING)]
+
+ index_downtimes = [("date_integer", pymongo.DESCENDING)]
+
+ # Check indexes in sync collections
+ for col_name in ["topology_endpoints", "topology_groups", "weights"]:
+ col = db[col_name]
+ indexes = col.index_information()
+ if not is_index_included(indexes, index_history):
+ # ensure index
+ col.create_index(index_history, background=True)
+ log.info("Created (date_integer,id) index in %s.%s",
+ col.database.name, col.name)
+
+ # Check for index in downtimes
+ col = db["downtimes"]
+ indexes = col.index_information()
+ if not is_index_included(indexes, index_downtimes):
+ col.create_index(index_downtimes, background=True)
+ log.info("Created (date_integer) index in %s.%s",
+ col.database.name, col.name)
+
+
+
# Check first for index report,date
for status_name in status_collections:
col = db[status_name]
indexes = col.index_information()
- if not is_index_included(indexes,index_report_date):
+ if not is_index_included(indexes, index_report_date):
# ensure index
- col.create_index(index_report_date,background=True)
- log.info("Created (report,date) index in %s.%s",col.database.name,col.name)
-
+ col.create_index(index_report_date, background=True)
+ log.info("Created (report,date) index in %s.%s",
+ col.database.name, col.name)
+
# Check for index date,host in status_metrics
col = db["status_metrics"]
- if not is_index_included(indexes,index_date_host):
- col.create_index(index_date_host,background=True)
- log.info("Created (report,date) index in %s.%s",col.database.name,col.name)
+ indexes = col.index_information()
+ if not is_index_included(indexes, index_date_host):
+ col.create_index(index_date_host, background=True)
+ log.info("Created (report,date) index in %s.%s",
+ col.database.name, col.name)
+ def mongo_clean_ar(self, uri, dry_run=False):
+ """Gets a mongo database reference as a uri string and performs
+ a/r data removal for a specific date
- def mongo_clean_ar(self, uri):
+ Args:
+ uri (str.): uri string pointing to a specific mongodb database
+ dry_run (bool, optional): Optional flag that specifies whether the execution is in dry-run mode.
+ If so, no data removal is performed. Defaults to False.
+ """
tenant_report = None
@@ -76,7 +115,8 @@ def mongo_clean_ar(self, uri):
report_name = self.args.report
tenant_group = "TENANTS:" + self.args.tenant
if report_name in self.config.get(tenant_group, "reports"):
- tenant_report = self.config.get(tenant_group, "report_"+report_name)
+ tenant_report = self.config.get(
+ tenant_group, "report_"+report_name)
else:
log.critical("Report %s not found", report_name)
sys.exit(1)
@@ -101,34 +141,46 @@ def mongo_clean_ar(self, uri):
# iterate over the specified collections
for col in self.cols:
if tenant_report is not None:
- num_of_rows = db[col].find({"date": date_int, "report": tenant_report}).count()
+ num_of_rows = db[col].find(
+ {"date": date_int, "report": tenant_report}).count()
log.info("Collection: " + col + " -> Found " + str(
num_of_rows) + " entries for date: " + self.args.date + " and report: " + self.args.report)
else:
num_of_rows = db[col].find({"date": date_int}).count()
log.info("Collection: " + col + " -> Found " + str(
num_of_rows) + " entries for date: " + self.args.date + ". No report specified!")
-
- if num_of_rows > 0:
-
- if tenant_report is not None:
- # response returned from the delete operation
- res = db[col].delete_many({"date": date_int, "report": tenant_report})
- log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) +
- " entries for date: " + self.args.date + " and report: " + self.args.report)
- else:
- # response returned from the delete operation
- res = db[col].delete_many({"date": date_int, "report": tenant_report})
- log.info("Collection: " + col + " -> Removed " + str(
- res.deleted_count) + " entries for date: " + self.args.date + ". No report specified!")
- log.info("Entries removed successfully")
+ if dry_run:
+ log.info("Results won't be removed to dry-run mode")
else:
- log.info("Zero entries found. Nothing to remove.")
+ if num_of_rows > 0:
+ if tenant_report is not None:
+ # response returned from the delete operation
+ res = db[col].delete_many(
+ {"date": date_int, "report": tenant_report})
+ log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) +
+ " entries for date: " + self.args.date + " and report: " + self.args.report)
+ else:
+ # response returned from the delete operation
+ res = db[col].delete_many(
+ {"date": date_int, "report": tenant_report})
+ log.info("Collection: " + col + " -> Removed " + str(
+ res.deleted_count) + " entries for date: " + self.args.date + ". No report specified!")
+ log.info("Entries removed successfully")
+ else:
+ log.info("Zero entries found. Nothing to remove.")
# close the connection with mongo
client.close()
- def mongo_clean_status(self, uri):
+ def mongo_clean_status(self, uri, dry_run=False):
+ """Gets a mongo database reference as a uri string and performs
+ status data removal for a specific date
+
+ Args:
+ uri (str.): uri string pointing to a specific mongodb database
+ dry_run (bool, optional): Optional flag that specifies whether the execution is in dry-run mode.
+ If so, no data removal is performed. Defaults to False.
+ """
tenant_report = None
@@ -137,7 +189,8 @@ def mongo_clean_status(self, uri):
report_name = self.args.report
tenant_group = "TENANTS:" + self.args.tenant
if report_name in self.config.get(tenant_group, "reports"):
- tenant_report = self.config.get(tenant_group, "report_"+report_name)
+ tenant_report = self.config.get(
+ tenant_group, "report_"+report_name)
else:
log.critical("Report %s not found", report_name)
sys.exit(1)
@@ -165,7 +218,8 @@ def mongo_clean_status(self, uri):
# iterate over the specified collections
for col in self.cols:
if tenant_report is not None:
- num_of_rows = db[col].find({"date_integer": date_int, "report": tenant_report}).count()
+ num_of_rows = db[col].find(
+ {"date_integer": date_int, "report": tenant_report}).count()
log.info("Collection: " + col + " -> Found " + str(
num_of_rows) + " entries for date: " + self.args.date + " and report: " + self.args.report)
else:
@@ -173,21 +227,25 @@ def mongo_clean_status(self, uri):
log.info("Collection: " + col + " -> Found " + str(
num_of_rows) + " entries for date: " + self.args.date + ". No report specified!")
- if num_of_rows > 0:
-
- if tenant_report is not None:
- # response returned from the delete operation
- res = db[col].delete_many({"date_integer": date_int, "report": tenant_report})
- log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) +
- " entries for date: " + self.args.date + " and report: " + self.args.report)
- else:
- # response returned from the delete operation
- res = db[col].delete_many({"date_integer": date_int, "report": tenant_report})
- log.info("Collection: " + col + " -> Removed " + str(
- res.deleted_count) + " entries for date: " + self.args.Date + ". No report specified!")
- log.info("Entries removed successfully")
+ if dry_run:
+ log.info("Results won't be removed to dry-run mode")
else:
- log.info("Zero entries found. Nothing to remove.")
+ if num_of_rows > 0:
+ if tenant_report is not None:
+ # response returned from the delete operation
+ res = db[col].delete_many(
+ {"date_integer": date_int, "report": tenant_report})
+ log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) +
+ " entries for date: " + self.args.date + " and report: " + self.args.report)
+ else:
+ # response returned from the delete operation
+ res = db[col].delete_many(
+ {"date_integer": date_int, "report": tenant_report})
+ log.info("Collection: " + col + " -> Removed " + str(
+ res.deleted_count) + " entries for date: " + self.args.date + ". No report specified!")
+ log.info("Entries removed successfully")
+ else:
+ log.info("Zero entries found. Nothing to remove.")
# close the connection with mongo
client.close()
@@ -205,20 +263,21 @@ def main_clean(args=None):
# Get main configuration and schema
config = ArgoConfig(conf_paths["main"], conf_paths["schema"])
- # set up the mongo uri
+ # set up the mongo uri
section_tenant = "TENANTS:" + args.tenant
- mongo_endpoint = config.get("MONGO","endpoint")
- mongo_uri = config.get(section_tenant,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant)
-
+ mongo_endpoint = config.get("MONGO", "endpoint")
+ mongo_uri = config.get(section_tenant, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=args.tenant)
if args.job == "clean_ar":
- argo_mongo_client = ArgoMongoClient(args, config, ["service_ar", "endpoint_group_ar"])
- argo_mongo_client.mongo_clean_ar(mongo_uri)
+ argo_mongo_client = ArgoMongoClient(
+ args, config, ["endpoint_ar", "service_ar", "endpoint_group_ar"])
+ argo_mongo_client.mongo_clean_ar(mongo_uri, args.dry_run)
elif args.job == "clean_status":
argo_mongo_client = ArgoMongoClient(args, config, ["status_metrics", "status_endpoints", "status_services",
"status_endpoint_groups"])
- argo_mongo_client.mongo_clean_status(mongo_uri)
+ argo_mongo_client.mongo_clean_status(mongo_uri, args.dry_run)
# Provide the ability to the script, to run as a standalone module
@@ -234,6 +293,8 @@ def main_clean(args=None):
"-c", "--config", metavar="STRING", help="Path for the config file", dest="config")
parser.add_argument(
"-j", "--job", metavar="STRING", help="Stand alone method we wish to run", required=True, dest="job")
+ parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job",
+ action="store_true", dest="dry_run")
# Parse the arguments
sys.exit(main_clean(parser.parse_args()))
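
The ensure_status_indexes changes above follow a check-then-create pattern against pymongo's index_information(). A minimal sketch of that pattern, assuming a locally reachable MongoDB and placeholder database/collection names:

import pymongo
from pymongo import MongoClient

def ensure_index(col, index):
    # index_information() maps index name -> {'key': [(field, direction), ...], ...}
    existing = col.index_information()
    if any(info["key"] == index for info in existing.values()):
        return False  # an equivalent index is already there
    col.create_index(index, background=True)
    return True

client = MongoClient("mongodb://localhost:27017")
col = client["argo_TENANTA"]["status_metrics"]
ensure_index(col, [("report", pymongo.ASCENDING), ("date_integer", pymongo.ASCENDING)])
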
diff --git a/bin/utils/check_tenant.py b/bin/utils/check_tenant.py
index 1d1b9f3d..46b3eb3e 100755
--- a/bin/utils/check_tenant.py
+++ b/bin/utils/check_tenant.py
@@ -1,14 +1,14 @@
#!/usr/bin/env python
from argparse import ArgumentParser
-from common import get_config_paths, get_log_conf
-from argo_config import ArgoConfig
+from .common import get_config_paths, get_log_conf
+from .argo_config import ArgoConfig
import sys
import logging
import json
from snakebite.client import Client
from datetime import datetime, timedelta
-from update_ams import ArgoAmsClient
+from .update_ams import ArgoAmsClient
import requests
@@ -112,7 +112,7 @@ def check_tenant_hdfs(tenant, target_date, days_back, namenode, hdfs_user, clien
except Exception:
sync_result[report][item] = False
- for item in report_profiles.keys():
+ for item in list(report_profiles.keys()):
profile_path = "".join([hdfs_sync.path,"/",report_profiles[item].format(tenant,report)])
try:
client.test(profile_path)
@@ -158,13 +158,13 @@ def check_tenant_ams(tenant, target_date, ams, config):
if ams.check_project_exists(tenant):
tenant_topics = ams.get_tenant_topics(tenant)
- topic_types = tenant_topics.keys()
+ topic_types = list(tenant_topics.keys())
if "metric_data" in topic_types:
ams_tenant["metric_data"]["publishing"] = True
if "sync_data" in topic_types:
ams_tenant["sync_data"]["publishing"] = True
- sub_types = ams.get_tenant_subs(tenant,tenant_topics).keys()
+ sub_types = list(ams.get_tenant_subs(tenant,tenant_topics).keys())
if "ingest_metric" in sub_types:
ams_tenant["metric_data"]["ingestion"] = True
if "status_metric" in sub_types:
@@ -204,15 +204,24 @@ def check_tenants(tenants, target_date, days_back, config):
# ams client init
ams_token = config.get("AMS", "access_token")
ams_host = config.get("AMS", "endpoint").hostname
- ams = ArgoAmsClient(ams_host, ams_token)
+ ams_proxy = config.get("AMS","proxy")
+ if ams_proxy:
+ ams_proxy = ams_proxy.geturl()
+ ams_verify = config.get("AMS","verify")
+
+ ams = ArgoAmsClient(ams_host, ams_token, ams_verify, ams_proxy)
log.info("connecting to AMS: {}".format(ams_host))
# Upload tenant statuses in argo web api
api_endpoint = config.get("API","endpoint").netloc
api_token = config.get("API","access_token")
+ api_proxy = config.get("API","proxy")
+ if api_proxy:
+ api_proxy = api_proxy.geturl()
+ api_verify = config.get("API","verify")
# Get tenant uuids
- tenant_uuids = get_tenant_uuids(api_endpoint, api_token)
+ tenant_uuids = get_tenant_uuids(api_endpoint, api_token, api_verify, api_proxy)
if not tenant_uuids:
log.error("Without tenant uuids service is unable to check and upload tenant status")
sys.exit(1)
@@ -235,7 +244,7 @@ def check_tenants(tenants, target_date, days_back, config):
log.info("Status for tenant[{}] = {}".format(tenant,json.dumps(status_tenant)))
# Upload tenant status to argo-web-api
complete_status.append(status_tenant)
- upload_tenant_status(api_endpoint,api_token,tenant,tenant_uuids[tenant],status_tenant)
+ upload_tenant_status(api_endpoint,api_token,tenant,tenant_uuids[tenant],status_tenant,api_verify,api_proxy)
return complete_status
@@ -280,12 +289,14 @@ def run_tenant_check(args):
-def get_tenant_uuids(api_endpoint, api_token):
+def get_tenant_uuids(api_endpoint, api_token, verify=False, http_proxy_url=None):
"""Get tenant uuids from remote argo-web-api endpoint
Args:
api_endpoint (str.): hostname of the remote argo-web-api endpoint
api_token (str.): access token for the remote argo-web-api endpoint
+ verify (boolean): flag if the remote web api host should be verified
+ http_proxy_url (str.): optional url for local http proxy to be used
Returns:
dict.: dictionary with mappings of tenant names to tenant uuidss
@@ -294,12 +305,15 @@ def get_tenant_uuids(api_endpoint, api_token):
log.info("Retrieving tenant uuids from api: {}".format(api_endpoint))
result = dict()
url = "https://{}/api/v2/admin/tenants".format(api_endpoint)
+ proxies = None
+ if http_proxy_url:
+ proxies = {'http':http_proxy_url,'https':http_proxy_url}
headers = dict()
headers.update({
'Accept': 'application/json',
'x-api-key': api_token
})
- r = requests.get(url, headers=headers, verify=False)
+ r = requests.get(url, headers=headers, verify=verify, proxies=proxies)
if 200 == r.status_code:
@@ -313,7 +327,7 @@ def get_tenant_uuids(api_endpoint, api_token):
return result
-def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_status):
+def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_status, verify=False, http_proxy_url=None):
"""Uploads tenant's status to a remote argo-web-api endpoint
Args:
@@ -322,6 +336,8 @@ def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_stat
tenant (str.): tenant name
tenant_id (str.): tenant uuid
tenant_status (obj.): json representation of tenant's status report
+ verify (boolean): flag if the remote web api host should be verified
+ http_proxy_url (str.): optional url for local http proxy to be used
Returns:
bool: true if upload is successfull
@@ -334,7 +350,10 @@ def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_stat
'Accept': 'application/json',
'x-api-key': api_token
})
- r = requests.put(url, headers=headers, data=json.dumps(tenant_status), verify=False)
+ proxies = None
+ if http_proxy_url:
+ proxies={'http':http_proxy_url, 'https':http_proxy_url}
+ r = requests.put(url, headers=headers, data=json.dumps(tenant_status), verify=verify, proxies=proxies)
if 200 == r.status_code:
log.info("Tenant's {} status upload succesfull to {}".format(tenant, api_endpoint))
return True
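
get_tenant_uuids and upload_tenant_status now accept verify and http_proxy_url and hand them to requests. A small sketch of that call pattern; the endpoint, token and proxy URL below are placeholders:

import requests

def api_get_tenants(api_endpoint, api_token, verify=False, http_proxy_url=None):
    url = "https://{}/api/v2/admin/tenants".format(api_endpoint)
    headers = {"Accept": "application/json", "x-api-key": api_token}
    proxies = None
    if http_proxy_url:
        # route both plain and TLS traffic through the same local proxy
        proxies = {"http": http_proxy_url, "https": http_proxy_url}
    return requests.get(url, headers=headers, verify=verify, proxies=proxies)

# r = api_get_tenants("api.foo", "key0", verify=True, http_proxy_url="http://proxy.local:3128")
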
diff --git a/bin/utils/common.py b/bin/utils/common.py
index bd8b4fcf..16e57b37 100644
--- a/bin/utils/common.py
+++ b/bin/utils/common.py
@@ -3,7 +3,7 @@
import json
import subprocess
from subprocess import check_call
-from urlparse import urlparse, urlsplit, urlunsplit
+from urllib.parse import urlparse, urlsplit, urlunsplit
import logging.config
import logging
import os.path
@@ -78,7 +78,7 @@ def date_rollback(path, year, month, day, config, client):
sys.exit(1)
-def flink_job_submit(config, cmd_command, job_namespace=None):
+def flink_job_submit(config, cmd_command, job_namespace=None, dry_run=False):
"""Method that takes a command and executes it, after checking for flink being up and running.
If the job_namespace is defined, then it will also check for the specific job if its already running.
If flink is not running or the job is already submitted, it will execute.
@@ -86,30 +86,48 @@ def flink_job_submit(config, cmd_command, job_namespace=None):
config(ConfigParser): script's configuration
cmd_command(list): list contaning the command to be submitted
job_namespace(string): the job's name
+ dry_run(boolean, optional): signifies a dry-run execution - no submission is performed
"""
# check if flink is up and running
+ if dry_run:
+ log.info("This is a dry run. Job won't be submitted")
+ else:
+ log.info("Getting ready to submit job")
+
+ log.info(cmd_to_string(cmd_command)+"\n")
try:
flink_response = requests.get(config.get("FLINK", "job_manager").geturl()+"/joboverview/running")
-
+ issues = False
+ job_already_runs = False
if job_namespace is not None:
# if the job's already running then exit, else sumbit the command
for job in json.loads(flink_response.text)["jobs"]:
if job["name"] == job_namespace:
log.critical("Job: "+"'"+job_namespace+"' is already running")
- sys.exit(1)
+ job_already_runs = True
+ issues = True
log.info("Everything is ok")
try:
- check_call(cmd_command)
+ if not dry_run and not job_already_runs:
+ check_call(cmd_command)
except subprocess.CalledProcessError as esp:
log.fatal("Job was not submitted. Error exit code: "+str(esp.returncode))
+ issues = True
sys.exit(1)
except requests.exceptions.ConnectionError:
log.fatal("Flink is not currently running. Tried to communicate with job manager at: " +
config.get("FLINK", "job_manager").geturl())
- sys.exit(1)
-
+ issues = True
+
+ # print dry-run message if needed
+ if dry_run:
+ # print the composed command in green
+ print(("\033[92m" + cmd_to_string(cmd_command) + "\033[0m"))
+ # if there were issues, exit with an error code
+ if issues:
+ sys.exit(1)
def hdfs_check_path(uri, client):
"""Method that checks if a path in hdfs exists. If it exists it will return the path,
diff --git a/bin/utils/recomputations.py b/bin/utils/recomputations.py
index 14e06153..cfffc726 100755
--- a/bin/utils/recomputations.py
+++ b/bin/utils/recomputations.py
@@ -7,24 +7,25 @@
from pymongo import MongoClient
from bson import json_util
import logging
-from common import get_config_paths
-from common import get_log_conf
-from argo_config import ArgoConfig
+from .common import get_config_paths
+from .common import get_log_conf
+from .argo_config import ArgoConfig
import subprocess
log = logging.getLogger(__name__)
+
def write_output(results, tenant, report, target_date, config):
"""Write recomputation output to hdfs
-
+
Args:
results (list(obj)): List of recomputation definitions
tenant (str.): tenant name
report (str.): report name
target_date ([type]): target date
config ([type]): argo configuration object
-
+
Returns:
bool: False if upload had errors
"""
@@ -33,24 +34,27 @@ def write_output(results, tenant, report, target_date, config):
log.info("No recomputations found skipping")
return True
# create a temporary recalculation file in the ar-sync folder
- recomp_name = "".join(["recomp", "_", tenant, "_", report, "_", target_date, ".json"])
+ recomp_name = "".join(
+ ["recomp", "_", tenant, "_", report, "_", target_date, ".json"])
recomp_filepath = os.path.join("/tmp/", recomp_name)
# write output file to the correct job path
with open(recomp_filepath, 'w') as output_file:
json.dump(results, output_file, default=json_util.default)
-
+
# upload file to hdfs
hdfs_writer = config.get("HDFS", "writer_bin")
hdfs_namenode = config.get("HDFS", "namenode")
hdfs_user = config.get("HDFS", "user")
- hdfs_sync = config.get("HDFS", "path_sync").fill(namenode=hdfs_namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).path
-
- status = subprocess.check_call([hdfs_writer, "put", recomp_filepath, hdfs_sync])
+ hdfs_sync = config.get("HDFS", "path_sync").fill(
+ namenode=hdfs_namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).path
+
+ status = subprocess.check_call(
+ [hdfs_writer, "put", recomp_filepath, hdfs_sync])
# clear temp local file
os.remove(recomp_filepath)
if status == 0:
- log.info("File uploaded successfully to hdfs: %s", hdfs_sync )
+ log.info("File uploaded successfully to hdfs: %s", hdfs_sync)
return True
else:
log.error("File uploaded unsuccessful to hdfs: %s", hdfs_sync)
@@ -59,20 +63,19 @@ def write_output(results, tenant, report, target_date, config):
def get_mongo_collection(mongo_uri, collection):
"""Return a pymongo collection object from a collection name
-
+
Args:
mongo_uri (obj.): mongodb uri
collection (str.): collection name
-
+
Returns:
obj.: pymongo collection object
"""
- log.info ("Connecting to mongodb: %s", mongo_uri.geturl())
- print mongo_uri.geturl()
+ log.info("Connecting to mongodb: %s", mongo_uri.geturl())
client = MongoClient(mongo_uri.geturl())
log.info("Opening database: %s", mongo_uri.path[1:])
- db = client[mongo_uri.path[1:]]
+ db = client[mongo_uri.path[1:]]
log.info("Opening collection: %s", collection)
col = db[collection]
@@ -81,12 +84,12 @@ def get_mongo_collection(mongo_uri, collection):
def get_mongo_results(collection, target_date, report):
"""Get recomputation results from mongo collection for specific date and report
-
+
Args:
collection (obj.): pymongo collection object
target_date (str.): date to target
report (str.): report name
-
+
Returns:
list(dict): list of dictionaries containing recomputation definitions
"""
@@ -94,36 +97,38 @@ def get_mongo_results(collection, target_date, report):
# Init results list
results = []
# prepare the query to find requests that include the target date
- query = "'%s' >= this.start_time.split('T')[0] && '%s' <= this.end_time.split('T')[0]" % (target_date, target_date)
+ query = "'%s' >= this.start_time.split('T')[0] && '%s' <= this.end_time.split('T')[0]" % (
+ target_date, target_date)
# run the query
- for item in collection.find({"report":report,"$where": query}, {"_id": 0}):
+ for item in collection.find({"report": report, "$where": query}, {"_id": 0}):
results.append(item)
return results
+
def upload_recomputations(tenant, report, target_date, config):
"""Given a tenant, report and target date upload the relevant recomputations
as an hdfs file
-
+
Args:
tenant (str.): tenant name
report (str.): report name
target_date (str.): target date
config (obj.): argo configuration object
-
+
Returns:
bool: True if upload was succesfull
"""
- tenant_group = "TENANTS:" +tenant
- mongo_endpoint = config.get("MONGO","endpoint").geturl()
- mongo_location = config.get_default(tenant_group,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=tenant)
- col = get_mongo_collection(mongo_location, "recomputations" )
+ tenant_group = "TENANTS:" + tenant
+ mongo_endpoint = config.get("MONGO", "endpoint").geturl()
+ mongo_location = config.get_default(tenant_group, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=tenant)
+ col = get_mongo_collection(mongo_location, "recomputations")
recomp_data = get_mongo_results(col, target_date, report)
return write_output(recomp_data, tenant, report, target_date, config)
-
def main(args=None):
# Get configuration paths
conf_paths = get_config_paths(args.config)
@@ -136,11 +141,13 @@ def main(args=None):
if not res:
sys.exit(1)
+
if __name__ == "__main__":
# Feed Argument parser with the description of the 3 arguments we need
# (input_file,output_file,schema_file)
- arg_parser = ArgumentParser(description="Get relevant recomputation requests")
+ arg_parser = ArgumentParser(
+ description="Get relevant recomputation requests")
arg_parser.add_argument(
"-d", "--date", help="date", dest="date", metavar="DATE", required="TRUE")
arg_parser.add_argument(
@@ -151,4 +158,4 @@ def main(args=None):
"-c", "--config", help="config ", dest="config", metavar="STRING", required="TRUE")
# Parse the command line arguments accordingly and introduce them to
# main...
- sys.exit(main(arg_parser.parse_args()))
\ No newline at end of file
+ sys.exit(main(arg_parser.parse_args()))
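
write_output above dumps the recomputation list to a temporary JSON file and ships it to HDFS with the configured writer binary. A reduced sketch of that upload step; the writer binary and HDFS path below are placeholders:

import json
import os
import subprocess

def put_json_to_hdfs(results, local_name, hdfs_writer, hdfs_dir):
    local_path = os.path.join("/tmp", local_name)
    with open(local_path, "w") as out:
        json.dump(results, out)
    # check_call returns 0 on success and raises CalledProcessError otherwise
    status = subprocess.check_call([hdfs_writer, "put", local_path, hdfs_dir])
    os.remove(local_path)
    return status == 0

# put_json_to_hdfs([{"report": "Critical"}], "recomp_TENANTA_Critical_2021-01-01.json",
#                  "/usr/local/bin/hdfs-writer", "/user/hdfs_user/argo/tenants/TENANTA/sync")
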
diff --git a/bin/utils/test_argo_config.py b/bin/utils/test_argo_config.py
index 8bf80ca2..397b9019 100644
--- a/bin/utils/test_argo_config.py
+++ b/bin/utils/test_argo_config.py
@@ -1,7 +1,7 @@
import unittest
import os
-from argo_config import ArgoConfig
-from urlparse import urlparse
+from .argo_config import ArgoConfig
+from urllib.parse import urlparse
CONF_FILE = os.path.join(os.path.dirname(__file__), '../../conf/argo-streaming.conf')
diff --git a/bin/utils/test_update_ams.py b/bin/utils/test_update_ams.py
index 20fe1b85..58da67ec 100644
--- a/bin/utils/test_update_ams.py
+++ b/bin/utils/test_update_ams.py
@@ -1,6 +1,6 @@
import unittest
import responses
-from update_ams import ArgoAmsClient
+from .update_ams import ArgoAmsClient
class TestClass(unittest.TestCase):
@@ -40,7 +40,7 @@ def test_urls(self):
actual = ams.get_url(test_case["resource"], test_case["item_uuid"], test_case["group_uuid"],
test_case["action"])
expected = test_case["expected"]
- self.assertEquals(expected, actual)
+ self.assertEqual(expected, actual)
@responses.activate
def test_basic_request(self):
@@ -109,6 +109,24 @@ def test_basic_request(self):
],
"name": "ams_projecta_consumer",
+ }, status=200)
+ responses.add(responses.GET, 'https://ams.foo/v1/users/ams_projecta_archiver?key=faketoken',
+ json={
+ "uuid": "id02",
+ "projects": [
+ {
+ "project": "PROJECTA",
+ "roles": [
+ "consumer"
+ ],
+ "topics": [
+
+ ],
+ "subscriptions": ["archive_metric"]
+ }
+ ],
+ "name": "ams_projecta_archiver",
+
}, status=200)
responses.add(responses.GET, 'https://ams.foo/v1/users/ams_projecta_publisher?key=faketoken',
json={
@@ -153,22 +171,22 @@ def test_basic_request(self):
ams = ArgoAmsClient("ams.foo", "faketoken")
- self.assertEquals("PROJECTA", ams.get_project("PROJECTA")["name"])
+ self.assertEqual("PROJECTA", ams.get_project("PROJECTA")["name"])
users = ams.get_users()
- self.assertEquals("id01", users[0]["uuid"])
- self.assertEquals("id02", users[1]["uuid"])
+ self.assertEqual("id01", users[0]["uuid"])
+ self.assertEqual("id02", users[1]["uuid"])
user = ams.get_user("ams_projecta_consumer")
- self.assertEquals("ams_projecta_consumer", user["name"])
+ self.assertEqual("ams_projecta_consumer", user["name"])
- self.assertEquals(["sync_data", "metric_data"], ams.user_get_topics(users[0], "PROJECTA"))
- self.assertEquals([], ams.user_get_subs(users[0], "PROJECTA"))
- self.assertEquals([], ams.user_get_topics(users[1], "PROJECTA"))
- self.assertEquals(["ingest_sync", "ingest_metric", "status_sync", "status_metric"],
+ self.assertEqual(["sync_data", "metric_data"], ams.user_get_topics(users[0], "PROJECTA"))
+ self.assertEqual([], ams.user_get_subs(users[0], "PROJECTA"))
+ self.assertEqual([], ams.user_get_topics(users[1], "PROJECTA"))
+ self.assertEqual(["ingest_sync", "ingest_metric", "status_sync", "status_metric"],
ams.user_get_subs(users[1], "PROJECTA"))
- self.assertEquals("PROJECTA", ams.check_project_exists("projectA")["name"])
+ self.assertEqual("PROJECTA", ams.check_project_exists("projectA")["name"])
expected_missing = {'topics': ['sync_data'], 'topic_acls': [],
- 'subs': ['ingest_sync', 'ingest_metric', 'status_sync', 'status_metric'],
- 'sub_acls': ['ingest_sync'], 'users': ['project_admin']}
+ 'subs': ['ingest_sync', 'ingest_metric', 'status_sync', 'status_metric', 'archive_metric'],
+ 'sub_acls': ['ingest_sync', 'archive_metric'], 'users': ['project_admin']}
- self.assertEquals(expected_missing, ams.check_tenant("projectA"))
+ self.assertEqual(expected_missing, ams.check_tenant("projectA"))
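
The archiver-user assertions above rely on the responses library to stub the AMS endpoints. A tiny, standalone example of that mocking pattern with a placeholder URL and payload:

import requests
import responses

@responses.activate
def fetch_project_name():
    # every request made inside this function is served by the stub below
    responses.add(responses.GET, "https://ams.foo/v1/projects/PROJECTA",
                  json={"name": "PROJECTA"}, status=200)
    r = requests.get("https://ams.foo/v1/projects/PROJECTA")
    return r.json()["name"]

assert fetch_project_name() == "PROJECTA"
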
diff --git a/bin/utils/test_update_cron.py b/bin/utils/test_update_cron.py
index 753b5bcc..91ee64fc 100644
--- a/bin/utils/test_update_cron.py
+++ b/bin/utils/test_update_cron.py
@@ -1,7 +1,7 @@
import unittest
import os
-from update_cron import get_daily, get_hourly, gen_entry, gen_batch_ar, gen_batch_status, gen_tenant_all, gen_for_all
-from argo_config import ArgoConfig
+from .update_cron import get_daily, get_hourly, gen_entry, gen_batch_ar, gen_batch_status, gen_tenant_all, gen_for_all
+from .argo_config import ArgoConfig
CONF_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../conf/argo-streaming.conf'))
SCHEMA_FILE = os.path.join(os.path.dirname(__file__), '../../conf/config.schema.json')
@@ -23,24 +23,24 @@ def test_update_cron(self):
config = ArgoConfig(CONF_FILE, SCHEMA_FILE)
# Test get_hourly
- self.assertEquals("8 * * * *", get_hourly(8))
- self.assertEquals("44 * * * *", get_hourly(44))
- self.assertEquals("32 * * * *", get_hourly(32))
- self.assertEquals("12 * * * *", get_hourly(12))
- self.assertEquals("5 * * * *", get_hourly())
+ self.assertEqual("8 * * * *", get_hourly(8))
+ self.assertEqual("44 * * * *", get_hourly(44))
+ self.assertEqual("32 * * * *", get_hourly(32))
+ self.assertEqual("12 * * * *", get_hourly(12))
+ self.assertEqual("5 * * * *", get_hourly())
# Test get_daily
- self.assertEquals("8 1 * * *", get_daily(1, 8))
- self.assertEquals("44 3 * * *", get_daily(3, 44))
- self.assertEquals("32 4 * * *", get_daily(4, 32))
- self.assertEquals("12 5 * * *", get_daily(5, 12))
- self.assertEquals("0 5 * * *", get_daily())
+ self.assertEqual("8 1 * * *", get_daily(1, 8))
+ self.assertEqual("44 3 * * *", get_daily(3, 44))
+ self.assertEqual("32 4 * * *", get_daily(4, 32))
+ self.assertEqual("12 5 * * *", get_daily(5, 12))
+ self.assertEqual("0 5 * * *", get_daily())
# Test gen_entry
- self.assertEquals("#simple command\n5 * * * * root echo 12\n",
+ self.assertEqual("#simple command\n5 * * * * root echo 12\n",
gen_entry(get_hourly(), "echo 12", "root", "simple command"))
- self.assertEquals("#foo command\n8 12 * * * foo echo 1+1\n",
+ self.assertEqual("#foo command\n8 12 * * * foo echo 1+1\n",
gen_entry(get_daily(12, 8), "echo 1+1", "foo", "foo command"))
# Test generation of ar cronjob for a specific tenant and report
@@ -48,14 +48,14 @@ def test_update_cron(self):
+ "5 5 * * * foo " + BATCH_AR + " -t TENANT_A -r report1 -d " + YESTERDAY + " -m upsert " + "-c "\
+ config.conf_path + "\n"
- self.assertEquals(expected, gen_batch_ar(config, "TENANT_A", "report1", "daily", "foo", "upsert"))
+ self.assertEqual(expected, gen_batch_ar(config, "TENANT_A", "report1", "daily", "foo", "upsert"))
# Test generation of ar cronjob for a specific tenant and report
expected = "#TENANT_A:report1 hourly A/R\n"\
+ "5 * * * * " + BATCH_AR + " -t TENANT_A -r report1 -d " + TODAY + " -m insert " + "-c "\
+ config.conf_path + "\n"
- self.assertEquals(expected, gen_batch_ar(config, "TENANT_A", "report1", "hourly"))
+ self.assertEqual(expected, gen_batch_ar(config, "TENANT_A", "report1", "hourly"))
# Test generation of ar cronjob for a specific tenant and report
expected = "#TENANT_B:report1 daily Status\n"\
@@ -63,14 +63,14 @@ def test_update_cron(self):
+ YESTERDAY + " -m upsert " + "-c "\
+ config.conf_path + "\n"
- self.assertEquals(expected, gen_batch_status(config, "TENANT_B", "report1", "daily", "foo", "upsert"))
+ self.assertEqual(expected, gen_batch_status(config, "TENANT_B", "report1", "daily", "foo", "upsert"))
# Test generation of status cronjob for a specific tenant and report
expected = "#TENANT_B:report1 hourly Status\n"\
+ "5 * * * * " + BATCH_STATUS + " -t TENANT_B -r report1 -d " + TODAY + " -m insert " + "-c "\
+ config.conf_path + "\n"
- self.assertEquals(expected, gen_batch_status(config, "TENANT_B", "report1", "hourly"))
+ self.assertEqual(expected, gen_batch_status(config, "TENANT_B", "report1", "hourly"))
# Test generation of cronjobs for a tenant's reports
expected = "#Jobs for TENANT_A\n\n" \
@@ -100,7 +100,7 @@ def test_update_cron(self):
+ config.conf_path + "\n\n" \
+ "\n"
- self.assertEquals(expected, gen_tenant_all(config, "TENANT_A"))
+ self.assertEqual(expected, gen_tenant_all(config, "TENANT_A"))
# Test generation of cronjobs for all tenants and all reports
expected2 = "#Jobs for TENANT_B\n\n" \
@@ -131,4 +131,4 @@ def test_update_cron(self):
+ "\n"
expected = expected + expected2
- self.assertEquals(expected, gen_for_all(config))
+ self.assertEqual(expected, gen_for_all(config))
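
The expectations above pin down the crontab entry format produced by update_cron: a comment line followed by a five-field schedule, an optional user and the command. A sketch inferred from those expected strings, not the real gen_entry implementation:

def gen_entry(schedule, command, user=None, description=""):
    # system crontabs carry a user field between the schedule and the command
    user_part = user + " " if user else ""
    return "#{}\n{} {}{}\n".format(description, schedule, user_part, command)

assert gen_entry("5 * * * *", "echo 12", "root", "simple command") == \
    "#simple command\n5 * * * * root echo 12\n"
assert gen_entry("8 12 * * *", "echo 1+1", "foo", "foo command") == \
    "#foo command\n8 12 * * * foo echo 1+1\n"
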
diff --git a/bin/utils/test_update_profiles.py b/bin/utils/test_update_profiles.py
index c162d722..67b2f01a 100644
--- a/bin/utils/test_update_profiles.py
+++ b/bin/utils/test_update_profiles.py
@@ -1,6 +1,6 @@
import unittest
-from update_profiles import HdfsReader
-from update_profiles import ArgoApiClient
+from .update_profiles import HdfsReader
+from .update_profiles import ArgoApiClient
class TestClass(unittest.TestCase):
@@ -13,9 +13,9 @@ def test_hdfs_reader(self):
test_cases = [
{"tenant": "TA", "report": "Critical", "profile_type": "operations",
- "expected": "/user/foo/argo/tenants/TA/sync/TA_ops.json"},
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops.json"},
{"tenant": "TA", "report": "Super-Critical", "profile_type": "operations",
- "expected": "/user/foo/argo/tenants/TA/sync/TA_ops.json"},
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops.json"},
{"tenant": "TA", "report": "Critical", "profile_type": "reports",
"expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"},
{"tenant": "TA", "report": "Critical", "profile_type": "aggregations",
@@ -27,13 +27,44 @@ def test_hdfs_reader(self):
{"tenant": "TB", "report": "Critical", "profile_type": "aggregations",
"expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap.json"},
{"tenant": "TB", "report": "Critical", "profile_type": "reports",
- "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"}
+ "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"},
+ {"tenant": "TB", "report": "Critical", "profile_type": "metrics",
+ "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics.json"}
]
for test_case in test_cases:
- actual = hdfs.gen_profile_path(test_case["tenant"], test_case["report"], test_case["profile_type"])
+ actual = hdfs.gen_profile_path(
+ test_case["tenant"], test_case["report"], test_case["profile_type"])
expected = test_case["expected"]
- self.assertEquals(expected, actual)
+ self.assertEqual(expected, actual)
+
+ # Test with dates
+ test_cases_dates = [
+ {"tenant": "TA", "report": "Critical", "profile_type": "operations", "date": "2019-12-11",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops_2019-12-11.json"},
+ {"tenant": "TA", "report": "Super-Critical", "profile_type": "operations", "date": "2019-10-04",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops_2019-10-04.json"},
+ {"tenant": "TA", "report": "Critical", "profile_type": "reports", "date": "2019-05-11",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"},
+ {"tenant": "TA", "report": "Critical", "profile_type": "aggregations", "date": "2019-06-06",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ap_2019-06-06.json"},
+ {"tenant": "TA", "report": "Crit", "profile_type": "reports", "date": "2019-07-04",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Crit_cfg.json"},
+ {"tenant": "TA", "report": "Super-Critical", "profile_type": "aggregations", "date": "2019-03-04",
+ "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ap_2019-03-04.json"},
+ {"tenant": "TB", "report": "Critical", "profile_type": "aggregations", "date": "2019-01-04",
+ "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap_2019-01-04.json"},
+ {"tenant": "TB", "report": "Critical", "profile_type": "reports", "date": "2019-01-05",
+ "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"},
+ {"tenant": "TB", "report": "Critical", "profile_type": "metrics", "date": "2019-02-24",
+ "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics_2019-02-24.json"}
+ ]
+
+ for test_case_date in test_cases_dates:
+ actual = hdfs.gen_profile_path(
+ test_case_date["tenant"], test_case_date["report"], test_case_date["profile_type"], test_case_date["date"])
+ expected = test_case_date["expected"]
+ self.assertEqual(expected, actual)
def test_api(self):
@@ -60,10 +91,15 @@ def test_api(self):
{"resource": "tenants", "item_uuid": None,
"expected": "https://foo.host/api/v2/admin/tenants"},
{"resource": "tenants", "item_uuid": "12",
- "expected": "https://foo.host/api/v2/admin/tenants/12"}
- ]
+ "expected": "https://foo.host/api/v2/admin/tenants/12"},
+ {"resource": "metrics", "item_uuid": None,
+ "expected": "https://foo.host/api/v2/metric_profiles"},
+ {"resource": "metrics", "item_uuid": "12",
+ "expected": "https://foo.host/api/v2/metric_profiles/12"}
+ ]
for test_case in test_cases:
- actual = argo_api.get_url(test_case["resource"], test_case["item_uuid"])
+ actual = argo_api.get_url(
+ test_case["resource"], test_case["item_uuid"])
expected = test_case["expected"]
- self.assertEquals(expected, actual)
+ self.assertEqual(expected, actual)
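
The expected paths above imply a consistent naming scheme for sync profiles under each tenant's sync/ directory. A sketch of that scheme as inferred from the test cases, not the real HdfsReader.gen_profile_path:

def profile_filename(tenant, report, profile_type, date=None):
    suffix = {"operations": "ops", "aggregations": "ap",
              "reports": "cfg", "metrics": "metrics"}[profile_type]
    name = "{}_{}_{}".format(tenant, report, suffix)
    # report configurations are not dated; the other profile types are
    if date and profile_type != "reports":
        name = "{}_{}".format(name, date)
    return name + ".json"

assert profile_filename("TA", "Critical", "operations", "2019-12-11") == "TA_Critical_ops_2019-12-11.json"
assert profile_filename("TB", "Critical", "reports", "2019-01-05") == "TB_Critical_cfg.json"
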
diff --git a/bin/utils/update_ams.py b/bin/utils/update_ams.py
index d9148830..6c9d88bf 100755
--- a/bin/utils/update_ams.py
+++ b/bin/utils/update_ams.py
@@ -2,8 +2,8 @@
import requests
import json
import logging
-from common import get_config_paths, get_log_conf
-from argo_config import ArgoConfig
+from .common import get_config_paths, get_log_conf
+from .argo_config import ArgoConfig
from argparse import ArgumentParser
import sys
@@ -18,16 +18,23 @@ class ArgoAmsClient:
It connects to an argo-messaging host and retrieves project/user/topic/subscription information
"""
- def __init__(self, host, admin_key, verify=True):
+ def __init__(self, host, admin_key, verify=True, http_proxy_url=None):
"""
Initialize ArgoAAmsClient
Args:
host: str. argo ams host
admin_key: str. admin token
+ verify (boolean): flag if the remote web api host should be verified
+ http_proxy_url (str.): optional url for local http proxy to be used
"""
# flag to verify https connections or not
self.verify = verify
+ # proxy configuration
+ if http_proxy_url:
+ self.proxies = {'http':http_proxy_url,'https':http_proxy_url}
+ else:
+ self.proxies = None
# ams host
self.host = host
# admin key to access ams service
@@ -88,7 +95,7 @@ def post_resource(self, url, data):
'Accept': 'application/json'
})
# do the post requests
- r = requests.post(url, headers=headers, verify=self.verify, data=json.dumps(data))
+ r = requests.post(url, headers=headers, verify=self.verify, data=json.dumps(data), proxies=self.proxies)
# if successful return data (or empty json)
if 200 == r.status_code:
if r.text == "":
@@ -116,7 +123,7 @@ def put_resource(self, url, data):
'Accept': 'application/json'
})
# do the put request
- r = requests.put(url, headers=headers, verify=self.verify, data=json.dumps(data))
+ r = requests.put(url, headers=headers, verify=self.verify, data=json.dumps(data), proxies=self.proxies)
# if successful return json data (or empty json)
if 200 == r.status_code:
if r.text == "":
@@ -143,7 +150,7 @@ def get_resource(self, url):
'Accept': 'application/json'
})
# do the get resource
- r = requests.get(url, headers=headers, verify=self.verify)
+ r = requests.get(url, headers=headers, verify=self.verify, proxies=self.proxies)
# if successful return the json data or empty json
if 200 == r.status_code:
if r.text == "":
@@ -306,8 +313,8 @@ def get_tenant_users(self, tenant):
dict. json representation of list of AMS users
"""
- # tenant must have 3 users: project_admin, publisher, consumer
- lookup = [("project_admin", "ams_{}_admin"), ("publisher", "ams_{}_publisher"), ("consumer", "ams_{}_consumer")]
+ # tenant must have 4 users: project_admin, publisher, consumer, archiver(consumer)
+ lookup = [("project_admin", "ams_{}_admin"), ("publisher", "ams_{}_publisher"), ("consumer", "ams_{}_consumer"), ("archiver", "ams_{}_archiver")]
lookup = [(x, y.format(tenant.lower())) for (x, y) in lookup]
users = dict()
for (role, name) in lookup:
@@ -372,6 +379,9 @@ def get_tenant_subs(self, tenant, topics):
if name.endswith('status_metric'):
if topics["metric_data"] == sub["topic"]:
found["status_metric"] = name
+ if name.endswith('archive_metric'):
+ if topics["metric_data"] == sub["topic"]:
+ found["archive_metric"] = name
return found
@staticmethod
@@ -518,10 +528,14 @@ def create_tenant_user(self, tenant, role):
project_name = tenant.upper()
if role == "project_admin":
username = "ams_{}_admin".format(tenant.lower())
+ elif role == "archiver":
+ username = "ams_{}_archiver".format(tenant.lower())
+ # archiver is actually a consumer
+ role = "consumer"
else:
username = "ams_{}_{}".format(tenant.lower(), role)
- print username, role
+
url = self.get_url("users", username)
data = {"projects": [{"project": project_name, "roles": [role]}]}
return self.post_resource(url, data)
@@ -575,10 +589,10 @@ def check_tenant(self, tenant):
         # Things that should be present in AMS definitions
topics_lookup = ["sync_data", "metric_data"]
- subs_lookup = ["ingest_sync", "ingest_metric", "status_sync", "status_metric"]
- users_lookup = ["project_admin", "publisher", "consumer"]
+ subs_lookup = ["ingest_sync", "ingest_metric", "status_sync", "status_metric", "archive_metric"]
+ users_lookup = ["project_admin", "publisher", "consumer", "archiver"]
topic_acl_lookup = ["sync_data", "metric_data"]
- sub_acl_lookup = ["ingest_sync", "ingest_metric"]
+ sub_acl_lookup = ["ingest_sync", "ingest_metric", "archive_metric"]
# Initialize a dictionary with missing definitions
missing = dict()
@@ -601,19 +615,21 @@ def check_tenant(self, tenant):
if users is None:
users = {}
+
+
# For each expected topic check if it was indeed found in AMS or if it's missing
for item in topics_lookup:
- if item not in topics.keys():
+ if item not in list(topics.keys()):
missing["topics"].append(item)
# For each expected sub check if it was indeed found in AMS or if it's missing
for item in subs_lookup:
- if item not in subs.keys():
+ if item not in list(subs.keys()):
missing["subs"].append(item)
# For each expected user check if it was indeed found in AMS or if it's missing
for item in users_lookup:
- if item not in users.keys():
+ if item not in list(users.keys()):
missing["users"].append(item)
user_topics = []
@@ -658,13 +674,17 @@ def fill_missing(self, tenant, missing):
# For each missing sub attempt to create it in AMS
for sub in missing["subs"]:
# create sub
+ if sub.startswith("archive") and sub.endswith("metric"):
+ topic = "metric_data"
if sub.endswith("metric"):
topic = "metric_data"
elif sub.endswith("sync"):
topic = "sync_data"
else:
continue
+
sub_new = self.create_tenant_sub(tenant, topic, sub)
+
log.info("Tenant:{} - created missing subscription: {} on topic: {}".format(tenant, sub_new["name"],
sub_new["topic"]))
@@ -688,7 +708,10 @@ def fill_missing(self, tenant, missing):
# For each missing subscription attempt to set it in AMS
for sub_acl in missing["sub_acls"]:
acl = self.get_sub_acl(tenant, sub_acl)
- user_con = "ams_{}_consumer".format(tenant.lower())
+ if sub_acl.startswith("archive"):
+ user_con = "ams_{}_archiver".format(tenant.lower())
+ else:
+ user_con = "ams_{}_consumer".format(tenant.lower())
if user_con not in acl:
acl.append(user_con)
@@ -759,10 +782,14 @@ def run_ams_update(args):
ams_token = config.get("AMS", "access_token")
ams_host = config.get("AMS", "endpoint").hostname
+ ams_verify = config.get("AMS", "verify")
+ ams_proxy = config.get("AMS", "proxy")
+ if ams_proxy:
+ ams_proxy = ams_proxy.geturl()
log.info("ams api used {}".format(ams_host))
tenant_list = config.get("API", "tenants")
- ams = ArgoAmsClient(ams_host, ams_token)
+ ams = ArgoAmsClient(ams_host, ams_token, ams_verify, ams_proxy)
if args.tenant is not None:
         # Check if tenant exists in argo configuration
diff --git a/bin/utils/update_cron.py b/bin/utils/update_cron.py
index 3b219eed..835dbddd 100755
--- a/bin/utils/update_cron.py
+++ b/bin/utils/update_cron.py
@@ -6,8 +6,8 @@
from argparse import ArgumentParser
from subprocess import check_output, CalledProcessError, check_call
from datetime import datetime
-from common import get_log_conf, get_config_paths
-from argo_config import ArgoConfig
+from .common import get_log_conf, get_config_paths
+from .argo_config import ArgoConfig
log = logging.getLogger(__name__)
diff --git a/bin/utils/update_profiles.py b/bin/utils/update_profiles.py
index e48e490d..5b1fc9e7 100755
--- a/bin/utils/update_profiles.py
+++ b/bin/utils/update_profiles.py
@@ -6,12 +6,12 @@
import logging
import os
import uuid
-from urlparse import urlparse
+from urllib.parse import urlparse
from argparse import ArgumentParser
import sys
import subprocess
-from common import get_log_conf, get_config_paths
-from argo_config import ArgoConfig
+from .common import get_log_conf, get_config_paths
+from .argo_config import ArgoConfig
log = logging.getLogger(__name__)
@@ -23,39 +23,52 @@ class ArgoApiClient:
It connects to an argo-web-api host and retrieves profile information per tenant and report
"""
- def __init__(self, host, tenant_keys):
+ def __init__(self, host, tenant_keys, verify=False, http_proxy_url=None):
"""
Initialize ArgoApiClient which is used to retrieve profiles from argo-web-api
Args:
host: str. argo-web-api host
tenant_keys: dict. a dictionary of {tenant: api_token} entries
+ verify (boolean): flag if the remote web api host should be verified
+ http_proxy_url (str.): optional url for local http proxy to be used
"""
self.host = host
+ self.verify = verify
+ if http_proxy_url:
+ self.proxies = {'http': http_proxy_url, 'https': http_proxy_url}
+ else:
+ self.proxies = None
+
self.paths = dict()
self.tenant_keys = tenant_keys
self.paths.update({
'reports': '/api/v2/reports',
'operations': '/api/v2/operations_profiles',
+ 'metrics': '/api/v2/metric_profiles',
'aggregations': '/api/v2/aggregation_profiles',
'thresholds': '/api/v2/thresholds_profiles',
'tenants': '/api/v2/admin/tenants'
})
- def get_url(self, resource, item_uuid=None):
+ def get_url(self, resource, item_uuid=None, date=None):
"""
Constructs an argo-web-api url based on the resource and item_uuid
Args:
resource: str. resource to be retrieved (reports|ops)
item_uuid: str. retrieve a specific item from the resource
+ date: str. returns the historic version of the resource
Returns:
str: url path
"""
+ dateQuery = ""
+ if date:
+ dateQuery = "?date=" + date
if item_uuid is None:
- return "".join(["https://", self.host, self.paths[resource]])
+ return "".join(["https://", self.host, self.paths[resource], dateQuery])
else:
- return "".join(["https://", self.host, self.paths[resource], "/", item_uuid])
+ return "".join(["https://", self.host, self.paths[resource], "/", item_uuid, dateQuery])
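+        # Illustrative results (the host value below comes from the unit tests and is hypothetical):
+        #   get_url("metrics", "12")               -> "https://foo.host/api/v2/metric_profiles/12"
+        #   get_url("metrics", "12", "2019-06-06") -> "https://foo.host/api/v2/metric_profiles/12?date=2019-06-06"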
def get_resource(self, tenant, url):
"""
@@ -73,7 +86,8 @@ def get_resource(self, tenant, url):
'Accept': 'application/json',
'x-api-key': self.tenant_keys[tenant]
})
- r = requests.get(url, headers=headers, verify=False)
+ r = requests.get(url, headers=headers,
+ verify=self.verify, proxies=self.proxies)
if 200 == r.status_code:
return json.loads(r.text)["data"]
@@ -90,17 +104,16 @@ def get_tenants(self, token):
dict: list of tenants and access keys
"""
+
tenants = self.get_admin_resource(token, self.get_url("tenants"))
tenant_keys = dict()
for item in tenants:
for user in item["users"]:
if user["name"].startswith("argo_engine_") and user["api_key"]:
- print len(user["api_key"])
tenant_keys[item["info"]["name"]] = user["api_key"]
return tenant_keys
- @staticmethod
- def get_admin_resource(token, url):
+ def get_admin_resource(self, token, url):
"""
Returns an argo-web-api resource by tenant and url
Args:
@@ -116,14 +129,15 @@ def get_admin_resource(token, url):
'Accept': 'application/json',
'x-api-key': token
})
- r = requests.get(url, headers=headers, verify=False)
+ r = requests.get(url, headers=headers,
+ verify=self.verify, proxies=self.proxies)
if 200 == r.status_code:
return json.loads(r.text)["data"]
else:
return None
- def get_profile(self, tenant, report, profile_type):
+ def get_profile(self, tenant, report, profile_type, date=None):
"""
Gets an argo-web-api profile by tenant, report and profile type
Args:
@@ -140,9 +154,11 @@ def get_profile(self, tenant, report, profile_type):
return []
item_uuid = self.find_profile_uuid(tenant, report, profile_type)
+
if item_uuid is None:
return None
- profiles = self.get_resource(tenant, self.get_url(profile_type, item_uuid))
+ profiles = self.get_resource(
+ tenant, self.get_url(profile_type, item_uuid, date))
if profiles is not None:
return profiles[0]
@@ -171,6 +187,7 @@ def find_report_uuid(self, tenant, name):
"""
r = self.get_reports(tenant)
+
if r is None:
return ''
@@ -189,6 +206,7 @@ def get_report(self, tenant, item_uuid):
obj: Returns an array of reports or one report
"""
+
reports = self.get_resource(tenant, self.get_url("reports", item_uuid))
if reports is not None:
return reports[0]
@@ -204,13 +222,16 @@ def find_profile_uuid(self, tenant, report, profile_type):
Returns:
"""
- if profile_type is "aggregations":
- profile_type = "aggregation"
-
+        if profile_type == "aggregations":
+            profile_type = "aggregation"
+
+        if profile_type == "metrics":
+            profile_type = "metric"
+
report = self.get_report(tenant, self.find_report_uuid(tenant, report))
if profile_type == "reports":
- return report["id"]
- for profile in report["profiles"]:
+ return report["id"]
+ for profile in report["profiles"]:
if profile["type"] == profile_type:
return profile["id"]
@@ -234,51 +255,56 @@ def __init__(self, namenode, port, base_path):
self.client = Client(namenode, port)
self.base_path = base_path
- def gen_profile_path(self, tenant, report, profile_type):
+ def gen_profile_path(self, tenant, report, profile_type, date=None):
"""
Generates a valid hdfs path to a specific profile
Args:
tenant: str. tenant to be used
report: str. report to be used
- profile_type: str. profile_type (operations|reports|aggregations|thresholds)
+ profile_type: str. profile_type (operations|reports|aggregations|thresholds|metrics)
Returns:
str: hdfs path
"""
templates = dict()
+ if date:
+ date = "_" + date
+ else:
+ date = ""
templates.update({
- 'operations': '{0}_ops.json',
- 'aggregations': '{0}_{1}_ap.json',
+ 'operations': '{0}_{1}_ops{2}.json',
+ 'aggregations': '{0}_{1}_ap{2}.json',
+ 'metrics': '{0}_{1}_metrics{2}.json',
'reports': '{0}_{1}_cfg.json',
- 'thresholds': '{0}_{1}_thresholds.json',
+ 'thresholds': '{0}_{1}_thresholds{2}.json',
'recomputations': 'recomp.json'
})
sync_path = self.base_path.replace("{{tenant}}", tenant)
- filename = templates[profile_type].format(tenant, report)
+ filename = templates[profile_type].format(tenant, report, date)
return os.path.join(sync_path, filename)
- def cat(self, tenant, report, profile_type):
+ def cat(self, tenant, report, profile_type, date=None):
"""
Returns the contents of a profile stored in hdfs
Args:
tenant: str. tenant name
report: str. report name
- profile_type: str. profile type (operations|reports|aggregations|thresholds)
+            profile_type: str. profile type (operations|reports|aggregations|thresholds|metrics)
Returns:
"""
- path = self.gen_profile_path(tenant, report, profile_type)
+ path = self.gen_profile_path(tenant, report, profile_type, date)
try:
txt = self.client.cat([path])
- j = json.loads(txt.next().next())
+ j = json.loads(next(next(txt)))
return j, True
except FileNotFoundException:
return None, False
- def rem(self, tenant, report, profile_type):
+ def rem(self, tenant, report, profile_type, date=None):
"""
Removes a profile file that already exists in hdfs (in order to be replaced)
Args:
@@ -289,10 +315,10 @@ def rem(self, tenant, report, profile_type):
Returns:
"""
- path = self.gen_profile_path(tenant, report, profile_type)
+ path = self.gen_profile_path(tenant, report, profile_type, date)
try:
- self.client.delete([path]).next()
+ next(self.client.delete([path]))
return True
except FileNotFoundException:
return False
@@ -320,7 +346,8 @@ def __init__(self, config):
namenode = config.get("HDFS", "namenode")
hdfs_user = config.get("HDFS", "user")
full_path = config.get("HDFS", "path_sync")
- full_path = full_path.partial_fill(namenode=namenode.geturl(), hdfs_user=hdfs_user)
+ full_path = full_path.partial_fill(
+ namenode=namenode.geturl(), hdfs_user=hdfs_user)
short_path = urlparse(full_path).path
@@ -330,12 +357,16 @@ def __init__(self, config):
for tenant in tenant_list:
tenant_key = config.get("API", tenant + "_key")
tenant_keys[tenant] = tenant_key
-
- print namenode.hostname, namenode.port, short_path
+
+        api_proxy = config.get("API", "proxy")
+        if api_proxy:
+            api_proxy = api_proxy.geturl()
+
self.hdfs = HdfsReader(namenode.hostname, namenode.port, short_path)
- self.api = ArgoApiClient(config.get("API", "endpoint").netloc, tenant_keys)
+        self.api = ArgoApiClient(config.get("API", "endpoint").netloc, tenant_keys,
+                                 config.get("API", "verify"), api_proxy)
- def profile_update_check(self, tenant, report, profile_type):
+ def profile_update_check(self, tenant, report, profile_type, date=None):
"""
Checks if latest api profiles are aligned with profile files stored in hdfs.
If not the updated api profile are uploaded to hdfs
@@ -343,20 +374,22 @@ def profile_update_check(self, tenant, report, profile_type):
tenant: str. Tenant name to check profiles from
report: str. Report name to check profiles from
profile_type: str. Name of the profile type used (operations|aggregations|reports)
+ date: str. Optional date to retrieve historic version of the profile
"""
- prof_api = self.api.get_profile(tenant, report, profile_type)
+ prof_api = self.api.get_profile(tenant, report, profile_type, date)
if prof_api is None:
- log.info("profile type %s doesn't exist in report --skipping", profile_type)
+ log.info(
+ "profile type %s doesn't exist in report --skipping", profile_type)
return
log.info("retrieved %s profile(api): %s", profile_type, prof_api)
-
- prof_hdfs, exists = self.hdfs.cat(tenant, report, profile_type)
-
-
+
+ prof_hdfs, exists = self.hdfs.cat(tenant, report, profile_type, date)
+
if exists:
- log.info("retrieved %s profile(hdfs): %s ", profile_type, prof_hdfs)
+ log.info("retrieved %s profile(hdfs): %s ",
+ profile_type, prof_hdfs)
prof_update = prof_api != prof_hdfs
if prof_update:
@@ -366,13 +399,15 @@ def profile_update_check(self, tenant, report, profile_type):
else:
# doesn't exist so it should be uploaded
prof_update = True
- log.info("%s profile doesn't exist in hdfs, should be uploaded", profile_type)
+ log.info(
+ "%s profile doesn't exist in hdfs, should be uploaded", profile_type)
# Upload if it's deemed to be uploaded
if prof_update:
- self.upload_profile_to_hdfs(tenant, report, profile_type, prof_api, exists)
+ self.upload_profile_to_hdfs(
+ tenant, report, profile_type, prof_api, exists, date)
- def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists):
+ def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists, date=None):
"""
Uploads an updated profile (from api) to the specified hdfs destination
Args:
@@ -389,9 +424,10 @@ def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists):
# If file exists on hdfs should be removed first
if exists:
- is_removed = self.hdfs.rem(tenant, report, profile_type)
+ is_removed = self.hdfs.rem(tenant, report, profile_type, date)
if not is_removed:
- log.error("Could not remove old %s profile from hdfs", profile_type)
+ log.error(
+ "Could not remove old %s profile from hdfs", profile_type)
return
# If all ok continue with uploading the new file to hdfs
@@ -403,29 +439,34 @@ def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists):
local_path = "/tmp/" + temp_fn
with open(local_path, 'w') as outfile:
json.dump(profile, outfile)
- hdfs_host = self.cfg.get("HDFS","namenode").hostname
- hdfs_path = self.hdfs.gen_profile_path(tenant, report, profile_type)
- status = subprocess.check_call([hdfs_write_bin, hdfs_write_cmd, local_path, hdfs_path])
+ hdfs_host = self.cfg.get("HDFS", "namenode").hostname
+ hdfs_path = self.hdfs.gen_profile_path(
+ tenant, report, profile_type, date)
+ status = subprocess.check_call(
+ [hdfs_write_bin, hdfs_write_cmd, local_path, hdfs_path])
if status == 0:
- log.info("File uploaded successfully to hdfs host: %s path: %s", hdfs_host, hdfs_path)
+ log.info(
+ "File uploaded successfully to hdfs host: %s path: %s", hdfs_host, hdfs_path)
return True
else:
- log.error("File uploaded unsuccessful to hdfs host: %s path: %s", hdfs_host, hdfs_path)
+ log.error(
+ "File uploaded unsuccessful to hdfs host: %s path: %s", hdfs_host, hdfs_path)
return False
def upload_tenant_reports_cfg(self, tenant):
reports = self.api.get_reports(tenant)
report_name_list = []
for report in reports:
-
+
# double check if indeed report belongs to tenant
if report["tenant"] == tenant:
report_name = report["info"]["name"]
report_name_list.append(report_name)
report_uuid = report["id"]
# Set report in configuration
- self.cfg.set("TENANTS:"+tenant, "report_" + report_name, report_uuid)
+ self.cfg.set("TENANTS:"+tenant, "report_" +
+ report_name, report_uuid)
# update tenant's report name list
self.cfg.set("TENANTS:"+tenant, "reports", ",".join(report_name_list))
@@ -437,13 +478,13 @@ def upload_tenants_cfg(self):
"""
token = self.cfg.get("API", "access_token")
tenant_keys = self.api.get_tenants(token)
- self.api.tenant_keys=tenant_keys
- tenant_names = ",".join(tenant_keys.keys())
-
+ self.api.tenant_keys = tenant_keys
+ tenant_names = ",".join(list(tenant_keys.keys()))
+
self.cfg.set("API", "tenants", tenant_names)
# For each tenant update also it's report list
- for tenant_name in tenant_keys.keys():
+ for tenant_name in list(tenant_keys.keys()):
self.cfg.set("API", tenant_name+"_key", tenant_keys[tenant_name])
# Update tenant's report definitions in configuration
self.upload_tenant_reports_cfg(tenant_name)
@@ -451,36 +492,46 @@ def upload_tenants_cfg(self):
def upload_tenant_defaults(self, tenant):
# check
- section_tenant = "TENANTS:"+ tenant
- section_metric = "TENANTS:"+ tenant + ":ingest-metric"
- mongo_endpoint = self.cfg.get("MONGO","endpoint").geturl()
- mongo_uri = self.cfg.get_default(section_tenant,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=tenant).geturl()
- hdfs_user = self.cfg.get("HDFS","user")
- namenode = self.cfg.get("HDFS","namenode").netloc
- hdfs_check = self.cfg.get_default(section_metric,"checkpoint_path").fill(namenode=namenode,hdfs_user=hdfs_user,tenant=tenant)
-
-
- self.cfg.get("MONGO","endpoint")
-
- self.cfg.set(section_tenant,"mongo_uri",mongo_uri)
- self.cfg.set_default(section_tenant,"mongo_method")
-
-
- self.cfg.set_default(section_metric,"ams_interval")
- self.cfg.set_default(section_metric,"ams_batch")
- self.cfg.set(section_metric,"checkpoint_path",hdfs_check.geturl())
- self.cfg.set_default(section_metric,"checkpoint_interval")
- section_sync = "TENANTS:"+ tenant + ":ingest-sync"
-
- self.cfg.set_default(section_sync,"ams_interval")
- self.cfg.set_default(section_sync,"ams_batch")
- section_stream = "TENANTS:"+ tenant + ":stream-status"
-
- self.cfg.set_default(section_stream,"ams_sub_sync")
- self.cfg.set_default(section_stream,"ams_interval")
- self.cfg.set_default(section_stream,"ams_batch")
-
-
+ section_tenant = "TENANTS:" + tenant
+ section_metric = "TENANTS:" + tenant + ":ingest-metric"
+ mongo_endpoint = self.cfg.get("MONGO", "endpoint").geturl()
+ mongo_uri = self.cfg.get_default(section_tenant, "mongo_uri").fill(
+ mongo_endpoint=mongo_endpoint, tenant=tenant).geturl()
+ hdfs_user = self.cfg.get("HDFS", "user")
+ namenode = self.cfg.get("HDFS", "namenode").netloc
+ hdfs_check = self.cfg.get_default(section_metric, "checkpoint_path").fill(
+ namenode=namenode, hdfs_user=hdfs_user, tenant=tenant)
+
+ self.cfg.get("MONGO", "endpoint")
+
+ self.cfg.set(section_tenant, "mongo_uri", mongo_uri)
+ self.cfg.set_default(section_tenant, "mongo_method")
+
+ self.cfg.set_default(section_metric, "ams_interval")
+ self.cfg.set_default(section_metric, "ams_batch")
+ self.cfg.set(section_metric, "checkpoint_path", hdfs_check.geturl())
+ self.cfg.set_default(section_metric, "checkpoint_interval")
+ section_sync = "TENANTS:" + tenant + ":ingest-sync"
+
+ self.cfg.set_default(section_sync, "ams_interval")
+ self.cfg.set_default(section_sync, "ams_batch")
+
+ section_stream = "TENANTS:" + tenant + ":stream-status"
+ streaming_kafka_servers = self.cfg.get("STREAMING", "kafka_servers")
+        if streaming_kafka_servers:
+ streaming_kafka_servers = ",".join(streaming_kafka_servers)
+ self.cfg.set(section_stream, "kafka_servers",
+ streaming_kafka_servers)
+ else:
+ self.cfg.set_default(section_stream, "kafka_servers")
+
+ self.cfg.set_default(section_stream, "ams_sub_sync")
+ self.cfg.set_default(section_stream, "ams_sub_metric")
+ self.cfg.set_default(section_stream, "ams_interval")
+ self.cfg.set_default(section_stream, "ams_batch")
+ self.cfg.set(section_stream, "output", "kafka,mongo")
+
+ self.cfg.set(section_stream, "mongo_method", "insert")
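+
+        # For a tenant named TENANT_A the resulting stream-status section would
+        # roughly contain the following (kafka_servers comes from [STREAMING],
+        # the ams_sub_* values from the schema defaults, plus the ams_interval
+        # and ams_batch defaults):
+        #   [TENANTS:TENANT_A:stream-status]
+        #   kafka_servers = localhost:9092
+        #   ams_sub_metric = status_metric
+        #   ams_sub_sync = status_sync
+        #   output = kafka,mongo
+        #   mongo_method = insert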
def save_config(self, file_path):
"""
@@ -514,18 +565,18 @@ def run_profile_update(args):
if args.tenant is not None:
# check for the following profile types
- profile_type_checklist = ["operations", "aggregations", "reports", "thresholds", "recomputations"]
- reports = []
- if args.report is not None:
- reports.append(args.report)
- else:
- reports = config.get("TENANTS:"+args.tenant,"reports")
-
- for report in reports:
- for profile_type in profile_type_checklist:
- argo.profile_update_check(args.tenant, report, profile_type)
-
+ profile_type_checklist = [
+ "reports", "operations", "aggregations", "thresholds", "recomputations", "metrics"]
+ reports = []
+ if args.report is not None:
+ reports.append(args.report)
+ else:
+ reports = config.get("TENANTS:"+args.tenant, "reports")
+ for report in reports:
+ for profile_type in profile_type_checklist:
+ argo.profile_update_check(
+ args.tenant, report, profile_type, args.date)
else:
argo.upload_tenants_cfg()
argo.save_config(conf_paths["main"])
@@ -541,6 +592,8 @@ def run_profile_update(args):
"-r", "--report", help="report", dest="report", metavar="STRING", required=False, default=None)
arg_parser.add_argument(
"-c", "--config", help="config", dest="config", metavar="STRING")
+ arg_parser.add_argument(
+ "-d", "--date", help="historic date", dest="date", metavar="STRING", required=False, default=None)
# Parse the command line arguments accordingly and introduce them to the run method
sys.exit(run_profile_update(arg_parser.parse_args()))
diff --git a/conf/argo-streaming.conf b/conf/argo-streaming.conf
index 3076ac36..c4bc4021 100644
--- a/conf/argo-streaming.conf
+++ b/conf/argo-streaming.conf
@@ -6,6 +6,9 @@ path_metric= hdfs://{{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/mdat
path_sync= hdfs://{{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/sync
writer_bin= /home/root/hdfs
+[STREAMING]
+kafka_servers=localhost:9092
+
[API]
endpoint=https://api_host
access_token=token01
@@ -32,7 +35,7 @@ job_manager= http://localhost:8081
[JOB-NAMESPACE]
ingest-metric-namespace= Ingesting metric data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/{{ams_sub}}
ingest-sync-namespace= Ingesting sync data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/{{ams_sub}}
-stream-status-namespace= Streaming status using data {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/[{{ams_sub_metric}}, {{ams_sub_sync}}]
+stream-status-namespace= Streaming status using data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/[{{ams_sub_metric}},{{ams_sub_sync}}]
[CLASSES]
ams-ingest-metric= argo.streaming.AmsIngestMetric
@@ -48,9 +51,6 @@ batch-ar= /path/to/ArgoArBatch-1.0.jar
batch-status= /path/to/ArgoStatusBatch-1.0.jar
stream-status= /path/to/streaming-status-multi2.jar
-[AMS]
-ams_endpoint= localhost:8080
-access_token= secret
[TENANTS:TENANT_A]
ams_project= TENANT_A
diff --git a/conf/conf.template b/conf/conf.template
index 1c71731b..eb380712 100644
--- a/conf/conf.template
+++ b/conf/conf.template
@@ -14,13 +14,16 @@ path_sync: {{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/sync
writer_bin: /path/to/binary
[API]
-endpoint = api.foo
+endpoint = https://api.foo
tenants = TENANTA
access_token = key0
TENANTA_key = key1
TENANTB_key = key2
TENANTC_key = key3
+[STREAMING]
+kafka_servers: localhost:9092
+
[MONGO]
endpoint = mongodb://localhost:21017
@@ -36,7 +39,7 @@ ingest-metric-namespace: Ingesting data from {{ams_endpoint}}:{{ams_port}}/v1/pr
# Template to check if a sync job with similar name already runs
ingest-sync-namespace: Ingesting sync data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/{{ams_sub}}
#Template to check if a stream status job with similar name already runs
-stream-status-namespace: Streaming status using data {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/[{{ams_sub_metric}}, {{ams_sub_sync}}]
+stream-status-namespace: Streaming status using data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/[{{ams_sub_metric}},{{ams_sub_sync}}]
[CLASSES]
# Specify class to run during job submit
diff --git a/conf/config.schema.json b/conf/config.schema.json
index 30536502..b469e73e 100644
--- a/conf/config.schema.json
+++ b/conf/config.schema.json
@@ -31,6 +31,15 @@
}
},
+ "STREAMING": {
+ "kafka_servers":{
+ "desc": "comma-separated list of kafka servers to send messages to",
+ "type": "list",
+ "optional": true,
+ "default": "localhost:9092"
+ }
+ },
+
"MONGO": {
"endpoint": {
"desc": "mongodb core endpoint",
@@ -73,6 +82,18 @@
"desc": "list of tenants",
"type": "list"
},
+ "proxy": {
+      "desc": "http proxy to be used when contacting the argo-web-api",
+ "type": "uri",
+ "optional": true,
+ "default": "http://localhost:3128"
+ },
+ "verify":{
+      "desc":"ssl verification of the argo-web-api endpoint",
+ "type":"bool",
+ "optional": true,
+ "default": "true"
+ },
"~_key": {
"~": "tenants",
"desc": "tenants key",
@@ -234,11 +255,13 @@
"~":"API.tenants",
"ams_sub_metric":{
"desc": "subscription for ingesting metric data",
- "type": "string"
+ "type": "string",
+ "default": "status_metric"
},
"ams_sub_sync":{
"desc": "subscription for ingesting sync data",
- "type": "string"
+ "type": "string",
+ "default": "status_sync"
},
"ams_interval":{
"desc": "interval for polling ams for sync data",
diff --git a/docs/submission-scripts.md b/docs/submission-scripts.md
index 02c80866..81a7bea3 100644
--- a/docs/submission-scripts.md
+++ b/docs/submission-scripts.md
@@ -1,15 +1,17 @@
# Python utility scripts for easier flink job submission/handling
-| Script | Description | Shortcut |
-|--------|-------------|---------- |
-| metric_ingestion_submit.py | Python wrapper over flink sumbit metric ingestion job.| [Details](#ingest-metric) |
-| sync_ingestion_submit.py | Python wrapper over flink submit sync ingestion job.| [Details](#ingest-synbc) |
-| ar_job_submit.py | Python wrapper over the flink batch AR job. | [Details](#batch-ar) |
-| status_job_submit.py | Python wrapper over the flink batch Status jon. | [Details](#batch-status) |
+| Script                      | Description                                             | Shortcut                  |
+| --------------------------- | ------------------------------------------------------- | ------------------------- |
+| metric_ingestion_submit.py  | Python wrapper over flink submit metric ingestion job.  | [Details](#ingest-metric) |
+| sync_ingestion_submit.py    | Python wrapper over flink submit sync ingestion job.    | [Details](#ingest-synbc)  |
+| ar_job_submit.py            | Python wrapper over the flink batch AR job.             | [Details](#batch-ar)      |
+| status_job_submit.py        | Python wrapper over the flink batch Status job.         | [Details](#batch-status)  |
 | stream_status_job_submit.py | Python wrapper over flink submit status streaming job.  | [Details](#stream-status) |
+
## Metric Ingestion Submit Script
+
 Python wrapper over flink submit metric ingestion job.
Metric Ingestion job receives metric data from an AMS endpoint subscription and stores them to a proper hdfs destination.
@@ -22,7 +24,9 @@ Metric Ingestion job receives metric data from an AMS endpoint subscription and
`-u : If specified the flink command will run without sudo`
+
## Sync Ingestion Submit Script
+
Same as Metric Ingestion but for connector data
This job connects to AMS and stores connector data (by report) in an hdfs destination
@@ -35,7 +39,9 @@ This job connects to AMS and stores connector data (by report) in an hdfs destin
`-u : If specified the flink command will run without sudo`
+
## A/R Batch Job
+
A/R job submission is a batch job that will run and finish on the cluster
`ar_job_submit.py -t -c -u -r -d -m`
@@ -54,10 +60,14 @@ A/R job submission is a batch job that will run and finish on the cluster
`--profile-check: (optional) Check if profiles used in computation are out of date and update them`
+`--historic: (optional) If set, the A/R job submission script will use the historic versions of the available profiles corresponding to the (-d) date parameter`
+
`--thresholds: (optional) Check if threshold rules are defined and use them during computations`
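+
+For example, a full run for a specific tenant, report and date might look like the following (values are illustrative):
+
+`ar_job_submit.py -t TENANT_A -r Critical -d 2019-06-06 -c /path/to/argo-streaming.conf --historic --profile-check`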
+
## Status Batch Job
+
Status job submission is a batch job that will run and finish on the cluster
`status_job_submit.py -t -c -u -r -d -m`
@@ -76,10 +86,14 @@ Status job submission is a batch job that will run and finish on the cluster
`--profile-check: (optional) Check if profiles used in computation are out of date and update them`
+`--historic: (optional) If set, the status job submission script will use the historic versions of the available profiles corresponding to the (-d) date parameter`
+
`--thresholds: (optional) Check if threshold rules are defined and use them during computations`
+
## Status Stream Job
+
 Status streaming job receives metric and sync data from AMS, calculates and generates status events which are forwarded to kafka
`stream_status_job_submit.py -t -c -u -r -d`
@@ -96,14 +110,20 @@ Status streaming job receives metric and sync data from AMS calculates and gener
`-t : long(ms) - controls default timeout for event regeneration (used in notifications)`
-### Important
+`--historic: (optional) If set, the status stream job submission script will use the historic versions of the available profiles corresponding to the (-d) date parameter`
-- Sometimes connector data (metric profiles,endpoint,group endpoints,weights) appear delayed (in comparison with the metric data) or might be missing. We have a check mechanism that looks back (up to three days) for connector data that might be missing and uses that.
+`--profile-check: (optional) Check if profiles used in computation are out of date and update them`
+`--thresholds: (optional) Check if threshold rules are defined and use them during computations`
-- Flink job receives a parameter of insert or upsert when storing results. Give the ability to honor that parameter and when insert is used, call a clean mongo script for removing (if present) any mongo a/r report data for that very day
+### Important
+
+- Sometimes connector data (metric profiles, endpoints, group endpoints, weights) appear delayed (in comparison with the metric data) or might be missing. A check mechanism looks back (up to three days) for connector data that might be missing and uses what it finds.
+
+- The Flink job receives a parameter of insert or upsert when storing results. That parameter is honored and, when insert is used, a clean mongo script is called to remove (if present) any mongo a/r report data for that very day.
## Configuration file
+
```
[HDFS]
HDFS credentials
diff --git a/docs/update_profiles.md b/docs/update_profiles.md
index 77ff0a29..27c11405 100644
--- a/docs/update_profiles.md
+++ b/docs/update_profiles.md
@@ -3,14 +3,14 @@
Argo-streaming engine maintains profile files stored in flink shared storage per tenant and report.
These profiles are essential for computing a/r and status results during flink-jobs and are not provided
automatically by connectors. Profiles include:
-- operations_profile: `TENANT_ops.json` which includes truth tables about the fundamental aggregation operations applied
- on monitor status timelines (such as 'AND', 'OR' .etc between statuses of 'OK', 'WARNING', 'CRITICAL', 'MISSING' etc.)
-- aggregation_profile: `TENANT_REPORT_aps.json` which includes information on what operations ('AND','OR') and how are
- applied on different service levels
-- report configuration profile: `TENANT_REPORT_cfg.json` which includes information on the report it self, what profiles
-it uses and how filters data
-- threhsolds_profile (optional): `TENANT_REPORT_thresholds.json` which includes thresholds rules to be applied during computation
+- operations_profile: `TENANT_REPORT_ops.json` which includes truth tables for the fundamental aggregation operations applied
+  on monitor status timelines (such as 'AND', 'OR' between statuses of 'OK', 'WARNING', 'CRITICAL', 'MISSING' etc.)
+- aggregation_profile: `TENANT_REPORT_ap.json` which includes information on which operations ('AND','OR') are applied and
+  how, across the different service levels
+- report configuration profile: `TENANT_REPORT_cfg.json` which includes information on the report itself, which profiles
+  it uses and how it filters data
+- thresholds_profile (optional): `TENANT_REPORT_thresholds.json` which includes threshold rules to be applied during computation
Each report uses an operations profile. The operation profile is defined also in argo-web-api instance at the following url
`GET https://argo-web-api.host.example/api/v2/operations_profiles/{{profile_uuid}}`
@@ -24,15 +24,15 @@ Each report optionally contains a thresholds profile. The thresholds profile is
Each report contains a configuration profile. The report is defined also in argo-web-api instance at the following url
`GET https://argo-web-api.host.example/api/v2/reports/{{report_uuid}}`
-
-
-Providing a specific `tenant` and a specific `report`, script `update_profiles` checks corresponding profiles on hdfs against
+Providing a specific `tenant` and a specific `report`, script `update_profiles` checks corresponding profiles on hdfs against
latest profiles provided by argo-web-api. If they don't match it uploads the latest argo-web-api profile definition in hdfs
# Submission scripts automatic invoke
+
Script logic is programmatically called in a/r and status job submission scripts
# Invoke manually from command line
+
Script logic can be invoked from command line by issuing
`$ ./update_profiles -t TENANT -r REPORT`
@@ -52,9 +52,14 @@ optional arguments:
report
-c STRING, --config STRING
config
+ -d STRING, --date STRING date
```
+If the `-d` parameter is set, the update_profiles script will check for the historic versions of the profiles and will
+update them on HDFS accordingly
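+
+For example, to align the stored profiles with the versions that were in effect on a given date (tenant, report and date are illustrative):
+
+`$ ./update_profiles -t TENANT_A -r Critical -d 2019-06-06`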
+
# Config file parameters used
+
Update_profiles script will search for the main argo-streaming.conf file but uses only the following
configuration parameters:
@@ -85,6 +90,7 @@ TENANT_B_key = secret2
```
# Dependencies
+
Update_profiles script is deployed alongside the other scripts included in the `./bin/` folder of argo-streaming engine
and relies on the same dependencies. Specifically it uses `requests` lib for contacting argo-web-api and python `snakebite` lib
for checking hdfs files. Because `snakebite` lib lacks upload mechanism the script relies on a binary client wrapper to upload
diff --git a/flink_jobs/OperationsManager/.gitignore b/flink_jobs/OperationsManager/.gitignore
new file mode 100644
index 00000000..6c4e323f
--- /dev/null
+++ b/flink_jobs/OperationsManager/.gitignore
@@ -0,0 +1,8 @@
+/target/
+.project
+.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
+
diff --git a/flink_jobs/OperationsManager/pom.xml b/flink_jobs/OperationsManager/pom.xml
new file mode 100644
index 00000000..e2d68601
--- /dev/null
+++ b/flink_jobs/OperationsManager/pom.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>operations.manager</groupId>
+    <artifactId>OperationsManager</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <maven.compiler.source>1.8</maven.compiler.source>
+        <maven.compiler.target>1.8</maven.compiler.target>
+        <slf4j.version>1.7.7</slf4j.version>
+        <log4j.version>1.2.17</log4j.version>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.2.4</version>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <version>${log4j.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+            <version>${slf4j.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.10.0</version>
+        </dependency>
+        <dependency>
+            <groupId>junit-addons</groupId>
+            <artifactId>junit-addons</artifactId>
+            <version>1.4</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.1</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java b/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java
new file mode 100644
index 00000000..8a89d992
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java
@@ -0,0 +1,395 @@
+package operations;
+
+//import argo.utils.RequestManager;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import org.apache.log4j.Logger;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonParser;
+import java.io.Serializable;
+import org.apache.commons.io.IOUtils;
+
+/**
+ * OperationsManager implements objects that store the information parsed
+ * from a json object containing operations profile data, or loaded from a
+ * json file.
+ *
+ * The OperationsManager keeps track of the defined statuses and the defined
+ * operations, creates a truth table containing all the combinations of statuses
+ * per operation, and converts string operations and statuses to integers
+ * based on their position in the stored lists.
+ */
+public class OperationsManager implements Serializable {
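+    // Illustrative usage sketch (the profile file name is hypothetical; the expected
+    // result follows the bundled egi_ops truth table, where OK AND CRITICAL -> CRITICAL;
+    // note that loadJson throws IOException):
+    //
+    //   OperationsManager opsMgr = new OperationsManager();
+    //   opsMgr.loadJson(new File("operations.json"));
+    //   String combined = opsMgr.op("AND", "OK", "CRITICAL"); // -> "CRITICAL"
+    //   int warningIndex = opsMgr.getIntStatus("WARNING");    // index in available_states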
+
+ private static final Logger LOG = Logger.getLogger(OperationsManager.class.getName());
+
+    private HashMap<String, Integer> states;
+    private HashMap<String, Integer> ops;
+    private ArrayList<String> revStates;
+    private ArrayList<String> revOps;
+
+ private int[][][] truthTable;
+
+ private String defaultDownState;
+ private String defaultMissingState;
+ private String defaultUnknownState;
+
+ private boolean order;
+ // private final String url = "/operations_profiles";
+
+ /**
+ * Constructs an OperationsManager object initializing fields
+ */
+ public OperationsManager() {
+        this.states = new HashMap<String, Integer>();
+        this.ops = new HashMap<String, Integer>();
+        this.revStates = new ArrayList<String>();
+        this.revOps = new ArrayList<String>();
+
+ this.truthTable = null;
+
+ this.order = false;
+ }
+
+ public String getDefaultDown() {
+ return this.defaultDownState;
+ }
+
+ public String getDefaultUnknown() {
+ return this.defaultUnknownState;
+ }
+
+ public int getDefaultUnknownInt() {
+ return this.getIntStatus(this.defaultUnknownState);
+ }
+
+ public int getDefaultDownInt() {
+ return this.getIntStatus(this.defaultDownState);
+ }
+
+ public String getDefaultMissing() {
+ return this.defaultMissingState;
+ }
+
+ public int getDefaultMissingInt() {
+ return this.getIntStatus(this.defaultMissingState);
+ }
+
+/**
+ * Clears the OperationsManager fields
+ */
+
+ public void clear() {
+        this.states = new HashMap<String, Integer>();
+        this.ops = new HashMap<String, Integer>();
+        this.revStates = new ArrayList<String>();
+        this.revOps = new ArrayList<String>();
+
+ this.truthTable = null;
+ }
+
+/**
+* Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as an int
+* @param op , the operation (e.g 0, 1)
+* @param a , the 1st status (e.g 3 )
+* @param b , the 2nd status (e.g 2)
+* @return the final status which is the combination of the two statuses retrieved from the operation's truth table
+*/
+ public int opInt(int op, int a, int b) {
+ int result = -1;
+ try {
+ result = this.truthTable[op][a][b];
+ } catch (IndexOutOfBoundsException ex) {
+ LOG.info(ex);
+ result = -1;
+ }
+
+ return result;
+ }
+/**
+ * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as an int
+ * @param op , the operation in the form of a string (e.g AND , OR)
+ * @param a , the 1st status in the form of a string (e.g OK , MISSING)
+ * @param b , the 2nd status in the form of a string (e.g OK, MISSING)
+ * @return the final status which is the combination of the two statuses retrieved from the operation's truth table
+ */
+ public int opInt(String op, String a, String b) {
+
+ int opInt = this.ops.get(op);
+ int aInt = this.states.get(a);
+ int bInt = this.states.get(b);
+
+ return this.truthTable[opInt][aInt][bInt];
+ }
+/**
+ * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as a string
+ * @param op , the operation as an int (e.g 0, 1)
+ * @param a , the 1st status as an int (e.g 1, 3)
+ * @param b , the 2nd status as an int (e.g 1, 3)
+ * @return the final status which is the combination of the two statuses , as a string,
+ * retrieved from the operation's truth table
+ */
+ public String op(int op, int a, int b) {
+ return this.revStates.get(this.truthTable[op][a][b]);
+ }
+/**
+ * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as a string
+ * @param op, the operation as a string (e.g AND, OR)
+ * @param a , the 1st status as a string (e.g OK, MISSING)
+ * @param b, the 2nd status as a string (e.g OK, MISSING)
+ * @return the final status which is the combination of the two statuses , as a string,
+ * retrieved from the operation's truth table
+ */
+ public String op(String op, String a, String b) {
+ int opInt = this.ops.get(op);
+ int aInt = this.states.get(a);
+ int bInt = this.states.get(b);
+
+ return this.revStates.get(this.truthTable[opInt][aInt][bInt]);
+ }
+/**
+ * Maps a status given as an int to its string name, based on the position of the status in the stored list of statuses
+ * @param status a status as an int (e.g 1, 2)
+ * @return the status as a string
+ */
+ public String getStrStatus(int status) {
+ return this.revStates.get(status);
+ }
+/**
+ * Maps a status as string to an int
+ * @param status ,a status as a string (e.g OK,MISSING)
+ * @return the status as an int
+ */
+ public int getIntStatus(String status) {
+ return this.states.get(status);
+ }
+/**
+ * Maps an operation as int to a string based on the position of the operation in the stored list of operations
+ * @param op , an operation as an int
+ * @return the operation as a string
+ */
+ public String getStrOperation(int op) {
+ return this.revOps.get(op);
+ }
+/**
+ * Maps an operation as string to an int
+ * @param op, an operation as a string
+ * @return the operation as an int
+ */
+ public int getIntOperation(String op) {
+ return this.ops.get(op);
+ }
+
+    public ArrayList<String> availableStates() {
+
+        return this.revStates;
+    }
+
+    public ArrayList<String> availableOps() {
+        return this.revOps;
+    }
+/**
+ * reads from a json file and stores the necessary information to the OperationsManager object fields
+ * @param jsonFile a json file containing information about the operation profiles
+ * @throws IOException
+ */
+ public void loadJson(File jsonFile) throws IOException {
+ // Clear data
+ this.clear();
+
+ BufferedReader br = null;
+ try {
+ br = new BufferedReader(new FileReader(jsonFile));
+
+ JsonParser json_parser = new JsonParser();
+ JsonElement j_element = json_parser.parse(br);
+ JsonObject jRoot = j_element.getAsJsonObject();
+ JsonArray jData = jRoot.get("data").getAsJsonArray();
+ JsonElement jItem = jData.get(0);
+
+ readJson(jItem);
+ } catch (FileNotFoundException ex) {
+ LOG.error("Could not open file:" + jsonFile.getName());
+ throw ex;
+
+ } catch (JsonParseException ex) {
+ LOG.error("File is not valid json:" + jsonFile.getName());
+ throw ex;
+ } finally {
+ // Close quietly without exceptions the buffered reader
+ IOUtils.closeQuietly(br);
+ }
+
+ }
+/**
+ * reads from a JsonElement and stores the necessary information to the OperationsManager object fields
+ * @param j_element , a JsonElement containing the operations profiles data
+ */
+ private void readJson(JsonElement j_element) {
+ JsonObject j_obj = j_element.getAsJsonObject();
+ JsonArray j_states = j_obj.getAsJsonArray("available_states");
+ JsonArray j_ops = j_obj.getAsJsonArray("operations");
+ this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString();
+ this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString();
+ this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString();
+ // Collect the available states
+ for (int i = 0; i < j_states.size(); i++) {
+ this.states.put(j_states.get(i).getAsString(), i);
+ this.revStates.add(j_states.get(i).getAsString());
+
+ }
+
+ // Collect the available operations
+ int i = 0;
+ for (JsonElement item : j_ops) {
+ JsonObject jObjItem = item.getAsJsonObject();
+ this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i);
+ this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString());
+ i++;
+ }
+ // Initialize the truthtable
+ int num_ops = this.revOps.size();
+ int num_states = this.revStates.size();
+ this.truthTable = new int[num_ops][num_states][num_states];
+
+ for (int[][] surface : this.truthTable) {
+ for (int[] line : surface) {
+ Arrays.fill(line, -1);
+ }
+ }
+
+ // Fill the truth table
+ for (JsonElement item : j_ops) {
+ JsonObject jObjItem = item.getAsJsonObject();
+ String opname = jObjItem.getAsJsonPrimitive("name").getAsString();
+ JsonArray tops = jObjItem.getAsJsonArray("truth_table");
+ // System.out.println(tops);
+
+ for (int j = 0; j < tops.size(); j++) {
+ // System.out.println(opname);
+ JsonObject row = tops.get(j).getAsJsonObject();
+
+ int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString());
+ int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString());
+ int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString());
+ int op_val = this.ops.get(opname);
+
+ // Fill in truth table
+ // Check if order sensitivity is off so to insert two truth
+ // values
+ // ...[a][b] and [b][a]
+ this.truthTable[op_val][a_val][b_val] = x_val;
+ if (!this.order) {
+ this.truthTable[op_val][b_val][a_val] = x_val;
+ }
+ }
+ }
+
+ }
+/**
+ * Calls a JsonParser to read from a list of strings containing the operations profiles data , extracts the JsonElement and
+ * calls the readJson() to read and store the operations profiles data
+ * @param opsJson , a list of strings
+ * @throws JsonParseException
+ */
+    public void loadJsonString(List<String> opsJson) throws JsonParseException {
+ // Clear data
+ this.clear();
+
+ JsonParser json_parser = new JsonParser();
+ // Grab the first - and only line of json from ops data
+ JsonElement j_element = json_parser.parse(opsJson.get(0));
+ readJson(j_element);
+ }
+
+ public int[][][] getTruthTable() {
+ return truthTable;
+ }
+
+ public void setTruthTable(int[][][] truthTable) {
+ this.truthTable = truthTable;
+ }
+
+    public HashMap<String, Integer> getStates() {
+        return states;
+    }
+
+    public void setStates(HashMap<String, Integer> states) {
+        this.states = states;
+    }
+
+    public HashMap<String, Integer> getOps() {
+        return ops;
+    }
+
+    public void setOps(HashMap<String, Integer> ops) {
+        this.ops = ops;
+    }
+
+    public ArrayList<String> getRevStates() {
+        return revStates;
+    }
+
+    public void setRevStates(ArrayList<String> revStates) {
+        this.revStates = revStates;
+    }
+
+    public ArrayList<String> getRevOps() {
+        return revOps;
+    }
+
+    public void setRevOps(ArrayList<String> revOps) {
+        this.revOps = revOps;
+    }
+
+ public String getDefaultDownState() {
+ return defaultDownState;
+ }
+
+ public void setDefaultDownState(String defaultDownState) {
+ this.defaultDownState = defaultDownState;
+ }
+
+ public String getDefaultMissingState() {
+ return defaultMissingState;
+ }
+
+ public void setDefaultMissingState(String defaultMissingState) {
+ this.defaultMissingState = defaultMissingState;
+ }
+
+ public String getDefaultUnknownState() {
+ return defaultUnknownState;
+ }
+
+ public void setDefaultUnknownState(String defaultUnknownState) {
+ this.defaultUnknownState = defaultUnknownState;
+ }
+
+ public boolean isOrder() {
+ return order;
+ }
+
+ public void setOrder(boolean order) {
+ this.order = order;
+ }
+
+ public static Logger getLOG() {
+ return LOG;
+ }
+
+// public String getUrl() {
+// return url;
+// }
+}
diff --git a/flink_jobs/OperationsManager/src/main/resources/log4j.properties b/flink_jobs/OperationsManager/src/main/resources/log4j.properties
new file mode 100644
index 00000000..da32ea0f
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/main/resources/log4j.properties
@@ -0,0 +1,23 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+log4j.rootLogger=INFO, console
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
diff --git a/flink_jobs/OperationsManager/src/main/resources/operations/operations.json b/flink_jobs/OperationsManager/src/main/resources/operations/operations.json
new file mode 100644
index 00000000..8486d509
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/main/resources/operations/operations.json
@@ -0,0 +1 @@
+{"status":{"message":"Success","code":"200"},"data":[{"id":"8ce59c4d-3761-4f25-a364-f019e394bf8b","date":"2015-01-01","name":"egi_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]}]}
\ No newline at end of file
diff --git a/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json b/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json
new file mode 100644
index 00000000..705de7f4
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json
@@ -0,0 +1,587 @@
+{ "data":[{
+ "operations": ["AND","OR"],
+ "available_states": [
+ "OK",
+ "WARNING",
+ "UNKNOWN",
+ "MISSING",
+ "CRITICAL",
+ "DOWNTIME"
+ ],
+
+ "inputs": [
+ {
+ "name":"AND",
+ "truth_table":[
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ },{
+ "name": "OR",
+ "truth_table": [
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "OK"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "WARNING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ }
+],
+"output":[
+ {"op":"1",
+ "a":"0",
+ "b":"0",
+ "x":"0"},
+ {"op":"1",
+ "a":"0",
+ "b":"2",
+ "x":"0"},
+ {"op":"1",
+ "a":"0",
+ "b":"3",
+ "x":"0"},
+ {"op":"1",
+ "a":"0",
+ "b":"4",
+ "x":"0"},
+ {"op":"1",
+ "a":"0",
+ "b":"5",
+ "x":"0"},
+
+ {"op":"1",
+ "a":"1",
+ "b":"0",
+ "x":"0"},
+ {"op":"1",
+ "a":"1",
+ "b":"1",
+ "x":"1"},
+
+
+ {"op":"1",
+ "a":"1",
+ "b":"2",
+ "x":"1"},
+
+ {"op":"1",
+ "a":"1",
+ "b":"3",
+ "x":"1"},
+ {"op":"1",
+ "a":"1",
+ "b":"4",
+ "x":"1"},
+ {"op":"1",
+ "a":"1",
+ "b":"5",
+ "x":"1"},
+
+ {"op":"1",
+ "a":"2",
+ "b":"0",
+ "x":"0"},
+
+ {"op":"1",
+ "a":"2",
+ "b":"1",
+ "x":"1"},
+ {"op":"1",
+ "a":"2",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"1",
+ "a":"2",
+ "b":"3",
+ "x":"2"},
+ {"op":"1",
+ "a":"2",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"1",
+ "a":"2",
+ "b":"5",
+ "x":"2"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"0",
+ "x":"0"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"1",
+ "x":"1"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"3",
+ "x":"3"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"1",
+ "a":"3",
+ "b":"5",
+ "x":"5"},
+
+
+ {"op":"1",
+ "a":"4",
+ "b":"0",
+ "x":"0"},
+
+ {"op":"1",
+ "a":"4",
+ "b":"1",
+ "x":"1"},
+
+ {"op":"1",
+ "a":"4",
+ "b":"2",
+ "x":"4"},
+
+ {"op":"1",
+ "a":"4",
+ "b":"3",
+ "x":"4"},
+
+
+ {"op":"1",
+ "a":"4",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"1",
+ "a":"4",
+ "b":"5",
+ "x":"4"},
+
+
+ {"op":"1",
+ "a":"5",
+ "b":"0",
+ "x":"0"},
+
+
+ {"op":"1",
+ "a":"5",
+ "b":"1",
+ "x":"1"},
+
+ {"op":"1",
+ "a":"5",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"1",
+ "a":"5",
+ "b":"3",
+ "x":"5"},
+
+ {"op":"1",
+ "a":"5",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"1",
+ "a":"5",
+ "b":"5",
+ "x":"5"},
+
+
+ {"op":"0",
+ "a":"0",
+ "b":"0",
+ "x":"0"},
+
+
+ {"op":"0",
+ "a":"0",
+ "b":"1",
+ "x":"1"},
+
+ {"op":"0",
+ "a":"0",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"0",
+ "a":"0",
+ "b":"3",
+ "x":"3"},
+
+ {"op":"0",
+ "a":"0",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"0",
+ "b":"5",
+ "x":"5"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"0",
+ "x":"1"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"1",
+ "x":"1"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"3",
+ "x":"3"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"1",
+ "b":"5",
+ "x":"5"},
+
+
+ {"op":"0",
+ "a":"2",
+ "b":"0",
+ "x":"2"},
+
+ {"op":"0",
+ "a":"2",
+ "b":"1",
+ "x":"2"},
+
+ {"op":"0",
+ "a":"2",
+ "b":"2",
+ "x":"2"},
+
+ {"op":"0",
+ "a":"2",
+ "b":"3",
+ "x":"3"},
+
+ {"op":"0",
+ "a":"2",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"2",
+ "b":"5",
+ "x":"5"},
+
+
+ {"op":"0",
+ "a":"3",
+ "b":"0",
+ "x":"3"},
+ {"op":"0",
+ "a":"3",
+ "b":"1",
+ "x":"3"},
+ {"op":"0",
+ "a":"3",
+ "b":"2",
+ "x":"3"},
+ {"op":"0",
+ "a":"3",
+ "b":"3",
+ "x":"3"},
+
+ {"op":"0",
+ "a":"3",
+ "b":"4",
+ "x":"4"},
+ {"op":"0",
+ "a":"3",
+ "b":"5",
+ "x":"5"},
+ {"op":"0",
+ "a":"4",
+ "b":"0",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"4",
+ "b":"1",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"4",
+ "b":"2",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"4",
+ "b":"3",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"4",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"4",
+ "b":"5",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"0",
+ "x":"5"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"1",
+ "x":"5"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"2",
+ "x":"5"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"3",
+ "x":"5"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"4",
+ "x":"4"},
+
+ {"op":"0",
+ "a":"5",
+ "b":"5",
+ "x":"5"}
+ ]
+
+}
+]
+}
\ No newline at end of file
diff --git a/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java b/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java
new file mode 100644
index 00000000..1e7f2cdf
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java
@@ -0,0 +1,673 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package operations;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNotNull;
+import org.junit.After;
+import org.junit.AfterClass;
+import static org.junit.Assert.assertArrayEquals;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/*
+ *
+ * A unit test class to test OperationsManager
+ */
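+// Note: these tests assume the ordering in the bundled operations.json resource,
+// where states are indexed as OK=0, WARNING=1, UNKNOWN=2, MISSING=3, CRITICAL=4,
+// DOWNTIME=5 and operations as AND=0, OR=1.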
+public class OperationsManagerTest {
+
+ public OperationsManagerTest() {
+ }
+
+ @BeforeClass
+ public static void setUpClass() {
+ assertNotNull("Test file missing", OperationsManagerTest.class.getResource("/operations/operations.json"));
+ }
+
+ @AfterClass
+ public static void tearDownClass() {
+ }
+
+ @Before
+ public void setUp() {
+ }
+
+ @After
+ public void tearDown() {
+ }
+
+ /**
+ * Test of loadOperationProfile method, of class OperationsParser.
+ */
+ /**
+ * Test of getDefaultDown method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultDown() throws IOException {
+ System.out.println("getDefaultDown");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+ String expResult = "DOWNTIME";
+ String result = instance.getDefaultDown();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultUnknown method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultUnknown() throws IOException {
+ System.out.println("getDefaultUnknown");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "UNKNOWN";
+ String result = instance.getDefaultUnknown();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultUnknownInt method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultUnknownInt() throws IOException {
+ System.out.println("getDefaultUnknownInt");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 2;
+ int result = instance.getDefaultUnknownInt();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultDownInt method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultDownInt() throws IOException {
+ System.out.println("getDefaultDownInt");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 5;
+ int result = instance.getDefaultDownInt();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultMissing method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultMissing() throws IOException {
+ System.out.println("getDefaultMissing");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "MISSING";
+ String result = instance.getDefaultMissing();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultMissingInt method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultMissingInt() throws IOException {
+ System.out.println("getDefaultMissingInt");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 3;
+ int result = instance.getDefaultMissingInt();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+// fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of clear method, of class OperationsManager.
+ */
+ @Test
+ public void testClear() throws IOException {
+ System.out.println("clear");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ instance.clear();
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of opInt method, of class OperationsManager.
+ */
+ @Test
+ public void testOpInt_3args_1() throws IOException {
+ System.out.println("opInt");
+ int op = 0;
+ int a = 0;
+ int b = 0;
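+ // Under the resource ordering, op 0 is AND and state 0 is OK, so AND(OK, OK) should map back to 0 (OK)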
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 0;
+ int result = instance.opInt(op, a, b);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of opInt method, of class OperationsManager.
+ */
+ @Test
+ public void testOpInt_3args_2() throws IOException {
+ System.out.println("opInt");
+ String op = "AND";
+ String a = "OK";
+ String b = "OK";
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 0;
+ int result = instance.opInt(op, a, b);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of op method, of class OperationsManager.
+ */
+ @Test
+ public void testOp_3args_1() throws IOException {
+ System.out.println("op");
+ int op = 0;
+ int a = 0;
+ int b = 0;
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "OK";
+ String result = instance.op(op, a, b);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of op method, of class OperationsManager.
+ */
+ @Test
+ public void testOp_3args_2() throws IOException {
+ System.out.println("op");
+ String op = "AND";
+ String a = "OK";
+ String b = "OK";
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "OK";
+ String result = instance.op(op, a, b);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getStrStatus method, of class OperationsManager.
+ */
+ @Test
+ public void testGetStrStatus() throws IOException {
+ System.out.println("getStrStatus");
+ int status = 0;
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "OK";
+ String result = instance.getStrStatus(status);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getIntStatus method, of class OperationsManager.
+ */
+ @Test
+ public void testGetIntStatus() throws IOException {
+ System.out.println("getIntStatus");
+ String status = "WARNING";
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 1;
+ int result = instance.getIntStatus(status);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getStrOperation method, of class OperationsManager.
+ */
+ @Test
+ public void testGetStrOperation() throws IOException {
+ System.out.println("getStrOperation");
+ int op = 1;
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "OR";
+ String result = instance.getStrOperation(op);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getIntOperation method, of class OperationsManager.
+ */
+ @Test
+ public void testGetIntOperation() throws IOException {
+ System.out.println("getIntOperation");
+ String op = "OR";
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ int expResult = 1;
+ int result = instance.getIntOperation(op);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of availableStates method, of class OperationsManager.
+ */
+ @Test
+ public void testAvailableStates() throws IOException {
+ System.out.println("availableStates");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ ArrayList expResult = new ArrayList<>();
+ expResult.add("OK");
+ expResult.add("WARNING");
+ expResult.add("UNKNOWN");
+ expResult.add("MISSING");
+ expResult.add("CRITICAL");
+ expResult.add("DOWNTIME");
+
+ ArrayList result = instance.availableStates();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of availableOps method, of class OperationsManager.
+ */
+ @Test
+ public void testAvailableOps() throws IOException {
+ System.out.println("availableOps");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ ArrayList expResult = new ArrayList<>();
+ expResult.add("AND");
+ expResult.add("OR");
+ ArrayList result = instance.availableOps();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of loadJson method, of class OperationsManager.
+ */
+ @Test
+ public void testLoadJson() throws Exception {
+ System.out.println("loadJson");
+ File jsonFile = new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile());
+ OperationsManager instance = new OperationsManager();
+
+ instance.loadJson(jsonFile);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of loadJsonString method, of class OperationsManager.
+ */
+// @Test
+// public void testLoadJsonString() {
+// System.out.println("loadJsonString");
+// List opsJson = null;
+// OperationsManager instance = new OperationsManager();
+// instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+//
+// instance.loadJsonString(opsJson);
+// // TODO review the generated test code and remove the default call to fail.
+// fail("The test case is a prototype.");
+// }
+ /**
+ * Test of getTruthTable method, of class OperationsManager.
+ */
+ @Test
+ public void testGetTruthTable() throws IOException, FileNotFoundException, ParseException {
+ System.out.println("getTruthTable");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+ Utils utils=new Utils();
+
+ int[][][] expResult = utils.readTruthTable();
+ int[][][] result = instance.getTruthTable();
+ assertArrayEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setTruthTable method, of class OperationsManager.
+ */
+ @Test
+ public void testSetTruthTable() throws IOException, FileNotFoundException, ParseException {
+ System.out.println("setTruthTable");
+ Utils utils=new Utils();
+
+
+ int[][][] truthTable = utils.readTruthTable();
+ OperationsManager instance = new OperationsManager();
+ instance.setTruthTable(truthTable);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getStates method, of class OperationsManager.
+ */
+ @Test
+ public void testGetStates() throws IOException {
+ System.out.println("getStates");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ HashMap expResult = new HashMap<>();
+ expResult.put("OK", 0);
+ expResult.put("WARNING", 1);
+ expResult.put("UNKNOWN", 2);
+ expResult.put("MISSING", 3);
+ expResult.put("CRITICAL", 4);
+ expResult.put("DOWNTIME", 5);
+ HashMap result = instance.getStates();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setStates method, of class OperationsManager.
+ */
+ @Test
+ public void testSetStates() {
+ System.out.println("setStates");
+ HashMap states = new HashMap<>();
+ states.put("OK", 0);
+ states.put("WARNING", 1);
+ states.put("UNKNOWN", 2);
+ states.put("MISSING", 3);
+ states.put("CRITICAL", 4);
+ states.put("DOWNTIME", 5);
+ OperationsManager instance = new OperationsManager();
+ instance.setStates(states);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getOps method, of class OperationsManager.
+ */
+ @Test
+ public void testGetOps() throws IOException {
+ System.out.println("getOps");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ HashMap expResult = new HashMap<>();
+ expResult.put("AND", 0);
+ expResult.put("OR", 1);
+ HashMap result = instance.getOps();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setOps method, of class OperationsManager.
+ */
+ @Test
+ public void testSetOps() {
+ System.out.println("setOps");
+ HashMap ops = new HashMap<>();
+ ops.put("AND", 0);
+ ops.put("OR", 1);
+
+ OperationsManager instance = new OperationsManager();
+ instance.setOps(ops);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getRevStates method, of class OperationsManager.
+ */
+ @Test
+ public void testGetRevStates() throws IOException {
+ System.out.println("getRevStates");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ ArrayList expResult = new ArrayList<>();
+ expResult.add("OK");
+ expResult.add("WARNING");
+ expResult.add("UNKNOWN");
+ expResult.add("MISSING");
+ expResult.add("CRITICAL");
+ expResult.add("DOWNTIME");
+ ArrayList result = instance.getRevStates();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setRevStates method, of class OperationsManager.
+ */
+ @Test
+ public void testSetRevStates() {
+ System.out.println("setRevStates");
+ ArrayList revStates = new ArrayList<>();
+ revStates.add("OK");
+ revStates.add("WARNING");
+ revStates.add("UNKNWON");
+ revStates.add("MISSING");
+ revStates.add("CRITICAL");
+ revStates.add("DOWNTIME");
+
+ OperationsManager instance = new OperationsManager();
+ instance.setRevStates(revStates);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getRevOps method, of class OperationsManager.
+ */
+ @Test
+ public void testGetRevOps() throws IOException {
+ System.out.println("getRevOps");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ ArrayList expResult = new ArrayList<>();
+ expResult.add("AND");
+ expResult.add("OR");
+ ArrayList result = instance.getRevOps();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setRevOps method, of class OperationsManager.
+ */
+ @Test
+ public void testSetRevOps() {
+ System.out.println("setRevOps");
+ ArrayList revOps = new ArrayList<>();
+ revOps.add("AND");
+ revOps.add("OR");
+ OperationsManager instance = new OperationsManager();
+ instance.setRevOps(revOps);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultDownState method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultDownState() throws IOException {
+ System.out.println("getDefaultDownState");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "DOWNTIME";
+ String result = instance.getDefaultDownState();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setDefaultDownState method, of class OperationsManager.
+ */
+ @Test
+ public void testSetDefaultDownState() {
+ System.out.println("setDefaultDownState");
+ String defaultDownState = "DOWNTIME";
+ OperationsManager instance = new OperationsManager();
+ instance.setDefaultDownState(defaultDownState);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultMissingState method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultMissingState() throws IOException {
+ System.out.println("getDefaultMissingState");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ String expResult = "MISSING";
+ String result = instance.getDefaultMissingState();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setDefaultMissingState method, of class OperationsManager.
+ */
+ @Test
+ public void testSetDefaultMissingState() {
+ System.out.println("setDefaultMissingState");
+ String defaultMissingState = "MISSING";
+ OperationsManager instance = new OperationsManager();
+ instance.setDefaultMissingState(defaultMissingState);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDefaultUnknownState method, of class OperationsManager.
+ */
+ @Test
+ public void testGetDefaultUnknownState() throws IOException {
+ System.out.println("getDefaultUnknownState");
+
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+ String expResult = "UNKNOWN";
+ String result = instance.getDefaultUnknownState();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setDefaultUnknownState method, of class OperationsManager.
+ */
+ @Test
+ public void testSetDefaultUnknownState() {
+ System.out.println("setDefaultUnknownState");
+ String defaultUnknownState = "UNKNOWN";
+ OperationsManager instance = new OperationsManager();
+ instance.setDefaultUnknownState(defaultUnknownState);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of isOrder method, of class OperationsManager.
+ */
+ @Test
+ public void testIsOrder() throws IOException {
+ System.out.println("isOrder");
+ OperationsManager instance = new OperationsManager();
+ instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()));
+
+ boolean expResult = false;
+ boolean result = instance.isOrder();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+// fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setOrder method, of class OperationsManager.
+ */
+ @Test
+ public void testSetOrder() {
+ System.out.println("setOrder");
+ boolean order = false;
+ OperationsManager instance = new OperationsManager();
+ instance.setOrder(order);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+}
diff --git a/flink_jobs/OperationsManager/src/test/java/operations/Utils.java b/flink_jobs/OperationsManager/src/test/java/operations/Utils.java
new file mode 100644
index 00000000..750b175b
--- /dev/null
+++ b/flink_jobs/OperationsManager/src/test/java/operations/Utils.java
@@ -0,0 +1,124 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package operations;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+
+/**
+ *
+ * A utils class to process resource files for tests and provide the information
+ */
+public class Utils {
+
+ public int[][][] readTruthTable() throws IOException, FileNotFoundException, ParseException, java.text.ParseException {
+
+ BufferedReader br = null;
+ try {
+ br = new BufferedReader(new FileReader(Utils.class.getResource("/operations/truthtable.json").getFile()));
+
+ JsonParser json_parser = new JsonParser();
+ JsonElement j_element = json_parser.parse(br);
+ JsonObject jRoot = j_element.getAsJsonObject();
+ JsonArray jData = jRoot.get("data").getAsJsonArray();
+ JsonElement jItem = jData.get(0);
+ int[][][] truthTable = readJson(jItem);
+ return truthTable;
+ } catch (FileNotFoundException ex) {
+
+ throw ex;
+
+ }
+
+ }
+
+ private int[][][] readJson(JsonElement j_element) {
+ HashMap operations = new HashMap();
+ ArrayList revOps = new ArrayList();
+ HashMap states = new HashMap();
+ ArrayList revStates = new ArrayList();
+
+ JsonObject j_obj = j_element.getAsJsonObject();
+ JsonArray j_ops = j_obj.getAsJsonArray("operations");
+ int i = 0;
+ for (JsonElement item : j_ops) {
+ String jObjItem = item.getAsString();
+ operations.put(jObjItem, i);
+ revOps.add(jObjItem);
+ i++;
+ }
+ JsonArray j_states = j_obj.getAsJsonArray("available_states");
+ i = 0;
+ for (JsonElement item : j_states) {
+ String jObjItem = item.getAsString();
+ states.put(jObjItem, i);
+ revStates.add(jObjItem);
+ i++;
+ }
+
+ int num_ops = revOps.size();
+ int num_states = revStates.size();
+ int[][][] truthTable = new int[num_ops][num_states][num_states];
+
+ for (int[][] surface : truthTable) {
+ for (int[] line : surface) {
+ Arrays.fill(line, -1);
+ }
+ }
+ JsonArray input = j_obj.getAsJsonArray("inputs");
+
+ // Fill the truth table
+ for (JsonElement item : input) {
+ JsonObject jObjItem = item.getAsJsonObject();
+ String opname = jObjItem.getAsJsonPrimitive("name").getAsString();
+ JsonArray tops = jObjItem.getAsJsonArray("truth_table");
+ // System.out.println(tops);
+
+ for (int j = 0; j < tops.size(); j++) {
+ // System.out.println(opname);
+ JsonObject row = tops.get(j).getAsJsonObject();
+
+ int a_val = revStates.indexOf(row.getAsJsonPrimitive("a").getAsString());
+ int b_val = revStates.indexOf(row.getAsJsonPrimitive("b").getAsString());
+ int x_val = revStates.indexOf(row.getAsJsonPrimitive("x").getAsString());
+ int op_val = revOps.indexOf(opname);
+
+ // Fill in truth table
+ // Check if order sensitivity is off so to insert two truth
+ // values
+ // ...[a][b] and [b][a]
+ truthTable[op_val][a_val][b_val] = x_val;
+ truthTable[op_val][b_val][a_val] = x_val;
+
+ }
+ }
+
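+ // Only the integer-encoded "output" section below is returned to the tests;
+ // the table built from the string-based "inputs" section above is not used further.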
+ int[][][] outputTruthTable = new int[num_ops][num_states][num_states];
+ JsonArray output = j_obj.getAsJsonArray("output");
+
+ // Fill the truth table
+ for (JsonElement item : output) {
+ JsonObject jObjItem = item.getAsJsonObject();
+ int op = jObjItem.getAsJsonPrimitive("op").getAsInt();
+ int a = jObjItem.getAsJsonPrimitive("a").getAsInt();
+ int b = jObjItem.getAsJsonPrimitive("b").getAsInt();
+ int x = jObjItem.getAsJsonPrimitive("x").getAsInt();
+ outputTruthTable[op][a][b] = x;
+
+ }
+ return outputTruthTable;
+ }
+}
diff --git a/flink_jobs/Timelines/.gitignore b/flink_jobs/Timelines/.gitignore
new file mode 100644
index 00000000..6c4e323f
--- /dev/null
+++ b/flink_jobs/Timelines/.gitignore
@@ -0,0 +1,8 @@
+/target/
+.project
+.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
+
diff --git a/flink_jobs/Timelines/pom.xml b/flink_jobs/Timelines/pom.xml
new file mode 100644
index 00000000..78170300
--- /dev/null
+++ b/flink_jobs/Timelines/pom.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>timeline.manager</groupId>
+  <artifactId>Timelines</artifactId>
+  <version>1.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+  <dependencies>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <version>5.6.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <version>5.6.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <version>5.6.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>joda-time</groupId>
+      <artifactId>joda-time</artifactId>
+      <version>1.6</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>${log4j.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>${slf4j.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+      <version>1.1.1</version>
+    </dependency>
+  </dependencies>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <maven.compiler.source>1.8</maven.compiler.source>
+    <maven.compiler.target>1.8</maven.compiler.target>
+    <slf4j.version>1.7.7</slf4j.version>
+    <log4j.version>1.2.17</log4j.version>
+  </properties>
+</project>
\ No newline at end of file
diff --git a/flink_jobs/Timelines/src/main/java/timelines/Timeline.java b/flink_jobs/Timelines/src/main/java/timelines/Timeline.java
new file mode 100644
index 00000000..576a5b48
--- /dev/null
+++ b/flink_jobs/Timelines/src/main/java/timelines/Timeline.java
@@ -0,0 +1,486 @@
+package timelines;
+
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.joda.time.DateTime;
+import org.joda.time.LocalDate;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Timeline class implements objects that store a status per timestamp. The
+ * timestamp/status pairs are stored in ascending timestamp order (in a
+ * TreeMap). The key of the map is the timestamp, in the form of a DateTime,
+ * and the status is expressed as an integer, given as input.
+ *
+ * A timeline can be constructed empty; a map of samples can then be passed as
+ * a parameter and stored.
+ *
+ * A timeline can also be constructed from a timestamp, which defines the
+ * timeline's date.
+ *
+ * Finally, a timeline can be constructed from a timestamp and a status; the
+ * timestamp defines the timeline's date and the status is stored as the
+ * status of the 00:00:00 timestamp.
+ *
+ * Timeline supports insertion of (timestamp, status) pairs.
+ *
+ */
+public class Timeline {
+
+ private LocalDate date;
+
+ static Logger LOG = LoggerFactory.getLogger(Timeline.class);
+
+ private TreeMap<DateTime, Integer> samples;
+
+ /**
+ * Constructs an empty timeline
+ */
+ public Timeline() {
+ this.date = null;
+ this.samples = new TreeMap<DateTime, Integer>();
+
+ }
+
+ /**
+ *
+ * @param timestamp a timestamp
+ *
+ * Constructs a timeline whose date is defined by the given timestamp
+ *
+ */
+ public Timeline(String timestamp) {
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp);
+ tmp_date = tmp_date.withTime(0, 0, 0, 0);
+ this.date = tmp_date.toLocalDate();
+ this.samples = new TreeMap<DateTime, Integer>();
+ }
+
+ /**
+ *
+ * @param timestamp a timestamp
+ * @param state , the status paired with the timestamp
+ *
+ * Constructs a timeline where the timestamp defines the timeline's date and
+ * the state is stored as the status of midnight (00:00:00)
+ *
+ */
+ Timeline(String timestamp, int state) {
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp);
+ tmp_date = tmp_date.withTime(0, 0, 0, 0);
+ this.date = tmp_date.toLocalDate();
+ this.samples = new TreeMap<DateTime, Integer>();
+ this.samples.put(tmp_date, state);
+
+ }
+
+ /**
+ *
+ * @param timestamp a timestamp
+ * @return the state for the input timestamp as is stored in the map
+ */
+ public int get(String timestamp) {
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp);
+ if (this.samples.floorEntry(tmp_date) == null) {
+ return -1;
+ // throw new NullPointerException("no item found in timeline, size of timeline:" + this.samples.size() + "," + tmp_date.toString());
+ }
+
+ return this.samples.floorEntry(tmp_date).getValue();
+ }
+
+ /**
+ *
+ * @param point a timestamp , passed as datetime
+ * @return the state for the input timestamp as is stored in the map
+ */
+ public int get(DateTime point) {
+ if (this.samples.floorEntry(point) == null) {
+ return -1;
+ //throw new NullPointerException("no item found in timeline, size of timeline:" + this.samples.size() + "," + point.toString());
+ }
+ return this.samples.floorEntry(point).getValue();
+ }
+
+ /**
+ *
+ * @param timestamp a timestamp
+ * @param status the status for the given timestamp
+ *
+ * inserts a pair of timestamp, status in the map.
+ */
+ public void insert(String timestamp, int status) {
+
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp);
+ this.samples.put(tmp_date, status);
+ }
+
+ /**
+ *
+ * @param timestamp, a timestamp in the form of datetime
+ * @param status , the status of the given timestamp
+ *
+ * inserts a pair of timestamp, status in the map
+ *
+ */
+ public void insert(DateTime timestamp, int status) {
+ samples.put(timestamp, status);
+
+ }
+
+ /**
+ *
+ * @param timestamps a map of timestamp, status to be stored in the timeline
+ */
+ public void insertStringTimeStamps(TreeMap<String, Integer> timestamps) {
+ for (String dt : timestamps.keySet()) {
+ int status = timestamps.get(dt);
+ this.insert(dt, status);
+
+ }
+ }
+
+ /**
+ *
+ * @param timestamps a map of timestamp, status to be stored in the
+ * timeline. the timestamps are in the form of datetime
+ */
+ public void insertDateTimeStamps(TreeMap<DateTime, Integer> timestamps) {
+ for (DateTime dt : timestamps.keySet()) {
+ int status = timestamps.get(dt);
+ this.insert(dt, status);
+ }
+ this.optimize();
+
+ }
+
+ /**
+ *
+ * @param timestamp, a timestamp
+ * @param state, the status for the given timestamp
+ *
+ * inserts in the map of pairs (timestamp, status) a new entry where the new
+ * timestamp is the midnight (00:00:00) of the date of the given timestamp
+ * and the status is the given state
+ */
+ public void setFirst(String timestamp, int state) {
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp);
+ this.samples = new TreeMap<DateTime, Integer>();
+ tmp_date = tmp_date.withTime(0, 0, 0, 0);
+ this.samples.put(tmp_date, state);
+ this.optimize();
+ }
+
+ /**
+ * clears the map of timestamps, status
+ */
+ public void clear() {
+ this.samples.clear();
+ }
+
+ /**
+ *
+ * @param samples an entry set of timestamp,status clears the existing map
+ * and stores the new entry set to the empty map
+ */
+ public void bulkInsert(Set<Map.Entry<DateTime, Integer>> samples) {
+ this.samples.clear();
+ for (Map.Entry<DateTime, Integer> entry : samples) {
+ this.samples.put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+ *
+ * @return the entry set of the map of timestamp, status
+ */
+ public Set<Map.Entry<DateTime, Integer>> getSamples() {
+ return samples.entrySet();
+ }
+
+ /**
+ *
+ * @return the date of the timeline
+ */
+ public LocalDate getDate() {
+ return this.date;
+ }
+
+ /**
+ *
+ * @return the number of the timestamps stored in the map
+ */
+ public int getLength() {
+ return this.samples.size();
+ }
+
+ /**
+ *
+ * @return checks if the map of timestamp,state is empty
+ */
+ public boolean isEmpty() {
+ return this.samples.isEmpty();
+ }
+
+ /**
+ * optimizes the map of (timestamp, status) pairs: when two or more consecutive
+ * timestamps carry the same status only the first pair is kept; whenever a
+ * timestamp's status differs from the previous timestamp's status the new
+ * pair is kept as well
+ */
+
+ public void optimize() {
+ TreeMap<DateTime, Integer> optimal = new TreeMap<DateTime, Integer>();
+ int prevstate = -1;
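+ // prevstate = -1 marks that no entry has been kept yet, so the first sample is always stored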
+ for (DateTime key : this.samples.keySet()) {
+ int value = this.samples.get(key);
+ if (prevstate == -1) {
+
+ optimal.put(key, value);
+ prevstate = value;
+
+ }
+ if (prevstate != value) {
+ optimal.put(key, value);
+ prevstate = value;
+ }
+ }
+
+ this.samples = optimal;
+ }
+
+ /**
+ *
+ * @return return the timestamps in the map
+ */
+ public Set<DateTime> getPoints() {
+ return this.samples.keySet();
+ }
+
+ /**
+ *
+ * @param second, the second timeline whose timestamp,status pairs will be
+ * aggregated with the timestamp,status pairs of the existing timeline
+ * @param truthTable , the truth table defining how statuses combine
+ * @param op , the operation used to combine the statuses of the two timelines
+ */
+
+ public void aggregate(Timeline second, int[][][] truthTable, int op) {
+ if (this.isEmpty()) {
+ this.bulkInsert(second.getSamples());
+ // Optimize even when we have a single timeline for aggregation
+ this.optimize();
+ return;
+ }
+
+ Timeline result = new Timeline();
+
+ // Slice for first
+ for (DateTime point : this.getPoints()) {
+ result.insert(point, -1);
+ }
+ // Slice for second
+ for (DateTime point : second.getPoints()) {
+ result.insert(point, -1);
+ }
+
+ // Iterate over result and ask
+ for (DateTime point : result.getPoints()) {
+ int a = this.get(point);
+ int b = second.get(point);
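+ // get() returns -1 when a timeline has no sample at or before this point;
+ // such points cannot be combined through the truth table and are skipped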
+ if(a!=-1 && b!=-1){
+ int x = -1;
+ x = truthTable[op][a][b];
+ if (x == -1) {
+ x = truthTable[op][b][a];
+ }
+
+ result.insert(point, x);
+ }
+ }
+
+ result.optimize();
+
+ // Engrave the result in this timeline
+ this.clear();
+ this.bulkInsert(result.getSamples());
+ }
+
+ /**
+ *
+ * @param timestampList, a list of pairs of timestamp, status where status is in the form of string
+ * @param states, a list of the existing states
+ * @return a sorted map of timestamp, status pairs in an ascending order
+ * receives pairs of timestamp , status where status is a string (e.g "OK", "WARNING") and converts the string to an integer
+ * based on the position of the status in the existing list of the states. Next this pair is stored in the map
+ *
+ */
+ public TreeMap<String, Integer> buildStringTimeStampMap(ArrayList<String[]> timestampList, ArrayList<String> states) {
+
+ TreeMap<String, Integer> timestampMap = new TreeMap<String, Integer>();
+
+ for (String[] timestamp : timestampList) {
+
+ String time = timestamp[0];
+
+ timestampMap.put(time, states.indexOf(timestamp[1]));
+ }
+ return timestampMap;
+
+ }
+
+ /**
+ *
+ * @param timestampList, a list of pairs of timestamp, status where status is in the form of string and timestamp is in the form of a datetime
+ * @param states, a list of the existing states
+ * @return a sorted map of timestamp, status pairs in an ascending order
+ * receives pairs of timestamp , status where status is a string (e.g "OK", "WARNING") and converts the string to an integer
+ * based on the position of the status in the existing list of the states. Next this pair is stored in the map
+ *
+ */
+
+ public TreeMap<DateTime, Integer> buildDateTimeStampMap(ArrayList<String[]> timestampList, ArrayList<String> states) {
+
+ TreeMap<DateTime, Integer> timestampMap = new TreeMap<DateTime, Integer>();
+
+ for (String[] timestamp : timestampList) {
+
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ tmp_date = fmt.parseDateTime(timestamp[0]);
+ timestampMap.put(tmp_date, states.indexOf(timestamp[1]));
+ }
+ return timestampMap;
+
+ }
+
+ /**
+ *
+ * @param timestamp, a timestamp
+ * removes a pair of timestamp , status from the map
+ */
+
+ public void removeTimeStamp(DateTime timestamp) {
+
+ if (this.samples.containsKey(timestamp)) {
+ Iterator iter = this.samples.keySet().iterator();
+ while (iter.hasNext()) {
+ DateTime tmpTimestamp = (DateTime) iter.next();
+ if (tmpTimestamp.equals(timestamp)) {
+ iter.remove();
+ break;
+ }
+ }
+ }
+
+ }
+ /**
+ *
+ * @return the number of the times a status changes between the timestamps of the timeline , after the map is optimized
+ */
+
+ public int calcStatusChanges() {
+ this.optimize();
+ return this.samples.keySet().size() - 1;
+ }
+
+ /**
+ *
+ * @param timestamp, a timestamp
+ * @param availStates , the list of the available states
+ *
+ * ensures that the map contains an entry at midnight (00:00:00): the latest
+ * entry before midnight, if any, is moved to midnight, otherwise midnight is
+ * added with status "MISSING"
+ */
+
+ public void replacePreviousDateStatus(DateTime timestamp, ArrayList<String> availStates) {
+
+ DateTime firstTime = timestamp;
+ firstTime = firstTime.withTime(0, 0, 0, 0);
+
+ DateTime firstEntry = this.samples.floorKey(firstTime);
+ if (firstEntry != null && !firstEntry.equals(firstTime)) {
+ int previousStatus = this.samples.get(firstEntry);
+ this.samples.put(firstTime, previousStatus);
+ this.samples.remove(firstEntry);
+ } else if (firstEntry == null) {
+ this.samples.put(firstTime, availStates.indexOf("MISSING"));
+ }
+
+ this.optimize();
+
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 7;
+ hash = 83 * hash + Objects.hashCode(this.date);
+ hash = 83 * hash + Objects.hashCode(this.samples);
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ final Timeline other = (Timeline) obj;
+ if (!Objects.equals(this.date, other.date)) {
+ return false;
+ }
+ if (!Objects.equals(this.samples, other.samples)) {
+ return false;
+ }
+ return true;
+ }
+ /**
+ *
+ * @param truthTable the truthtable of the combination of various statuses with each other
+ * @param op , the operation
+ * @param a, the status a
+ * @param b, the status b
+ * @return , the result of the combination as defined from the truth table of the defined operation
+ */
+
+ public int opInt(int[][][] truthTable, int op, int a, int b) {
+ int result = -1;
+ try {
+ result = truthTable[op][a][b];
+ } catch (IndexOutOfBoundsException ex) {
+ // LOG.info(ex);
+ result = -1;
+ }
+
+ return result;
+ }
+
+}
diff --git a/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java b/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java
new file mode 100644
index 00000000..32eb8998
--- /dev/null
+++ b/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java
@@ -0,0 +1,186 @@
+package timelines;
+
+import java.text.ParseException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.joda.time.DateTime;
+import org.joda.time.LocalDate;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+
+/**
+ TimelineAggregator class implements an aggregator which receives a list of
+ different timelines and concludes them into one timeline by aggregating all
+ of their timestamps and statuses
+
+ */
+public class TimelineAggregator {
+
+ private Timeline output;
+ private Map<String, Timeline> inputs;
+
+ /**
+ *
+ * @param timestamp a timestamp
+ * @throws ParseException Constructs the TimelineAggregator object
+ */
+ public TimelineAggregator(String timestamp) throws ParseException {
+ this.output = new Timeline(timestamp);
+ this.inputs = new HashMap<String, Timeline>();
+ }
+
+ /**
+ * Constructs the TimelineAggregator object
+ */
+ public TimelineAggregator() {
+ this.output = new Timeline();
+ this.inputs = new HashMap<String, Timeline>();
+
+ }
+
+ /**
+ *
+ * @param inputs, a map of timelines Constructs a TimelineAggregator object,
+ * containing the timelines
+ */
+ public TimelineAggregator(Map<String, Timeline> inputs) {
+ this.inputs = inputs;
+ this.output = new Timeline();
+ }
+
+ /**
+ * Clears the input timelines and the output timeline
+ */
+ public void clear() {
+ this.output.clear();
+ this.inputs.clear();
+ }
+
+ /**
+ *
+ * @param date
+ * @return the date given as input with midnight time (00:00:00) in
+ * yyyy-MM-dd'T'HH:mm:ss'Z' format
+ *
+ */
+ public String tsFromDate(String date) {
+ DateTime tmp_date = new DateTime();
+ DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd");
+ tmp_date = fmt.parseDateTime(date);
+ tmp_date = tmp_date.withTime(0, 0, 0, 0);
+ return tmp_date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"));
+ }
+
+ /**
+ *
+ * @param name the owner of the created timeline
+ * @param timestamp , a timestamp
+ * @param prevState , a status
+ *
+ * Creates a timeline with the given status set at midnight of the date
+ * defined from timestamp and add this timeline to the input timelines
+ */
+ public void createTimeline(String name, String timestamp, int prevState) {
+ Timeline temp = new Timeline(timestamp, prevState);
+ this.inputs.put(name, temp);
+ }
+
+ /**
+ *
+ * @param name , the owner of the created timeline
+ * @param timestamp, a timestamp
+ * @param status , a status for the given timestamp if the owner does not
+ * have an existing timeline add a new timeline to the inputs
+ *
+ */
+ public void insert(String name, String timestamp, int status) {
+ // Check if timeline exists, if not create it
+ if (this.inputs.containsKey(name) == false) {
+ Timeline temp = new Timeline(timestamp, status);
+ this.inputs.put(name, temp);
+ return;
+ }
+
+ this.inputs.get(name).insert(timestamp, status);
+ }
+
+ /**
+ *
+ * @param name the owner of the created timeline
+ * @param timestamp, a timestamp
+ * @param status , the status of the given timestamp if the owner does not
+ * have an existing timeline add a new timeline to the inputs the created
+ * timeline contains the given status for the midnight (00:00:00) of the
+ * timestamp
+ */
+
+ public void setFirst(String name, String timestamp, int status) {
+ // Check if timeline exists, if not create it
+ if (this.inputs.containsKey(name) == false) {
+ Timeline temp = new Timeline(timestamp, status);
+ this.inputs.put(name, temp);
+ return;
+ }
+
+ this.inputs.get(name).setFirst(timestamp, status);
+ }
+
+ /**
+ *
+ * @return the date of the output timeline
+ */
+ public LocalDate getDate() {
+ return output.getDate();
+ }
+
+ public Set<Entry<DateTime, Integer>> getSamples() {
+ return this.output.getSamples();
+ }
+
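+ /**
+ * Re-initializes the aggregator: clears the input timelines and replaces the
+ * output with a fresh timeline whose date is taken from the given timestamp.
+ */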
+ public void clearAndSetDate(String timestamp) {
+ this.output = new Timeline(timestamp);
+ this.inputs.clear();
+
+ }
+
+ /**
+ *
+ * @param truthTable a truth table containing all possible status
+ * combinations for the existing operations
+ * @param op , the operation to be applied in order to aggregate the
+ * timeline statuses
+ *
+ * aggregates the input timelines into one combined output including all the
+ * timestamp status combinations as produced from the input timelines
+ */
+ public void aggregate(int[][][] truthTable, int op) {
+ if (this.output != null) {
+ this.output.clear();
+ }
+
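+ // The output is folded incrementally: each input timeline is aggregated into
+ // the running result using the truth table of the selected operation.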
+ //Iterate through all available input timelines and aggregate
+ for (Timeline item : this.inputs.values()) {
+ this.output.aggregate(item, truthTable, op);
+ }
+
+ }
+
+ public Timeline getOutput() {
+ return output;
+ }
+
+ public void setOutput(Timeline output) {
+ this.output = output;
+ }
+
+ public Map<String, Timeline> getInputs() {
+ return inputs;
+ }
+
+ public void setInputs(Map<String, Timeline> inputs) {
+ this.inputs = inputs;
+ }
+
+}
diff --git a/flink_jobs/Timelines/src/main/java/timelines/Utils.java b/flink_jobs/Timelines/src/main/java/timelines/Utils.java
new file mode 100644
index 00000000..d6ee8f6a
--- /dev/null
+++ b/flink_jobs/Timelines/src/main/java/timelines/Utils.java
@@ -0,0 +1,97 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package timelines;
+
+import java.io.IOException;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.util.TimeZone;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+
+/**
+ * A utils class to provide functions processing dates
+ */
+public class Utils {
+
+ static Logger LOG = LoggerFactory.getLogger(Utils.class);
+
+ public static String convertDateToString(String format, DateTime date) throws ParseException {
+
+ //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+ DateTimeFormatter dtf = DateTimeFormat.forPattern(format);
+ String dateString = date.toString(dtf);
+ return dateString;
+
+ }
+
+ public static DateTime convertStringtoDate(String format, String dateStr) throws ParseException {
+ DateTimeFormatter formatter = DateTimeFormat.forPattern(format);
+ DateTime dt = formatter.parseDateTime(dateStr);
+
+ return dt;
+ }
+
+ public static DateTime createDate(String format, Date dateStr, int hour, int min, int sec) throws ParseException {
+
+ //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+ SimpleDateFormat sdf = new SimpleDateFormat(format);
+ sdf.setTimeZone(TimeZone.getDefault());
+ Calendar newCalendar = Calendar.getInstance();
+ newCalendar.setTime(dateStr);
+
+ newCalendar.set(Calendar.HOUR_OF_DAY, hour);
+ newCalendar.set(Calendar.MINUTE, min);
+ newCalendar.set(Calendar.SECOND, sec);
+ newCalendar.set(Calendar.MILLISECOND, 0);
+ return new DateTime(newCalendar.getTime());
+ }
+
+ public static boolean isPreviousDate(String format, Date nowDate, Date firstDate) throws ParseException {
+ // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+
+ Calendar cal = Calendar.getInstance();
+ SimpleDateFormat sdf = new SimpleDateFormat(format);
+ sdf.setTimeZone(TimeZone.getDefault());
+ cal.setTime(nowDate);
+
+ Calendar calFirst = Calendar.getInstance();
+ calFirst.setTime(firstDate);
+
+ if (firstDate.before(nowDate)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ public static DateTime createDate(String format, int year, int month, int day, int hour, int min, int sec) throws ParseException {
+
+ // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+ SimpleDateFormat sdf = new SimpleDateFormat(format);
+ sdf.setTimeZone(TimeZone.getDefault());
+ Calendar newCalendar = Calendar.getInstance();
+ newCalendar.set(Calendar.YEAR, year);
+ newCalendar.set(Calendar.MONTH, month);
+ newCalendar.set(Calendar.DAY_OF_MONTH, day);
+
+ newCalendar.set(Calendar.HOUR_OF_DAY, hour);
+ newCalendar.set(Calendar.MINUTE, min);
+ newCalendar.set(Calendar.SECOND, sec);
+ newCalendar.set(Calendar.MILLISECOND, 0);
+
+ return new DateTime(newCalendar.getTime());
+ }
+
+}
diff --git a/flink_jobs/Timelines/src/main/resources/log4j.properties b/flink_jobs/Timelines/src/main/resources/log4j.properties
new file mode 100644
index 00000000..da32ea0f
--- /dev/null
+++ b/flink_jobs/Timelines/src/main/resources/log4j.properties
@@ -0,0 +1,23 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+log4j.rootLogger=INFO, console
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
diff --git a/flink_jobs/Timelines/src/main/resources/timelines/timeline.json b/flink_jobs/Timelines/src/main/resources/timelines/timeline.json
new file mode 100644
index 00000000..30b368fd
--- /dev/null
+++ b/flink_jobs/Timelines/src/main/resources/timelines/timeline.json
@@ -0,0 +1,362 @@
+{ "data":{
+ "operations":["AND","OR"],
+ "available_states": [
+ "OK",
+ "WARNING",
+ "UNKNOWN",
+ "MISSING",
+ "CRITICAL",
+ "DOWNTIME"
+ ],
+ "operation":"AND",
+ "inputs": [
+ {
+ "name":"timeline1",
+ "timestamps":[
+
+ {"timestamp": "2021-01-15T00:15:50Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T01:15:50Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T02:15:50Z",
+ "status": "OK"
+ },
+ {"timestamp": "2021-01-15T03:15:50Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T15:15:50Z",
+ "status": "OK"
+ },
+ {"timestamp": "2021-01-15T20:16:50Z",
+ "status": "WARNING"
+ }
+
+ ]
+ }, {
+ "name":"timeline2",
+ "timestamps":[
+ {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK"
+ },
+ {"timestamp": "2021-01-15T00:05:00Z",
+ "status": "OK"
+ },
+ {"timestamp": "2021-01-15T12:00:00Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T14:00:00Z",
+ "status": "OK"
+ },
+
+ {"timestamp": "2021-01-15T23:05:00Z",
+ "status": "WARNING"
+ }
+ ]
+ }, {
+ "name":"timeline3",
+ "timestamps":[
+ {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK"
+ },
+ {"timestamp": "2021-01-15T00:05:00Z",
+ "status": "UNKNOWN"
+ },
+
+ {"timestamp": "2021-01-15T02:00:00Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T03:00:00Z",
+ "status": "OK"
+ },
+
+ {"timestamp": "2021-01-15T06:00:00Z",
+ "status": "OK"
+ }
+ ]
+ }, {
+ "name":"timeline4",
+ "timestamps":[
+ {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK"
+ },
+ {"timestamp": "2021-01-15T20:00:00Z",
+ "status": "CRITICAL"
+ },
+
+ {"timestamp": "2021-01-15T21:00:00Z",
+ "status": "OK"
+ },
+ {"timestamp": "2021-01-15T22:00:00Z",
+ "status": "CRITICAL"
+ },
+
+ {"timestamp": "2021-01-15T23:00:00Z",
+ "status": "OK"
+ }
+ ]
+ }
+
+],
+ "output":{
+ "name":"merged",
+ "timestamps":[
+ {"timestamp": "2021-01-15T00:00:00Z" , "status": "MISSING"
+ },
+ {"timestamp": "2021-01-15T00:15:50Z",
+ "status": "UNKNOWN"
+ },
+ {"timestamp": "2021-01-15T02:00:00Z",
+ "status": "WARNING"
+ },
+ {"timestamp": "2021-01-15T03:00:00Z",
+ "status": "OK"
+ },
+
+ {"timestamp": "2021-01-15T03:15:50Z",
+ "status": "WARNING"
+ },
+
+ {"timestamp": "2021-01-15T15:15:50Z",
+ "status": "OK"
+ },
+ {"timestamp": "2021-01-15T20:00:00Z",
+ "status": "CRITICAL"
+ }
+ ,
+ {"timestamp": "2021-01-15T21:00:00Z",
+ "status": "WARNING"
+ },
+
+ {"timestamp": "2021-01-15T22:00:00Z",
+ "status": "CRITICAL"
+ },
+
+ {"timestamp": "2021-01-15T23:00:00Z",
+ "status": "WARNING"
+ }
+ ]
+ }, "operation_truth_table": [
+ {
+ "name": "AND",
+ "truth_table": [
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ },
+ {
+ "name": "OR",
+ "truth_table": [
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "OK"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "WARNING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ }
+ ]
+}
+
+
+}
diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java
new file mode 100644
index 00000000..1f9d15e8
--- /dev/null
+++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java
@@ -0,0 +1,389 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package timelines;
+
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import org.joda.time.DateTime;
+import org.joda.time.LocalDate;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ *
+ * Unit tests for the TimelineAggregator class
+ */
+public class TimelineAggregatorTest {
+
+ public TimelineAggregatorTest() {
+ }
+
+ @BeforeClass
+ public static void setUpClass() {
+ }
+
+ @AfterClass
+ public static void tearDownClass() {
+ }
+
+ @Before
+ public void setUp() {
+ }
+
+ @After
+ public void tearDown() {
+ }
+
+ /**
+ * Test of clear method, of class TimelineAggregator.
+ */
+ @Test
+ public void testClear() {
+ System.out.println("clear");
+ TimelineAggregator instance = new TimelineAggregator();
+ instance.clear();
+ // TODO review the generated test code and remove the default call to fail.
+
+ }
+
+ /**
+ * Test of tsFromDate method, of class TimelineAggregator.
+ */
+// @Test
+// public void testTsFromDate() {
+// System.out.println("tsFromDate");
+// String date = "";
+// TimelineAggregator instance = new TimelineAggregator();
+// String expResult = "";
+// String result = instance.tsFromDate(date);
+// assertEquals(expResult, result);
+// // TODO review the generated test code and remove the default call to fail.
+//
+// }
+ /**
+ * Test of createTimeline method, of class TimelineAggregator.
+ */
+ @Test
+ public void testCreateTimeline() throws ParseException {
+ System.out.println("createTimeline");
+ String name = "test";
+ String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0));
+ int prevState = 0;
+ TimelineAggregator instance = new TimelineAggregator();
+ instance.createTimeline(name, timestamp, prevState);
+ HashMap<String, Timeline> expRes = new HashMap<>();
+ Timeline exptimeline = new Timeline(timestamp);
+ exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0);
+ expRes.put(name, exptimeline);
+
+ assertEquals(expRes.toString(), instance.getInputs().toString());
+ // TODO review the generated test code and remove the default call to fail.
+
+ }
+
+ /**
+ * Test of insert method, of class TimelineAggregator.
+ */
+ @Test
+ public void testInsert() throws ParseException {
+ System.out.println("insert");
+ String name = "test";
+ String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0));
+
+ int status = 0;
+ TimelineAggregator instance = new TimelineAggregator();
+ instance.insert(name, timestamp, status);
+ HashMap<String, Timeline> expRes = new HashMap<>();
+ Timeline exptimeline = new Timeline(timestamp);
+ exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0);
+ expRes.put(name, exptimeline);
+
+ assertEquals(expRes.toString(), instance.getInputs().toString());
+
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setFirst method, of class TimelineAggregator.
+ */
+ @Test
+ public void testSetFirst() throws ParseException {
+ System.out.println("setFirst");
+ String name = "test1";
+ String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0));
+ String name2 = "test2";
+ String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0));
+ HashMap<String, Timeline> map = new HashMap<>();
+ map.put(name, new Timeline(timestamp));
+ map.put(name2, new Timeline(timestamp2));
+
+ int status = 0;
+ TimelineAggregator instance = new TimelineAggregator(map);
+ instance.insert(name, timestamp, status);
+ instance.setFirst(name2, timestamp2, status);
+ // TODO review the generated test code and remove the default call to fail.
+
+ HashMap<String, Timeline> expRes = new HashMap<>();
+ Timeline exptimeline = new Timeline(timestamp);
+ exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0);
+ Timeline exptimeline2 = new Timeline(timestamp);
+
+ exptimeline2.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0);
+ expRes.put(name2, exptimeline);
+ expRes.put(name, exptimeline2);
+
+ assertEquals(expRes, instance.getInputs());
+ }
+
+ /**
+ * Test of getDate method, of class TimelineAggregator.
+ */
+ @Test
+ public void testGetDate() throws ParseException {
+ System.out.println("getDate");
+ String name = "test1";
+ String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0));
+ int status = 0;
+ TimelineAggregator instance = new TimelineAggregator(timestamp);
+ instance.insert(name, timestamp, status);
+
+ LocalDate expResult = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0).toLocalDate();
+ LocalDate result = instance.getDate();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getSamples method, of class TimelineAggregator.
+ */
+ @Test
+ public void testGetSamples() throws ParseException {
+ System.out.println("getSamples");
+ String name = "test1";
+ String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0));
+ String name2 = "test2";
+ String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0));
+ HashMap<String, Timeline> map = new HashMap<>();
+ map.put(name, new Timeline(timestamp));
+ map.put(name2, new Timeline(timestamp2));
+
+ TimelineAggregator instance = new TimelineAggregator(map);
+ instance.aggregate(createTruthTable(), 0);
+ TreeMap<DateTime, Integer> expRes = new TreeMap<>();
+ Timeline exptimeline = new Timeline();
+ exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0);
+ Set<Entry<DateTime, Integer>> expResult = expRes.entrySet();
+ Set<Entry<DateTime, Integer>> result = instance.getSamples();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of aggregate method, of class TimelineAggregator.
+ */
+ @Test
+ public void testAggregate() throws IOException, FileNotFoundException, org.json.simple.parser.ParseException, ParseException {
+ System.out.println("aggregate");
+ TimelineUtils timelineUtils = new TimelineUtils();
+ TimelineUtils.TimelineJson timelinejson = timelineUtils.readTimelines();
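+ // timeline.json supplies the input timelines, the operation, the truth table
+ // and the expected merged output used in this test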
+
+ ArrayList<TreeMap<DateTime, Integer>> inputTimelines = timelinejson.getInputTimelines();
+ int op = timelinejson.getOperation();
+ int[][][] truthTable = timelinejson.getTruthTable();
+ ArrayList<String> states = timelinejson.getStates();
+
+ TimelineAggregator instance = new TimelineAggregator();
+
+ HashMap<String, Timeline> inputs = new HashMap<>();
+ int counter = 1;
+ for (TreeMap<DateTime, Integer> map : inputTimelines) {
+ Timeline timeline = new Timeline();
+ checkForMissingMidnightStatus(map, states.indexOf("MISSING"));
+
+ timeline.insertDateTimeStamps(map);
+ inputs.put(timeline + "_" + counter, timeline);
+ counter++;
+ }
+ instance.setInputs(inputs);
+
+ instance.aggregate(truthTable, op);
+
+ Set<Entry<DateTime, Integer>> expRes = timelinejson.getOutputTimeline().entrySet();
+ Set<Entry<DateTime, Integer>> res = instance.getOutput().getSamples();
+ assertEquals(expRes, res);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getOutput method, of class TimelineAggregator.
+ */
+ @Test
+ public void testGetOutput() {
+ System.out.println("getOutput");
+ TimelineAggregator instance = new TimelineAggregator();
+ Timeline expResult = null;
+ Timeline result = instance.getOutput();
+ //assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setOutput method, of class TimelineAggregator.
+ */
+ @Test
+ public void testSetOutput() {
+ System.out.println("setOutput");
+ Timeline output = null;
+ TimelineAggregator instance = new TimelineAggregator();
+ instance.setOutput(output);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getInputs method, of class TimelineAggregator.
+ */
+ @Test
+ public void testGetInputs() {
+ System.out.println("getInputs");
+ TimelineAggregator instance = new TimelineAggregator();
+ Map<String, Timeline> expResult = null;
+ Map<String, Timeline> result = instance.getInputs();
+// assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setInputs method, of class TimelineAggregator.
+ */
+ @Test
+ public void testSetInputs() {
+ System.out.println("setInputs");
+ Map<String, Timeline> inputs = null;
+ TimelineAggregator instance = new TimelineAggregator();
+ instance.setInputs(inputs);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ private int[][][] createTruthTable() {
+
+ int[][][] truthtable = new int[2][6][6];
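+ // Assumed encoding (mirrors timeline.json): truthtable[op][a][b] holds the status
+ // index produced by combining statuses a and b, with indices following the
+ // available_states order OK=0, WARNING=1, UNKNOWN=2, MISSING=3, CRITICAL=4,
+ // DOWNTIME=5; the first index selects the operation. Cells left at -1 are the
+ // pairs not spelled out here (the lower triangle), presumably resolved through
+ // the symmetric (b, a) entry.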
+
+ truthtable[0][0][0] = 0;
+ truthtable[0][0][1] = 0;
+ truthtable[0][0][2] = 0;
+ truthtable[0][0][3] = 0;
+ truthtable[0][0][4] = 0;
+ truthtable[0][0][5] = 0;
+
+ truthtable[0][1][0] = -1;
+ truthtable[0][1][1] = 1;
+ truthtable[0][1][2] = 1;
+ truthtable[0][1][3] = 1;
+ truthtable[0][1][4] = 1;
+ truthtable[0][1][5] = 1;
+
+ truthtable[0][2][0] = -1;
+ truthtable[0][2][1] = -1;
+ truthtable[0][2][2] = 2;
+ truthtable[0][2][3] = 2;
+ truthtable[0][2][4] = 4;
+ truthtable[0][2][5] = 2;
+
+ truthtable[0][3][0] = -1;
+ truthtable[0][3][1] = -1;
+ truthtable[0][3][2] = -1;
+ truthtable[0][3][3] = 3;
+ truthtable[0][3][4] = 4;
+ truthtable[0][3][5] = 5;
+
+ truthtable[0][4][0] = -1;
+ truthtable[0][4][1] = -1;
+ truthtable[0][4][2] = -1;
+ truthtable[0][4][3] = -1;
+ truthtable[0][4][4] = 4;
+ truthtable[0][4][5] = 5;
+
+ truthtable[0][5][0] = -1;
+ truthtable[0][5][1] = -1;
+ truthtable[0][5][2] = -1;
+ truthtable[0][5][3] = -1;
+ truthtable[0][5][4] = -1;
+ truthtable[0][5][5] = 5;
+
+ truthtable[1][0][0] = 0;
+ truthtable[1][0][1] = 1;
+ truthtable[1][0][2] = 2;
+ truthtable[1][0][3] = 3;
+ truthtable[1][0][4] = 4;
+ truthtable[1][0][5] = 5;
+
+ truthtable[1][1][0] = -1;
+ truthtable[1][1][1] = 1;
+ truthtable[1][1][2] = 2;
+ truthtable[1][1][3] = 3;
+ truthtable[1][1][4] = 4;
+ truthtable[1][1][5] = 5;
+
+ truthtable[1][2][0] = -1;
+ truthtable[1][2][1] = -1;
+ truthtable[1][2][2] = 2;
+ truthtable[1][2][3] = 3;
+ truthtable[1][2][4] = 4;
+ truthtable[1][2][5] = 5;
+
+ truthtable[1][3][0] = -1;
+ truthtable[1][3][1] = -1;
+ truthtable[1][3][2] = -1;
+ truthtable[1][3][3] = 3;
+ truthtable[1][3][4] = 4;
+ truthtable[1][3][5] = 5;
+
+ truthtable[1][4][0] = -1;
+ truthtable[1][4][1] = -1;
+ truthtable[1][4][2] = -1;
+ truthtable[1][4][3] = -1;
+ truthtable[1][4][4] = 4;
+ truthtable[1][4][5] = 4;
+
+ truthtable[1][5][0] = -1;
+ truthtable[1][5][1] = -1;
+ truthtable[1][5][2] = -1;
+ truthtable[1][5][3] = -1;
+ truthtable[1][5][4] = -1;
+ truthtable[1][5][5] = 5;
+
+ return truthtable;
+
+ }
+
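+ /**
+ * Inserts the MISSING status at midnight (2021-01-15T00:00:00Z) when an input
+ * timeline from timeline.json does not already define a status there, so that
+ * every timeline starts the day with a known state.
+ */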
+ private void checkForMissingMidnightStatus(TreeMap<DateTime, Integer> map, int missingStatus) throws ParseException {
+ DateTime midnight = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00);
+ if (!map.containsKey(midnight)) {
+ map.put(midnight, missingStatus);
+ }
+ }
+
+}
diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java
new file mode 100644
index 00000000..747b9aa9
--- /dev/null
+++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java
@@ -0,0 +1,499 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package timelines;
+
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import org.joda.time.DateTime;
+import org.joda.time.LocalDate;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ *
+ * Unit tests for the Timeline class
+ */
+public class TimelineTest {
+
+ public TimelineTest() {
+ }
+
+ @BeforeClass
+ public static void setUpClass() {
+ }
+
+ @AfterClass
+ public static void tearDownClass() {
+ }
+
+ @Before
+ public void setUp() {
+ }
+
+ @After
+ public void tearDown() {
+ }
+
+ /**
+ * Test of get method, of class Timeline.
+ */
+ @Test
+ public void testGet_String() throws ParseException {
+ System.out.println("get");
+
+ DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+ DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1);
+ String timestampStr = timestamp.toString(dtf);
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ int expResult = -1;
+ int result = instance.get(timestampStr);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of get method, of class Timeline.
+ */
+ @Test
+ public void testGet_DateTime() throws ParseException {
+ System.out.println("get");
+ DateTime point = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1);
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ int expResult = 1;
+ int result = instance.get(point);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of insert method, of class Timeline.
+ */
+ @Test
+ public void testInsert_String_int() throws ParseException {
+ System.out.println("insert");
+ DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+ DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1);
+ String timestampStr = timestamp.toString(dtf);
+
+ int status = 1;
+ Timeline instance = new Timeline();
+ instance.insert(timestampStr, status);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of insert method, of class Timeline.
+ */
+ @Test
+ public void testInsert_DateTime_int() throws ParseException {
+ System.out.println("insert");
+ DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1);
+
+ int status = 0;
+ Timeline instance = new Timeline();
+ instance.insert(timestamp, status);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of insertStringTimeStamps method, of class Timeline.
+ */
+ @Test
+ public void testInsertStringTimeStamps() throws ParseException {
+ System.out.println("insertStringTimeStamps");
+ TreeMap<String, Integer> timestamps = createStringTimestampList();
+ Timeline instance = new Timeline();
+ instance.insertStringTimeStamps(timestamps);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of insertDateTimeStamps method, of class Timeline.
+ */
+ @Test
+ public void testInsertDateTimeStamps() throws ParseException {
+ System.out.println("insertDateTimeStamps");
+ TreeMap<DateTime, Integer> timestamps = createTimestampList();
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(timestamps);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of setFirst method, of class Timeline.
+ */
+ @Test
+ public void testSetFirst() throws ParseException {
+ System.out.println("setFirst");
+ DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+ DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 0, 0, 0, 1);
+ String timestampStr = timestamp.toString(dtf);
+
+ int state = 0;
+ Timeline instance = new Timeline();
+ instance.setFirst(timestampStr, state);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of clear method, of class Timeline.
+ */
+ @Test
+ public void testClear() throws ParseException {
+ System.out.println("clear");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ instance.clear();
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of bulkInsert method, of class Timeline.
+ */
+ @Test
+ public void testBulkInsert() throws ParseException {
+ System.out.println("bulkInsert");
+ Set<Map.Entry<DateTime, Integer>> samples = createTimestampList().entrySet();
+ Timeline instance = new Timeline();
+ instance.bulkInsert(samples);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getSamples method, of class Timeline.
+ */
+ @Test
+ public void testGetSamples() throws ParseException {
+ System.out.println("getSamples");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ Set<Map.Entry<DateTime, Integer>> expResult = instance.getSamples();
+ Set<Map.Entry<DateTime, Integer>> result = instance.getSamples();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getDate method, of class Timeline.
+ */
+ @Test
+ public void testGetDate() throws ParseException {
+ System.out.println("getDate");
+ DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+ Timeline instance = new Timeline(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0).toString(dtf));
+
+ LocalDate expResult = new LocalDate(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0));
+ LocalDate result = instance.getDate();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ //fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getLength method, of class Timeline.
+ */
+ @Test
+ public void testGetLength() throws ParseException {
+ System.out.println("getLength");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ int expResult = 2;
+ int result = instance.getLength();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of isEmpty method, of class Timeline.
+ */
+ @Test
+ public void testIsEmpty() throws ParseException {
+ System.out.println("isEmpty");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+
+ boolean expResult = false;
+ boolean result = instance.isEmpty();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of optimize method, of class Timeline.
+ */
+ @Test
+ public void testOptimize() throws ParseException {
+ System.out.println("optimize");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ instance.optimize();
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of getPoints method, of class Timeline.
+ */
+ @Test
+ public void testGetPoints() throws ParseException {
+ System.out.println("getPoints");
+ Timeline instance = new Timeline();
+ TreeMap<DateTime, Integer> map = createTimestampList();
+ instance.insertDateTimeStamps(map);
+ Set<DateTime> expResult = new TreeSet<>();
+ expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15));
+ expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23));
+
+ Set<DateTime> result = instance.getPoints();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of aggregate method, of class Timeline.
+ */
+ @Test
+ public void testAggregate() throws ParseException {
+ System.out.println("aggregate");
+ Timeline second = new Timeline();
+ second.insertDateTimeStamps(createSecondTimeline());
+ int[][][] truthTable = createTruthTable();
+ int op = 0;
+ Timeline instance = new Timeline();
+ instance.aggregate(second, truthTable, op);
+ Set<Map.Entry<DateTime, Integer>> expResult = createMerged().entrySet();
+ Set<Map.Entry<DateTime, Integer>> result = instance.getSamples();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+ /**
+ * Test of calcStatusChanges method, of class Timeline.
+ */
+ @Test
+ public void testCalcStatusChanges() throws ParseException {
+ System.out.println("calcStatusChanges");
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ int expResult = 1;
+ int result = instance.calcStatusChanges();
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ /**
+ * Test of replacePreviousDateStatus method, of class Timeline.
+ */
+ @Test
+ public void testReplacePreviousDateStatus() throws ParseException {
+ System.out.println("replacePreviousDateStatus");
+ DateTime date = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 0);
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ ArrayList<String> availStates = new ArrayList<>();
+ availStates.add("OK");
+ availStates.add("WARNING");
+ availStates.add("UNKNOWN");
+ availStates.add("MISSING");
+ availStates.add("CRITICAL");
+ availStates.add("DOWNTIME");
+
+ instance.replacePreviousDateStatus(date, availStates);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+ /**
+ * Test of opInt method, of class Timeline.
+ */
+ @Test
+ public void testOpInt() throws ParseException {
+ System.out.println("opInt");
+ int[][][] truthTable = createTruthTable();
+ int op = 0;
+ int a = 0;
+ int b = 0;
+ Timeline instance = new Timeline();
+ instance.insertDateTimeStamps(createTimestampList());
+ int expResult = 0;
+ int result = instance.opInt(truthTable, op, a, b);
+ assertEquals(expResult, result);
+ // TODO review the generated test code and remove the default call to fail.
+ // fail("The test case is a prototype.");
+ }
+
+ private TreeMap<DateTime, Integer> createTimestampList() throws ParseException {
+ TreeMap<DateTime, Integer> map = new TreeMap<>();
+
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0);
+ return map;
+//
+ }
+
+ private TreeMap<String, Integer> createStringTimestampList() throws ParseException {
+ TreeMap<String, Integer> map = new TreeMap<>();
+
+ DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23).toString(dtf), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10).toString(dtf), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15).toString(dtf), 0);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1).toString(dtf), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4).toString(dtf), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5).toString(dtf), 0);
+ return map;
+//
+ }
+
+ private TreeMap<DateTime, Integer> createSecondTimeline() throws ParseException {
+ TreeMap<DateTime, Integer> map = new TreeMap<>();
+
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0);
+ return map;
+//
+ }
+
+ private int[][][] createTruthTable() {
+
+ int[][][] truthtable = new int[2][6][6];
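+ // Same assumed layout as in TimelineAggregatorTest#createTruthTable:
+ // truthtable[op][a][b] -> combined status index, -1 for pairs not listed explicitly.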
+
+ truthtable[0][0][0] = 0;
+ truthtable[0][0][1] = 0;
+ truthtable[0][0][2] = 0;
+ truthtable[0][0][3] = 0;
+ truthtable[0][0][4] = 0;
+ truthtable[0][0][5] = 0;
+
+ truthtable[0][1][0] = -1;
+ truthtable[0][1][1] = 1;
+ truthtable[0][1][2] = 1;
+ truthtable[0][1][3] = 1;
+ truthtable[0][1][4] = 1;
+ truthtable[0][1][5] = 1;
+
+ truthtable[0][2][0] = -1;
+ truthtable[0][2][1] = -1;
+ truthtable[0][2][2] = 2;
+ truthtable[0][2][3] = 2;
+ truthtable[0][2][4] = 4;
+ truthtable[0][2][5] = 2;
+
+ truthtable[0][3][0] = -1;
+ truthtable[0][3][1] = -1;
+ truthtable[0][3][2] = -1;
+ truthtable[0][3][3] = 3;
+ truthtable[0][3][4] = 4;
+ truthtable[0][3][5] = 5;
+
+ truthtable[0][4][0] = -1;
+ truthtable[0][4][1] = -1;
+ truthtable[0][4][2] = -1;
+ truthtable[0][4][3] = -1;
+ truthtable[0][4][4] = 4;
+ truthtable[0][4][5] = 5;
+
+ truthtable[0][5][0] = -1;
+ truthtable[0][5][1] = -1;
+ truthtable[0][5][2] = -1;
+ truthtable[0][5][3] = -1;
+ truthtable[0][5][4] = -1;
+ truthtable[0][5][5] = 5;
+
+ truthtable[1][0][0] = 0;
+ truthtable[1][0][1] = 1;
+ truthtable[1][0][2] = 2;
+ truthtable[1][0][3] = 3;
+ truthtable[1][0][4] = 4;
+ truthtable[1][0][5] = 5;
+
+ truthtable[1][1][0] = -1;
+ truthtable[1][1][1] = 1;
+ truthtable[1][1][2] = 2;
+ truthtable[1][1][3] = 3;
+ truthtable[1][1][4] = 4;
+ truthtable[1][1][5] = 5;
+
+ truthtable[1][2][0] = -1;
+ truthtable[1][2][1] = -1;
+ truthtable[1][2][2] = 2;
+ truthtable[1][2][3] = 3;
+ truthtable[1][2][4] = 4;
+ truthtable[1][2][5] = 5;
+
+ truthtable[1][3][0] = -1;
+ truthtable[1][3][1] = -1;
+ truthtable[1][3][2] = -1;
+ truthtable[1][3][3] = 3;
+ truthtable[1][3][4] = 4;
+ truthtable[1][3][5] = 5;
+
+ truthtable[1][4][0] = -1;
+ truthtable[1][4][1] = -1;
+ truthtable[1][4][2] = -1;
+ truthtable[1][4][3] = -1;
+ truthtable[1][4][4] = 4;
+ truthtable[1][4][5] = 4;
+
+ truthtable[1][5][0] = -1;
+ truthtable[1][5][1] = -1;
+ truthtable[1][5][2] = -1;
+ truthtable[1][5][3] = -1;
+ truthtable[1][5][4] = -1;
+ truthtable[1][5][5] = 5;
+
+ return truthtable;
+
+ }
+
+ private TreeMap<DateTime, Integer> createMerged() throws ParseException {
+ TreeMap<DateTime, Integer> map = new TreeMap<>();
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0);
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1);
+
+ map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0);
+ return map;
+ }
+
+}
diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java
new file mode 100644
index 00000000..7df7ab4e
--- /dev/null
+++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java
@@ -0,0 +1,188 @@
+/*
+ * To change this license header, choose License Headers in Project Properties.
+ * To change this template file, choose Tools | Templates
+ * and open the template in the editor.
+ */
+package timelines;
+
+import timelines.TimelineUtils;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.TreeMap;
+import org.joda.time.DateTime;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
+/**
+ *
+ * A utility class that reads the timeline.json test resource and exposes its contents to the tests
+ */
+public class TimelineUtils {
+
+ public TimelineJson readTimelines() throws IOException, FileNotFoundException, ParseException, java.text.ParseException {
+
+ JSONObject timelineJSONObj = readJsonFromFile(TimelineUtils.class.getResource("/timelines/timeline.json").getFile());
+ TimelineJson timelinejson = buildTimelines(timelineJSONObj);
+ return timelinejson;
+ }
+
+ private JSONObject readJsonFromFile(String path) throws FileNotFoundException, IOException, org.json.simple.parser.ParseException {
+ JSONParser parser = new JSONParser();
+ URL url = TimelineUtils.class.getResource(path);
+ Object obj = parser.parse(new FileReader(path));
+
+ JSONObject jsonObject = (JSONObject) obj;
+
+ return jsonObject;
+ }
+
+ public TimelineJson buildTimelines(JSONObject jsonObject) throws java.text.ParseException {
+
+ ArrayList<String> states = new ArrayList<>();
+ ArrayList<String> operations = new ArrayList<>();
+ ArrayList<TreeMap<DateTime, Integer>> inputTimelines = new ArrayList<>();
+ TreeMap<DateTime, Integer> outputTimeline = new TreeMap<>();
+ JSONObject dataObject = (JSONObject) jsonObject.get("data");
+
+ JSONArray stateList = (JSONArray) dataObject.get("available_states");
+ JSONArray operationList = (JSONArray) dataObject.get("operations");
+ String operation = (String) dataObject.get("operation");
+ Iterator<String> operationsIter = operationList.iterator();
+
+ while (operationsIter.hasNext()) {
+ String op = operationsIter.next();
+ operations.add(op);
+ }
+ JSONArray inputs = (JSONArray) dataObject.get("inputs");
+ JSONObject output = (JSONObject) dataObject.get("output");
+ Iterator<String> stateIter = stateList.iterator();
+ while (stateIter.hasNext()) {
+ String state = stateIter.next();
+ states.add(state);
+ }
+
+ Iterator<JSONObject> inputIter = inputs.iterator();
+ while (inputIter.hasNext()) {
+ JSONObject timelineJSONObj = inputIter.next();
+ JSONArray timestampList = (JSONArray) timelineJSONObj.get("timestamps");
+ Iterator timeIter = timestampList.iterator();
+ TreeMap<DateTime, Integer> map = new TreeMap<>();
+ while (timeIter.hasNext()) {
+ JSONObject timestatus = (JSONObject) timeIter.next();
+ String time = (String) timestatus.get("timestamp");
+ String status = (String) timestatus.get("status");
+ map.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status));
+ }
+ // add each input timeline once, after all of its timestamps have been read
+ inputTimelines.add(map);
+
+ }
+
+ JSONArray timestampList = (JSONArray) output.get("timestamps");
+ Iterator timeIter = timestampList.iterator();
+
+ while (timeIter.hasNext()) {
+ JSONObject timestatus = (JSONObject) timeIter.next();
+ String time = (String) timestatus.get("timestamp");
+ String status = (String) timestatus.get("status");
+ outputTimeline.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status));
+
+ }
+
+ JSONArray opTruthTable = (JSONArray) dataObject.get("operation_truth_table");
+
+ Iterator opTruthTableIter = opTruthTable.iterator();
+ int[][][] table = new int[operations.size()][states.size()][states.size()];
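+ // pre-fill every cell with -1 so combinations missing from the JSON truth
+ // table stay distinguishable from valid status indices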
+ for (int[][] surface : table) {
+ for (int[] line : surface) {
+ Arrays.fill(line, -1);
+ }
+ }
+ while (opTruthTableIter.hasNext()) {
+ JSONObject truthOperationObj = (JSONObject) opTruthTableIter.next();
+ String truthOp = (String) truthOperationObj.get("name");
+ int truthOpInt = operations.indexOf(truthOp);
+ JSONArray truthTable = (JSONArray) truthOperationObj.get("truth_table");
+ Iterator truthTableIter = truthTable.iterator();
+ while (truthTableIter.hasNext()) {
+
+ JSONObject truthTableObj = (JSONObject) truthTableIter.next();
+ String a = (String) truthTableObj.get("a");
+ int aInt = states.indexOf(a);
+ String b = (String) truthTableObj.get("b");
+ int bInt = states.indexOf(b);
+ String x = (String) truthTableObj.get("b");
+ int xInt = states.indexOf(x);
+ table[truthOpInt][aInt][bInt] = xInt;
+
+ }
+ }
+ TimelineJson timelineJsonObject = new TimelineJson(inputTimelines, outputTimeline, operations.indexOf(operation),table,states);
+ return timelineJsonObject;
+ }
+
+ public class TimelineJson {
+
+ private ArrayList<TreeMap<DateTime, Integer>> inputTimelines;
+ private TreeMap<DateTime, Integer> outputTimeline;
+ private Integer operation;
+ private int[][][] truthTable;
+ private ArrayList<String> states;
+
+ public TimelineJson(ArrayList<TreeMap<DateTime, Integer>> inputTimelines, TreeMap<DateTime, Integer> outputTimeline, Integer operation, int[][][] truthTable, ArrayList<String> states) {
+ this.inputTimelines = inputTimelines;
+ this.outputTimeline = outputTimeline;
+ this.operation = operation;
+ this.truthTable = truthTable;
+ this.states = states;
+ }
+
+ public ArrayList<TreeMap<DateTime, Integer>> getInputTimelines() {
+ return inputTimelines;
+ }
+
+ public void setInputTimelines(ArrayList<TreeMap<DateTime, Integer>> inputTimelines) {
+ this.inputTimelines = inputTimelines;
+ }
+
+ public TreeMap<DateTime, Integer> getOutputTimeline() {
+ return outputTimeline;
+ }
+
+ public void setOutputTimeline(TreeMap<DateTime, Integer> outputTimeline) {
+ this.outputTimeline = outputTimeline;
+ }
+
+ public Integer getOperation() {
+ return operation;
+ }
+
+ public void setOperation(Integer operation) {
+ this.operation = operation;
+ }
+
+ public int[][][] getTruthTable() {
+ return truthTable;
+ }
+
+ public void setTruthTable(int[][][] truthTable) {
+ this.truthTable = truthTable;
+ }
+
+ public ArrayList<String> getStates() {
+ return states;
+ }
+
+ public void setStates(ArrayList<String> states) {
+ this.states = states;
+ }
+ }
+
+
+}
diff --git a/flink_jobs/ams_ingest_metric/.gitignore b/flink_jobs/ams_ingest_metric/.gitignore
index b83d2226..6c4e323f 100644
--- a/flink_jobs/ams_ingest_metric/.gitignore
+++ b/flink_jobs/ams_ingest_metric/.gitignore
@@ -1 +1,8 @@
/target/
+.project
+.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
+
diff --git a/flink_jobs/ams_ingest_metric/pom.xml b/flink_jobs/ams_ingest_metric/pom.xml
index ef75498c..fea028e6 100644
--- a/flink_jobs/ams_ingest_metric/pom.xml
+++ b/flink_jobs/ams_ingest_metric/pom.xml
@@ -109,12 +109,12 @@
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>httpclient</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>fluent-hc</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
@@ -182,17 +182,17 @@
 <artifactId>hbase-client</artifactId>
 <version>1.2.0-cdh5.7.4</version>
-
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>httpclient</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>fluent-hc</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
+
diff --git a/flink_jobs/ams_ingest_sync/.gitignore b/flink_jobs/ams_ingest_sync/.gitignore
index b83d2226..6c4e323f 100644
--- a/flink_jobs/ams_ingest_sync/.gitignore
+++ b/flink_jobs/ams_ingest_sync/.gitignore
@@ -1 +1,8 @@
/target/
+.project
+.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
+
diff --git a/flink_jobs/ams_ingest_sync/pom.xml b/flink_jobs/ams_ingest_sync/pom.xml
index e11b872b..c455c1a5 100644
--- a/flink_jobs/ams_ingest_sync/pom.xml
+++ b/flink_jobs/ams_ingest_sync/pom.xml
@@ -90,7 +90,7 @@
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>httpclient</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
 <groupId>com.google.code.gson</groupId>
@@ -100,7 +100,7 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
 <scope>test</scope>
@@ -159,25 +159,13 @@
 <groupId>org.apache.httpcomponents</groupId>
 <artifactId>httpclient</artifactId>
- <version>4.5.2</version>
+ <version>4.5.13</version>
 <groupId>com.google.code.gson</groupId>
 <artifactId>gson</artifactId>
 <version>2.7</version>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>4.11</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>junit-addons</groupId>
- <artifactId>junit-addons</artifactId>
- <version>1.4</version>
- <scope>test</scope>
- </dependency>
diff --git a/flink_jobs/batch_ar/.gitignore b/flink_jobs/batch_ar/.gitignore
index ce1b0b79..6c4e323f 100644
--- a/flink_jobs/batch_ar/.gitignore
+++ b/flink_jobs/batch_ar/.gitignore
@@ -1,7 +1,8 @@
/target/
-
-# Eclipse related
-.classpath
.project
.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
diff --git a/flink_jobs/batch_ar/pom.xml b/flink_jobs/batch_ar/pom.xml
index b93a7ff7..4326c2d4 100644
--- a/flink_jobs/batch_ar/pom.xml
+++ b/flink_jobs/batch_ar/pom.xml
@@ -95,6 +95,18 @@
+
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <version>4.5.13</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>fluent-hc</artifactId>
+ <version>4.5.13</version>
+ </dependency>
+
 <groupId>joda-time</groupId>
 <artifactId>joda-time</artifactId>
@@ -135,9 +147,17 @@
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>
- <version>4.11</version>
+ <version>4.13.1</version>
 <scope>test</scope>
+
+ <dependency>
+ <groupId>com.github.tomakehurst</groupId>
+ <artifactId>wiremock</artifactId>
+ <version>1.58</version>
+ <scope>test</scope>
+ </dependency>
+
@@ -204,6 +224,17 @@
 <version>3.2.2</version>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <version>4.5.13</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>fluent-hc</artifactId>
+ <version>4.5.13</version>
+ </dependency>
+
@@ -386,4 +417,4 @@
-->
-
+
\ No newline at end of file
diff --git a/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java
new file mode 100644
index 00000000..d8cb13b5
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java
@@ -0,0 +1,5 @@
+package argo.amr;
+
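+// Each constant identifies one argo-web-api resource fetched and cached by ApiResourceManager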
+public enum ApiResource {
+ CONFIG, OPS, METRIC, AGGREGATION, THRESHOLDS, TOPOENDPOINTS, TOPOGROUPS, WEIGHTS, DOWNTIMES, RECOMPUTATIONS
+}
\ No newline at end of file
diff --git a/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java
new file mode 100644
index 00000000..c4375701
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java
@@ -0,0 +1,644 @@
+package argo.amr;
+
+import java.io.IOException;
+import java.security.KeyManagementException;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.net.ssl.SSLContext;
+
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.ssl.SSLContextBuilder;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+
+import argo.avro.Downtime;
+import argo.avro.GroupEndpoint;
+import argo.avro.GroupGroup;
+import argo.avro.MetricProfile;
+import argo.avro.Weight;
+
+
+/**
+ * The ApiResourceManager class fetches remote argo-web-api resources such as
+ * report configuration, profiles, topology, weights and downtimes, in JSON format
+ */
+
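+/*
+ * Illustrative usage sketch (not part of the original sources; the endpoint,
+ * token, report id and date values below are placeholders):
+ *
+ *   ApiResourceManager amr = new ApiResourceManager("api.example.org", "s3cr3t");
+ *   amr.setReportID("report-uuid");
+ *   amr.setDate("2021-01-15");
+ *   amr.getRemoteAll();
+ *   MetricProfile[] metricProfiles = amr.getListMetrics();
+ *   GroupEndpoint[] groupEndpoints = amr.getListGroupEndpoints();
+ */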
+
+public class ApiResourceManager {
+
+ private EnumMap<ApiResource, String> data = new EnumMap<>(ApiResource.class);
+
+ private String endpoint;
+ private String token;
+ private String reportID;
+ private String date;
+ private String proxy;
+
+ private String metricID;
+ private String aggregationID;
+ private String opsID;
+ private String threshID;
+ private String reportName;
+ private String weightsID;
+ private boolean verify;
+
+
+ public ApiResourceManager(String endpoint, String token) {
+ this.endpoint = endpoint;
+ this.token = token;
+ this.metricID = "";
+ this.aggregationID = "";
+ this.opsID = "";
+ this.threshID = "";
+ this.reportName = "";
+ this.reportID = "";
+ this.date = "";
+ this.proxy = "";
+ this.weightsID = "";
+ this.verify = true;
+
+ }
+
+ public boolean getVerify() {
+ return verify;
+ }
+
+ public void setVerify(boolean verify) {
+ this.verify = verify;
+ }
+
+ public String getEndpoint() {
+ return endpoint;
+ }
+
+ public void setEndpoint(String endpoint) {
+ this.endpoint = endpoint;
+ }
+
+ public String getToken() {
+ return token;
+ }
+
+ public void setToken(String token) {
+ this.token = token;
+ }
+
+ public String getReportID() {
+ return reportID;
+ }
+
+ public void setReportID(String reportID) {
+ this.reportID = reportID;
+ }
+
+ public String getReportName() {
+ return this.reportName;
+ }
+
+ public String getOpsID() {
+ return this.opsID;
+ }
+
+
+ public String getAggregationID() {
+ return this.aggregationID;
+ }
+
+ public String getMetricID() {
+ return this.metricID;
+ }
+
+ public String getThresholdsID() {
+ return this.threshID;
+ }
+
+
+ public String getDate() {
+ return date;
+ }
+
+ public void setDate(String date) {
+ this.date = date;
+ }
+
+ public String getProxy() {
+ return proxy;
+ }
+
+ public void setProxy(String proxy) {
+ this.proxy = proxy;
+ }
+
+ public String getWeightsID() {
+ return weightsID;
+ }
+
+ public void setWeightsID(String weightsID) {
+ this.weightsID = weightsID;
+ }
+
+ /**
+ * Create an SSL connection socket factory with a strategy to trust self-signed
+ * certificates
+ */
+ private SSLConnectionSocketFactory selfSignedSSLF()
+ throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
+ SSLContextBuilder sslBuild = new SSLContextBuilder();
+ sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy());
+ return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE);
+ }
+
+ /**
+ * Contacts the remote argo-web-api at the full URL of a resource and retrieves its content (expected in JSON format)
+ *
+ * @param fullURL String containing the full url representation of the argo-web-api resource
+ * @return A string representation of the resource json content
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ private String getResource(String fullURL) {
+
+
+ Request r = Request.Get(fullURL).addHeader("Accept", "application/json").addHeader("Content-type",
+ "application/json").addHeader("x-api-key",this.token);
+ if (!this.proxy.isEmpty()) {
+ r = r.viaProxy(proxy);
+ }
+
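+ // connect and socket timeouts below are given in milliseconds (1 second each)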
+ r = r.connectTimeout(1000).socketTimeout(1000);
+
+ String content = "{}";
+
+ try {
+ if (this.verify == false) {
+ CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build();
+ Executor executor = Executor.newInstance(httpClient);
+ content = executor.execute(r).returnContent().asString();
+ } else {
+
+ content = r.execute().returnContent().asString();
+ }
+ } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException | IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+
+ return content;
+ }
+
+ /**
+ * Retrieves the remote report configuration based on reportID main class attribute and
+ * stores the content in the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteConfig() {
+ String path = "https://%s/api/v2/reports/%s";
+ String fullURL = String.format(path, this.endpoint, this.reportID);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.CONFIG, getJsonData(content, false));
+ }
+
+
+ /**
+ * Retrieves the metric profile content based on the metric_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteMetric() {
+
+ String path = "https://%s/api/v2/metric_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.metricID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.METRIC, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the aggregation profile content based on the aggregation_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteAggregation() {
+
+ String path = "https://%s/api/v2/aggregation_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.aggregationID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.AGGREGATION, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the ops profile content based on the ops_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteOps() {
+
+ String path = "https://%s/api/v2/operations_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.opsID, this.date);
+
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.OPS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the thresholds profile content based on the thresh_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteThresholds() {
+
+ String path = "https://%s/api/v2/thresholds_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.threshID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.THRESHOLDS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the topology endpoint content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteTopoEndpoints() {
+ String path = "https://%s/api/v2/topology/endpoints/by_report/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.reportName, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.TOPOENDPOINTS, getJsonData(content, true));
+ }
+
+ /**
+ * Retrieves the topology groups content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteTopoGroups() {
+ String path = "https://%s/api/v2/topology/groups/by_report/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.reportName, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.TOPOGROUPS, getJsonData(content, true));
+ }
+
+ /**
+ * Retrieves the weights content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteWeights() {
+ String path = "https://%s/api/v2/weights/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.weightsID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.WEIGHTS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the downtimes content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteDowntimes() {
+ String path = "https://%s/api/v2/downtimes?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.DOWNTIMES, getJsonData(content, false));
+ }
+
+ public void getRemoteRecomputations() {
+ String path = "https://%s/api/v2/recomputations?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.RECOMPUTATIONS, getJsonData(content, true));
+ }
+
+ /**
+ * Returns the local resource content (after it has been retrieved) based on the resource type
+ *
+ * @param res
+ * @return The extracted items JSON value as string
+ */
+ public String getResourceJSON(ApiResource res) {
+ return this.data.get(res);
+ }
+
+ /**
+ * Executes all steps to retrieve the complete set of available profile,
+ * topology, weights and downtime information from argo-web-api
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteAll() {
+ // Start with report and configuration
+ this.getRemoteConfig();
+ // parse remote report config to be able to get the other profiles
+ this.parseReport();
+ // Go on to the profiles
+ this.getRemoteMetric();
+ this.getRemoteOps();
+ this.getRemoteAggregation();
+ if (!this.threshID.equals("")) this.getRemoteThresholds();
+ // Go to topology
+ this.getRemoteTopoEndpoints();
+ this.getRemoteTopoGroups();
+ // get weights
+ if (!this.weightsID.equals("")) this.getRemoteWeights();
+ // get downtimes
+ this.getRemoteDowntimes();
+ // get recomputations
+ this.getRemoteRecomputations();
+
+ }
+
+ /**
+ * Parses the report content to extract the report's name and the various profile IDs
+ */
+ public void parseReport() {
+ // check if report configuration has been retrieved
+ if (!this.data.containsKey(ApiResource.CONFIG))
+ return;
+
+ String content = this.data.get(ApiResource.CONFIG);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jProfiles = jRoot.get("profiles").getAsJsonArray();
+
+ JsonObject jInfo = jRoot.get("info").getAsJsonObject();
+ this.reportName = jInfo.get("name").getAsString();
+
+ // for each profile iterate and store its id in the profile manager for later
+ // reference
+ for (int i = 0; i < jProfiles.size(); i++) {
+ JsonObject jProf = jProfiles.get(i).getAsJsonObject();
+ String profType = jProf.get("type").getAsString();
+ String profID = jProf.get("id").getAsString();
+ if (profType.equalsIgnoreCase("metric")) {
+ this.metricID = profID;
+ } else if (profType.equalsIgnoreCase("aggregation")) {
+ this.aggregationID = profID;
+ } else if (profType.equalsIgnoreCase("operations")) {
+ this.opsID = profID;
+ } else if (profType.equalsIgnoreCase("thresholds")) {
+ this.threshID = profID;
+ }
+
+ }
+
+ }
+
+ /**
+ * Parses the Downtime content retrieved from argo-web-api and provides a list of Downtime avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public Downtime[] getListDowntimes() {
+ List<Downtime> results = new ArrayList<>();
+ if (!this.data.containsKey(ApiResource.DOWNTIMES)) {
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.DOWNTIMES);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jElements = jRoot.get("endpoints").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String hostname = jItem.get("hostname").getAsString();
+ String service = jItem.get("service").getAsString();
+ String startTime = jItem.get("start_time").getAsString();
+ String endTime = jItem.get("end_time").getAsString();
+
+ Downtime d = new Downtime(hostname,service,startTime,endTime);
+ results.add(d);
+ }
+
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Topology endpoint content retrieved from argo-web-api and provides a list of GroupEndpoint avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public GroupEndpoint[] getListGroupEndpoints() {
+ List<GroupEndpoint> results = new ArrayList<>();
+ if (!this.data.containsKey(ApiResource.TOPOENDPOINTS)) {
+ GroupEndpoint[] rArr = new GroupEndpoint[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.TOPOENDPOINTS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonArray jRoot = jElement.getAsJsonArray();
+ for (int i = 0; i < jRoot.size(); i++) {
+ JsonObject jItem= jRoot.get(i).getAsJsonObject();
+ String group = jItem.get("group").getAsString();
+ String gType = jItem.get("type").getAsString();
+ String service = jItem.get("service").getAsString();
+ String hostname = jItem.get("hostname").getAsString();
+ JsonObject jTags = jItem.get("tags").getAsJsonObject();
+ Map<String, String> tags = new HashMap<>();
+ for (Entry<String, JsonElement> kv : jTags.entrySet()) {
+ tags.put(kv.getKey(), kv.getValue().getAsString());
+ }
+ GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags);
+ results.add(ge);
+ }
+
+ GroupEndpoint[] rArr = new GroupEndpoint[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Topology Groups content retrieved from argo-web-api and provides a list of GroupGroup avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public GroupGroup[] getListGroupGroups() {
+ List<GroupGroup> results = new ArrayList<>();
+ if (!this.data.containsKey(ApiResource.TOPOGROUPS)){
+ GroupGroup[] rArr = new GroupGroup[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ String content = this.data.get(ApiResource.TOPOGROUPS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonArray jRoot = jElement.getAsJsonArray();
+ for (int i = 0; i < jRoot.size(); i++) {
+ JsonObject jItem= jRoot.get(i).getAsJsonObject();
+ String group = jItem.get("group").getAsString();
+ String gType = jItem.get("type").getAsString();
+ String subgroup = jItem.get("subgroup").getAsString();
+ JsonObject jTags = jItem.get("tags").getAsJsonObject();
+ Map<String, String> tags = new HashMap<String, String>();
+ for (Entry<String, JsonElement> kv : jTags.entrySet()) {
+ tags.put(kv.getKey(), kv.getValue().getAsString());
+ }
+ GroupGroup gg = new GroupGroup(gType,group,subgroup,tags);
+ results.add(gg);
+ }
+
+ GroupGroup[] rArr = new GroupGroup[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Weights content retrieved from argo-web-api and provides a list of Weight avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public Weight[] getListWeights() {
+ List<Weight> results = new ArrayList<Weight>();
+ if (!this.data.containsKey(ApiResource.WEIGHTS)) {
+ Weight[] rArr = new Weight[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.WEIGHTS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ String wType = jRoot.get("weight_type").getAsString();
+ JsonArray jElements = jRoot.get("groups").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String group = jItem.get("name").getAsString();
+ String weight = jItem.get("value").getAsString();
+
+ Weight w = new Weight(wType,group,weight);
+ results.add(w);
+ }
+
+ Weight[] rArr = new Weight[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Metric profile content retrieved from argo-web-api and provides a list of MetricProfile avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public MetricProfile[] getListMetrics() {
+ List<MetricProfile> results = new ArrayList<MetricProfile>();
+ if (!this.data.containsKey(ApiResource.METRIC)) {
+ MetricProfile[] rArr = new MetricProfile[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.METRIC);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ String profileName = jRoot.get("name").getAsString();
+ JsonArray jElements = jRoot.get("services").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String service = jItem.get("service").getAsString();
+ JsonArray jMetrics = jItem.get("metrics").getAsJsonArray();
+ for (int j=0; j < jMetrics.size(); j++) {
+ String metric = jMetrics.get(j).getAsString();
+
+ Map<String, String> tags = new HashMap<String, String>();
+ MetricProfile mp = new MetricProfile(profileName,service,metric,tags);
+ results.add(mp);
+ }
+
+ }
+
+ MetricProfile[] rArr = new MetricProfile[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Extract the data payload from an api response: either the first JSON item of the
+ * data array or, when asArray is true, the whole data array
+ *
+ * @param content JSON content of the full response (status + data)
+ * @param asArray when true, return the complete data array instead of only its first item
+ * @return JSON string representation of the requested data payload
+ *
+ */
+ private String getJsonData(String content, boolean asArray) {
+ JsonParser jsonParser = new JsonParser();
+ // Parse the full response content
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ // Get the data array and the first item
+ if (asArray) {
+ return jRoot.get("data").toString();
+ }
+ JsonArray jData = jRoot.get("data").getAsJsonArray();
+ JsonElement jItem = jData.get(0);
+ return jItem.toString();
+ }
+
+}
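For reference, a minimal usage sketch of the manager added above, relying only on methods present in this patch; the endpoint and token strings are placeholders, while the report id and date values come from the test fixtures further down:

    ApiResourceManager amr = new ApiResourceManager("api.example.foo", "s3cr3t");
    amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2");
    amr.setDate("2020-11-01");
    amr.getRemoteAll();                                          // fetch report config, profiles, topology, downtimes, weights
    MetricProfile[] mps = amr.getListMetrics();                  // parsed avro views consumed by the Flink jobs
    GroupEndpoint[] egp = amr.getListGroupEndpoints();
    Downtime[] down = amr.getListDowntimes();
    String reportJson = amr.getResourceJSON(ApiResource.CONFIG); // raw JSON of a single resource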
diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java b/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java
index 053c06d7..e6bf2b85 100644
--- a/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java
+++ b/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java
@@ -2,6 +2,8 @@
import org.slf4j.LoggerFactory;
+import argo.amr.ApiResource;
+import argo.amr.ApiResourceManager;
import argo.avro.Downtime;
import argo.avro.GroupEndpoint;
import argo.avro.GroupGroup;
@@ -64,21 +66,48 @@ public static void main(String[] args) throws Exception {
env.getConfig().setGlobalJobParameters(params);
env.setParallelism(1);
// sync data for input
+
+
+ String apiEndpoint = params.getRequired("api.endpoint");
+ String apiToken = params.getRequired("api.token");
+ String reportID = params.getRequired("report.id");
+
+ ApiResourceManager amr = new ApiResourceManager(apiEndpoint,apiToken);
+
+ // fetch
+
+ // set params
+ if (params.has("api.proxy")) {
+ amr.setProxy(params.get("api.proxy"));
+ }
+
+ amr.setReportID(reportID);
+ amr.getRemoteAll();
+
+
- Path mps = new Path(params.getRequired("mps"));
- Path egp = new Path(params.getRequired("egp"));
- Path ggp = new Path(params.getRequired("ggp"));
- Path down = new Path(params.getRequired("downtimes"));
- Path weight = new Path(params.getRequired("weights"));
+// Path mps = new Path(params.getRequired("mps"));
+// Path egp = new Path(params.getRequired("egp"));
+// Path ggp = new Path(params.getRequired("ggp"));
+// Path down = new Path(params.getRequired("downtimes"));
+// Path weight = new Path(params.getRequired("weights"));
- DataSource confDS = env.readTextFile(params.getRequired("conf"));
- DataSource opsDS = env.readTextFile(params.getRequired("ops"));
- DataSource aprDS = env.readTextFile(params.getRequired("apr"));
- DataSource recDS = env.readTextFile(params.getRequired("rec"));
+ //DataSource confDS = env.readTextFile(params.getRequired("conf"));
+// DataSource opsDS = env.readTextFile(params.getRequired("ops"));
+// DataSource aprDS = env.readTextFile(params.getRequired("apr"));
+// DataSource recDS = env.readTextFile(params.getRequired("rec"));
+
+
+ DataSource<String> confDS = env.fromElements(amr.getResourceJSON(ApiResource.CONFIG));
+ DataSource<String> opsDS = env.fromElements(amr.getResourceJSON(ApiResource.OPS));
+ DataSource<String> aprDS = env.fromElements(amr.getResourceJSON(ApiResource.AGGREGATION));
+ DataSource<String> recDS = env.fromElements(amr.getResourceJSON(ApiResource.RECOMPUTATIONS));
+
// begin with empty threshold datasource
DataSource<String> thrDS = env.fromElements("");
+
// if threshold filepath has been defined in cli parameters
if (params.has("thr")){
// read file and update threshold datasource
@@ -86,29 +115,27 @@ public static void main(String[] args) throws Exception {
}
+ DataSet<Downtime> downDS = env.fromElements(new Downtime());
+ DataSet<Weight> weightDS = env.fromElements(new Weight());
+ DataSet<GroupGroup> ggpDS = env.fromElements(new GroupGroup());
ConfigManager confMgr = new ConfigManager();
confMgr.loadJsonString(confDS.collect());
- // sync data input: metric profile in avro format
- AvroInputFormat mpsAvro = new AvroInputFormat(mps, MetricProfile.class);
- DataSet mpsDS = env.createInput(mpsAvro);
-
- // sync data input: endpoint group topology data in avro format
- AvroInputFormat egpAvro = new AvroInputFormat(egp, GroupEndpoint.class);
- DataSet egpDS = env.createInput(egpAvro);
-
- // sync data input: group of group topology data in avro format
- AvroInputFormat ggpAvro = new AvroInputFormat(ggp, GroupGroup.class);
- DataSet ggpDS = env.createInput(ggpAvro);
-
- // sync data input: downtime data in avro format
- AvroInputFormat downAvro = new AvroInputFormat(down, Downtime.class);
- DataSet downDS = env.createInput(downAvro);
+ // Get the sync datasets directly from the web-api data
+ DataSet<MetricProfile> mpsDS = env.fromElements(amr.getListMetrics());
+ DataSet<GroupEndpoint> egpDS = env.fromElements(amr.getListGroupEndpoints());
+
+
+ Downtime[] listDowntimes = amr.getListDowntimes();
+ Weight[] listWeights = amr.getListWeights();
+ GroupGroup[] listGroups = amr.getListGroupGroups();
+
+ if (listDowntimes.length > 0) downDS = env.fromElements(amr.getListDowntimes());
+ if (listWeights.length > 0) weightDS = env.fromElements(amr.getListWeights());
+ if (listGroups.length > 0) ggpDS = env.fromElements(amr.getListGroupGroups());
+
- // sync data input: weight data in avro format
- AvroInputFormat weightAvro = new AvroInputFormat(weight, Weight.class);
- DataSet weightDS = env.createInput(weightAvro);
// todays metric data
Path in = new Path(params.getRequired("mdata"));
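The empty-placeholder pattern above is worth spelling out: downDS, weightDS and ggpDS start from a single default-constructed record, presumably because Flink's fromElements cannot build a DataSet from an empty array, and the loadFromList changes further down skip such records by checking for null fields. A sketch of the intent:

    DataSet<Downtime> downDS = env.fromElements(new Downtime()); // placeholder so the DataSet is never empty
    Downtime[] listDowntimes = amr.getListDowntimes();
    if (listDowntimes.length > 0) {
        downDS = env.fromElements(listDowntimes);                // real data replaces the placeholder
    }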
diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java b/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java
index 26622c0b..56b70527 100644
--- a/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java
+++ b/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java
@@ -135,8 +135,9 @@ public void flatMap(MonTimeline mtl, Collector out) throws Exception
dAR.calculateAR(mtl.getTimeline(),this.opsMgr);
int runDateInt = Integer.parseInt(this.runDate.replace("-", ""));
-
- EndpointAR result = new EndpointAR(runDateInt,this.report,mtl.getHostname(),mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f);
+ String groupType = this.confMgr.egroup;
+ String info = this.egpMgr.getInfo(mtl.getGroup(),groupType, mtl.getHostname(), mtl.getService());
+ EndpointAR result = new EndpointAR(runDateInt,this.report,mtl.getHostname(),mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f,info);
out.collect(result);
diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java b/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java
index 928c1e31..c173ddf4 100644
--- a/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java
+++ b/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java
@@ -12,8 +12,9 @@ public class EndpointAR {
private double up;
private double unknown;
private double down;
+ private String info;
- public EndpointAR(int _dateInt, String _report, String _name, String _service, String _group, double _a, double _r, double _up, double _unknown, double _down){
+ public EndpointAR(int _dateInt, String _report, String _name, String _service, String _group, double _a, double _r, double _up, double _unknown, double _down, String _info){
this.dateInt = _dateInt;
this.report=_report;
this.name = _name;
@@ -24,6 +25,8 @@ public EndpointAR(int _dateInt, String _report, String _name, String _service, S
this.up = _up;
this.unknown = _unknown;
this.down = _down;
+ this.info = _info;
+
}
@@ -92,6 +95,14 @@ public void setDown(double down) {
this.down = down;
}
+ public void setInfo(String info) {
+ this.info = info;
+ }
+
+ public String getInfo() {
+ return this.info;
+ }
+
public String toString() {
return "(" + this.dateInt+ "," + this.report + "," + this.name + "," + this.service + "," + this.group + "," + this.a + "," + this.r + "," + this.up + "," + this.unknown + "," + this.down + ")";
}
diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java b/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java
index 8c67bb1f..961f3f03 100644
--- a/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java
+++ b/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java
@@ -86,12 +86,29 @@ public void open(int taskNumber, int numTasks) throws IOException {
*/
@Override
public void writeRecord(EndpointAR record) throws IOException {
-
+
+
+ String info = record.getInfo();
+
// create document from record
Document doc = new Document("report", record.getReport()).append("date", record.getDateInt())
.append("name", record.getName()).append("service", record.getService()).append("supergroup", record.getGroup())
.append("availability", record.getA()).append("reliability", record.getR()).append("up", record.getUp())
.append("unknown", record.getUnknown()).append("down", record.getDown());
+
+ if (!info.equalsIgnoreCase("")) {
+ Document infoDoc = new Document();
+ String[] kvs = info.split(",");
+ for (String kv : kvs) {
+ String[] kvtok = kv.split(":",2);
+ if (kvtok.length == 2){
+ infoDoc.append(kvtok[0], kvtok[1]);
+ }
+ }
+
+ doc.append("info", infoDoc);
+
+ }
if (this.method == MongoMethod.UPSERT) {
Bson f = Filters.and(Filters.eq("report", record.getReport()), Filters.eq("date", record.getDateInt()),
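A small, self-contained illustration of the info handling in writeRecord above: the comma-separated key:value string produced by EndpointGroupManager.getInfo is split with a limit of 2 so that colons inside values (for example in URLs) survive; the sample string is borrowed from the tests below.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class InfoSplitSketch {
        public static void main(String[] args) {
            String info = "URL:https://example.foo,DN:foo DN";
            Map<String, String> parsed = new LinkedHashMap<String, String>();
            for (String kv : info.split(",")) {
                String[] kvtok = kv.split(":", 2); // limit 2 keeps the colon of https:// inside the value
                if (kvtok.length == 2) {
                    parsed.put(kvtok[0], kvtok[1]);
                }
            }
            System.out.println(parsed); // {URL=https://example.foo, DN=foo DN}
        }
    }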
diff --git a/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java b/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java
index 2034458d..abe70698 100644
--- a/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java
+++ b/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java
@@ -146,8 +146,7 @@ public void loadAvro(File avroFile) throws IOException {
String service = avroRow.get("service").toString();
String startTime = avroRow.get("start_time").toString();
String endTime = avroRow.get("end_time").toString();
-
- // Insert data to list
+ // insert data to list
this.insert(hostname, service, startTime, endTime);
} // end of avro rows
@@ -179,7 +178,7 @@ public void loadFromList( List dnt) {
String startTime = item.getStartTime();
String endTime = item.getEndTime();
// Insert data to list
- this.insert(hostname,service,startTime,endTime);
+ if (hostname != null) this.insert(hostname,service,startTime,endTime);
}
diff --git a/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java
index 87bf6b0d..789bfc33 100644
--- a/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java
+++ b/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java
@@ -5,7 +5,7 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
-
+import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;
@@ -49,6 +49,29 @@ public EndpointItem(String type, String group, String service, String hostname,
}
}
+
+ public String getInfo(String group, String type, String hostname, String service) {
+ String info = "";
+ boolean first = true;
+ HashMap<String, String> tags = this.getGroupTags(group, type, hostname, service);
+ if (tags == null) return info;
+ for (String tName : tags.keySet()) {
+ if (tName.startsWith("info.")) {
+ String infoName = tName.replaceFirst("info.", "");
+
+ String value = tags.get(tName);
+ if (!value.equalsIgnoreCase("")) {
+ if (!first) {
+ info = info + ",";
+ } else {
+ first = false;
+ }
+ info = info + infoName+":"+tags.get(tName);
+ }
+ }
+ }
+ return info;
+ }
public EndpointGroupManager() {
this.list = new ArrayList();
@@ -86,10 +109,10 @@ public ArrayList getGroup(String type, String hostname, String service)
return results;
}
- public HashMap<String, String> getGroupTags(String type, String hostname, String service) {
+ public HashMap<String, String> getGroupTags(String group, String type, String hostname, String service) {
for (EndpointItem item : fList) {
- if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) {
+ if (item.group.equals(group) && item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) {
return item.tags;
}
}
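For clarity, the behaviour the new getInfo method is expected to have, shown as a fragment on a hypothetical tag map (using java.util.HashMap); the expected output matches the exp1 value in the batch_ar EndpointGroupManagerTest further down:

    HashMap<String, String> tags = new HashMap<String, String>();
    tags.put("monitored", "1");                                  // plain monitoring tags are ignored
    tags.put("info.URL", "host1.example.foo/path/to/service1");  // info.* tags are kept
    tags.put("info.DN", "foo DN");
    // getInfo strips the "info." prefix and joins the remaining pairs as
    // comma separated key:value entries, e.g.
    // "URL:host1.example.foo/path/to/service1,DN:foo DN" (pair order depends on the map)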
diff --git a/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java b/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java
index 74bb2378..37985c80 100644
--- a/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java
+++ b/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java
@@ -217,7 +217,7 @@ public void loadFromList( List ggp) {
}
// Insert data to list
- this.insert(type, group, subgroup, tagMap);
+ if (type != null) this.insert(type, group, subgroup, tagMap);
}
this.unfilter();
diff --git a/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java b/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java
index c8cb72c7..158565cc 100644
--- a/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java
+++ b/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java
@@ -140,7 +140,7 @@ public int loadAvro(File avroFile) throws IOException {
String group = avroRow.get("site").toString();
String weight = avroRow.get("weight").toString();
- // Insert data to list
+ // Insert data to list
this.insert(type, group, weight);
} // end of avro rows
@@ -170,8 +170,8 @@ public void loadFromList( List wg) {
String type = item.getType();
String group = item.getSite();
String weight = item.getWeight();
- // Insert data to list
- this.insert(type, group, weight);
+ // Insert data to list -- ignore empty placeholder items
+ if (type != null) this.insert(type, group, weight);
}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json
new file mode 100644
index 00000000..66f9474d
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json
@@ -0,0 +1,33 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8",
+ "date": "2020-06-24",
+ "name": "test-agg2",
+ "namespace": "",
+ "endpoint_group": "servicegroups",
+ "metric_operation": "AND",
+ "profile_operation": "AND",
+ "metric_profile": {
+ "name": "test-mon",
+ "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4"
+ },
+ "groups": [
+ {
+ "name": "webportal",
+ "operation": "AND",
+ "services": [
+ {
+ "name": "WebPortal",
+ "operation": "OR"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json b/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json
new file mode 100644
index 00000000..f0f40f2c
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json
@@ -0,0 +1 @@
+{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","date":"2020-06-24","name":"test-agg2","namespace":"","endpoint_group":"servicegroups","metric_operation":"AND","profile_operation":"AND","metric_profile":{"name":"test-mon","id":"92fa5d74-015c-4122-b8b9-7b344f3154d4"},"groups":[{"name":"webportal","operation":"AND","services":[{"name":"WebPortal","operation":"OR"}]}]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json b/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json
new file mode 100644
index 00000000..8220787f
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json
@@ -0,0 +1 @@
+{"id":"f29eeb59-ab38-4aa0-b372-5d3c0709dfb2","tenant":"demo","disabled":false,"info":{"name":"Critical","description":"test report","created":"2020-09-24 12:05:04","updated":"2020-10-08 09:32:46"},"thresholds":{"availability":80,"reliability":85,"uptime":0.8,"unknown":0.1,"downtime":0.1},"topology_schema":{"group":{"type":"PROJECT","group":{"type":"SERVICEGROUPS"}}},"profiles":[{"id":"92fa5d74-015c-4122-b8b9-7b344f3154d4","name":"test-mon","type":"metric"},{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","name":"test-agg2","type":"aggregation"},{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","name":"demo_ops","type":"operations"},{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","name":"endpoint_example","type":"thresholds"}],"filter_tags":[]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json b/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json
new file mode 100644
index 00000000..b7d181aa
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json
@@ -0,0 +1 @@
+{"date":"2020-11-10","endpoints":[{"hostname":"hostA.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortald","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"}]}
\ No newline at end of file
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json b/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json
new file mode 100644
index 00000000..b4681fcb
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json
@@ -0,0 +1 @@
+{"id":"392fa5d74-015c-4122-b8b9-7b344f3154d4","date":"2020-09-24","name":"test-mon","description":"Generic monitoring profile","services":[{"service":"WebPortal","metrics":["org.nagios.WebCheck"]}]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json
new file mode 100644
index 00000000..ff505f0a
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json
@@ -0,0 +1 @@
+{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","date":"2020-06-24","name":"demo_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json
new file mode 100644
index 00000000..052b03aa
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json
@@ -0,0 +1 @@
+[{"id":"56db4f1a-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-01-21T23:01:00Z","end_time":"2018-01-23T12:01:00Z","report":"Critical","exclude":["SITE-1","SITE-2"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]},{"id":"66db4f55-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-05-21T23:01:00Z","end_time":"2018-05-23T12:01:00Z","report":"Critical","exclude":["SITE-3","SITE-4"],"status":"done","timestamp":"2018-06-17 17:03:55","history":[{"status":"pending","timestamp":"2018-06-30T11:41:26Z"}]},{"id":"76db4444-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-09-10T23:01:00Z","end_time":"2018-09-15T12:01:00Z","report":"Critical","exclude":["SITE-6","SITE-7","SITE-8"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]}]
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json
new file mode 100644
index 00000000..453e5bdf
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json
@@ -0,0 +1 @@
+{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","date":"2015-01-01","name":"endpoint_example","rules":[{"host":"host1.foo.bar","metric":"service.freshness","thresholds":"freshness=1s;;0:;"}]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json
new file mode 100644
index 00000000..10dd42cf
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json
@@ -0,0 +1 @@
+[{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host1.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupB","type":"SERVICEGROUPS","service":"webPortal","hostname":"host3.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host2.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}}]
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json
new file mode 100644
index 00000000..1c8e4316
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json
@@ -0,0 +1 @@
+[{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-101","tags":{"monitored":"0","scope":"Local"}},{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-202","tags":{"monitored":"1","scope":"Local"}}]
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json
new file mode 100644
index 00000000..399c31c1
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json
@@ -0,0 +1 @@
+{"id":"3b9602ed-49ec-42f3-8df7-7c35331ebf69","date":"2020-09-02","name":"demo","weight_type":"computationpower","group_type":"SERVICEGROUPS","groups":[{"name":"GROUP-A","value":366},{"name":"GROUP-B","value":4000},{"name":"GROUP-C","value":19838},{"name":"GROUP-D","value":19838}]}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json b/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json
new file mode 100644
index 00000000..7bf3adee
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json
@@ -0,0 +1,31 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "date": "2020-11-10",
+ "endpoints": [
+ {
+ "hostname": "hostA.foo",
+ "service": "WebPortal",
+ "start_time": "2020-11-10T00:00:00Z",
+ "end_time": "2020-11-10T23:59:00Z"
+ },
+ {
+ "hostname": "hostB.foo",
+ "service": "WebPortal",
+ "start_time": "2020-11-10T00:00:00Z",
+ "end_time": "2020-11-10T23:59:00Z"
+ },
+ {
+ "hostname": "hostB.foo",
+ "service": "WebPortald",
+ "start_time": "2020-11-10T00:00:00Z",
+ "end_time": "2020-11-10T23:59:00Z"
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json
new file mode 100644
index 00000000..7ea5a470
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json
@@ -0,0 +1,22 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "392fa5d74-015c-4122-b8b9-7b344f3154d4",
+ "date": "2020-09-24",
+ "name": "test-mon",
+ "description": "Generic monitoring profile",
+ "services": [
+ {
+ "service": "WebPortal",
+ "metrics": [
+ "org.nagios.WebCheck"
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json
new file mode 100644
index 00000000..9b00f14b
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json
@@ -0,0 +1,248 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179",
+ "date": "2020-06-24",
+ "name": "demo_ops",
+ "available_states": [
+ "OK",
+ "WARNING",
+ "UNKNOWN",
+ "MISSING",
+ "CRITICAL",
+ "DOWNTIME"
+ ],
+ "defaults": {
+ "down": "DOWNTIME",
+ "missing": "MISSING",
+ "unknown": "UNKNOWN"
+ },
+ "operations": [
+ {
+ "name": "AND",
+ "truth_table": [
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ },
+ {
+ "name": "OR",
+ "truth_table": [
+ {
+ "a": "OK",
+ "b": "OK",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "WARNING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "UNKNOWN",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "MISSING",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "CRITICAL",
+ "x": "OK"
+ },
+ {
+ "a": "OK",
+ "b": "DOWNTIME",
+ "x": "OK"
+ },
+ {
+ "a": "WARNING",
+ "b": "WARNING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "UNKNOWN",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "MISSING",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "CRITICAL",
+ "x": "WARNING"
+ },
+ {
+ "a": "WARNING",
+ "b": "DOWNTIME",
+ "x": "WARNING"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "UNKNOWN",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "MISSING",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "UNKNOWN",
+ "b": "DOWNTIME",
+ "x": "UNKNOWN"
+ },
+ {
+ "a": "MISSING",
+ "b": "MISSING",
+ "x": "MISSING"
+ },
+ {
+ "a": "MISSING",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "MISSING",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "CRITICAL",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "CRITICAL",
+ "b": "DOWNTIME",
+ "x": "CRITICAL"
+ },
+ {
+ "a": "DOWNTIME",
+ "b": "DOWNTIME",
+ "x": "DOWNTIME"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json b/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json
new file mode 100644
index 00000000..b597ad09
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json
@@ -0,0 +1,72 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "56db4f1a-f331-46ca-b0fd-4555b4aa1cfc",
+ "requester_name": "john foo",
+ "requester_email": "foo1@email.com",
+ "reason": "ggus-reason01",
+ "start_time": "2018-01-21T23:01:00Z",
+ "end_time": "2018-01-23T12:01:00Z",
+ "report": "Critical",
+ "exclude": [
+ "SITE-1",
+ "SITE-2"
+ ],
+ "status": "done",
+ "timestamp": "2018-03-17 17:03:55",
+ "history": [
+ {
+ "status": "pending",
+ "timestamp": "2018-01-30T11:41:26Z"
+ }
+ ]
+ },
+ {
+ "id": "66db4f55-f331-46ca-b0fd-4555b4aa1cfc",
+ "requester_name": "john foo",
+ "requester_email": "foo1@email.com",
+ "reason": "ggus-reason01",
+ "start_time": "2018-05-21T23:01:00Z",
+ "end_time": "2018-05-23T12:01:00Z",
+ "report": "Critical",
+ "exclude": [
+ "SITE-3",
+ "SITE-4"
+ ],
+ "status": "done",
+ "timestamp": "2018-06-17 17:03:55",
+ "history": [
+ {
+ "status": "pending",
+ "timestamp": "2018-06-30T11:41:26Z"
+ }
+ ]
+ },
+ {
+ "id": "76db4444-f331-46ca-b0fd-4555b4aa1cfc",
+ "requester_name": "john foo",
+ "requester_email": "foo1@email.com",
+ "reason": "ggus-reason01",
+ "start_time": "2018-09-10T23:01:00Z",
+ "end_time": "2018-09-15T12:01:00Z",
+ "report": "Critical",
+ "exclude": [
+ "SITE-6",
+ "SITE-7",
+ "SITE-8"
+ ],
+ "status": "done",
+ "timestamp": "2018-03-17 17:03:55",
+ "history": [
+ {
+ "status": "pending",
+ "timestamp": "2018-01-30T11:41:26Z"
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/report.json b/flink_jobs/batch_ar/src/main/resources/amr/report.json
new file mode 100644
index 00000000..fa5a5f65
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/report.json
@@ -0,0 +1,57 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "f29eeb59-ab38-4aa0-b372-5d3c0709dfb2",
+ "tenant": "demo",
+ "disabled": false,
+ "info": {
+ "name": "Critical",
+ "description": "test report",
+ "created": "2020-09-24 12:05:04",
+ "updated": "2020-10-08 09:32:46"
+ },
+ "thresholds": {
+ "availability": 80,
+ "reliability": 85,
+ "uptime": 0.8,
+ "unknown": 0.1,
+ "downtime": 0.1
+ },
+ "topology_schema": {
+ "group": {
+ "type": "PROJECT",
+ "group": {
+ "type": "SERVICEGROUPS"
+ }
+ }
+ },
+ "profiles": [
+ {
+ "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4",
+ "name": "test-mon",
+ "type": "metric"
+ },
+ {
+ "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8",
+ "name": "test-agg2",
+ "type": "aggregation"
+ },
+ {
+ "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179",
+ "name": "demo_ops",
+ "type": "operations"
+ },
+ {
+ "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e",
+ "name": "endpoint_example",
+ "type": "thresholds"
+ }
+ ],
+ "filter_tags": []
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json b/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json
new file mode 100644
index 00000000..1c1ac3fb
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json
@@ -0,0 +1,20 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e",
+ "date": "2015-01-01",
+ "name": "endpoint_example",
+ "rules": [
+ {
+ "host": "host1.foo.bar",
+ "metric": "service.freshness",
+ "thresholds": "freshness=1s;;0:;"
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json b/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json
new file mode 100644
index 00000000..2b1cfed5
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json
@@ -0,0 +1,44 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "date": "2020-11-10",
+ "group": "groupA",
+ "type": "SERVICEGROUPS",
+ "service": "webPortal",
+ "hostname": "host1.foo.bar",
+ "tags": {
+ "monitored": "1",
+ "production": "1",
+ "scope": "FOO"
+ }
+ },
+ {
+ "date": "2020-11-10",
+ "group": "groupB",
+ "type": "SERVICEGROUPS",
+ "service": "webPortal",
+ "hostname": "host3.foo.bar",
+ "tags": {
+ "monitored": "1",
+ "production": "1",
+ "scope": "FOO"
+ }
+ },
+ {
+ "date": "2020-11-10",
+ "group": "groupA",
+ "type": "SERVICEGROUPS",
+ "service": "webPortal",
+ "hostname": "host2.foo.bar",
+ "tags": {
+ "monitored": "1",
+ "production": "1",
+ "scope": "FOO"
+ }
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json b/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json
new file mode 100644
index 00000000..6286cc55
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json
@@ -0,0 +1,28 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "date": "2020-11-11",
+ "group": "ORG-A",
+ "type": "PROJECT",
+ "subgroup": "GROUP-101",
+ "tags": {
+ "monitored": "0",
+ "scope": "Local"
+ }
+ },
+ {
+ "date": "2020-11-11",
+ "group": "ORG-A",
+ "type": "PROJECT",
+ "subgroup": "GROUP-202",
+ "tags": {
+ "monitored": "1",
+ "scope": "Local"
+ }
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/amr/weights.json b/flink_jobs/batch_ar/src/main/resources/amr/weights.json
new file mode 100644
index 00000000..fc1dea3f
--- /dev/null
+++ b/flink_jobs/batch_ar/src/main/resources/amr/weights.json
@@ -0,0 +1,33 @@
+{
+ "status": {
+ "message": "Success",
+ "code": "200"
+ },
+ "data": [
+ {
+ "id": "3b9602ed-49ec-42f3-8df7-7c35331ebf69",
+ "date": "2020-09-02",
+ "name": "demo",
+ "weight_type": "computationpower",
+ "group_type": "SERVICEGROUPS",
+ "groups": [
+ {
+ "name": "GROUP-A",
+ "value": 366
+ },
+ {
+ "name": "GROUP-B",
+ "value": 4000
+ },
+ {
+ "name": "GROUP-C",
+ "value": 19838
+ },
+ {
+ "name": "GROUP-D",
+ "value": 19838
+ }
+ ]
+ }
+ ]
+}
diff --git a/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro b/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro
new file mode 100644
index 00000000..0f388be0
Binary files /dev/null and b/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro differ
diff --git a/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java b/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java
new file mode 100644
index 00000000..4a384ada
--- /dev/null
+++ b/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java
@@ -0,0 +1,288 @@
+package argo.amr;
+
+import static org.junit.Assert.*;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.KeyManagementException;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.text.ParseException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.http.client.ClientProtocolException;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
+import argo.avro.Downtime;
+import argo.avro.GroupEndpoint;
+import argo.avro.GroupGroup;
+import argo.avro.MetricProfile;
+import argo.avro.Weight;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+import static com.github.tomakehurst.wiremock.client.WireMock.configureFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig;
+
+
+public class ApiResourceManagerTest {
+
+ public static String loadResJSON(String resURL) {
+
+ InputStream jsonInputStream
+ = ApiResourceManagerTest.class.getResourceAsStream(resURL);
+ String content = new BufferedReader(
+ new InputStreamReader(jsonInputStream, StandardCharsets.UTF_8))
+ .lines()
+ .collect(Collectors.joining("\n"));
+ return content;
+
+ }
+
+ @Rule
+ public WireMockRule wireMockRule = new WireMockRule(wireMockConfig().httpsPort(8443));
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ // Assert that files are present
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/metric_profile.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/agg_profile.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/ops_profile.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/thresholds.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topoendpoints.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topogroups.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/downtimes.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/weights.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/recomputations.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_CONFIG.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_METRIC.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_AGGREGATION.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_OPS.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_THRESHOLDS.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOENDPOINTS.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOGROUPS.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_DOWNTIMES.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_WEIGHTS.json"));
+ assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_RECOMPUTATIONS.json"));
+ }
+
+ @Test
+ public void test() throws URISyntaxException, IOException, ParseException {
+ // load mock api response content
+ String jsonReport = loadResJSON("/amr/report.json");
+ String jsonMetric = loadResJSON("/amr/metric_profile.json");
+ String jsonAgg = loadResJSON("/amr/agg_profile.json");
+ String jsonOps = loadResJSON("/amr/ops_profile.json");
+ String jsonThresholds = loadResJSON("/amr/thresholds.json");
+ String jsonTopoEnd = loadResJSON("/amr/topoendpoints.json");
+ String jsonTopoGroups = loadResJSON("/amr/topogroups.json");
+ String jsonDowntimes = loadResJSON("/amr/downtimes.json");
+ String jsonWeights = loadResJSON("/amr/weights.json");
+ String jsonRecomp = loadResJSON("/amr/recomputations.json");
+
+ // get json data items
+
+ String dataConfig = loadResJSON("/amr/data_CONFIG.json");
+ String dataMetric = loadResJSON("/amr/data_METRIC.json");
+ String dataAggr = loadResJSON("/amr/data_AGGREGATION.json");
+ String dataOps = loadResJSON("/amr/data_OPS.json");
+ String dataThresh = loadResJSON("/amr/data_THRESHOLDS.json");
+ String dataTopoEnd = loadResJSON("/amr/data_TOPOENDPOINTS.json");
+ String dataTopoGroup = loadResJSON("/amr/data_TOPOGROUPS.json");
+ String dataDown = loadResJSON("/amr/data_DOWNTIMES.json");
+ String dataWeights = loadResJSON("/amr/data_WEIGHTS.json");
+ String dataRecomp = loadResJSON("/amr/data_RECOMPUTATIONS.json");
+
+
+
+
+ stubFor(get(urlEqualTo("/api/v2/reports/f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"))
+ .willReturn(aResponse().withBody(jsonReport)));
+ stubFor(get(urlEqualTo("/api/v2/metric_profiles/92fa5d74-015c-4122-b8b9-7b344f3154d4?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonMetric)));
+ stubFor(get(urlEqualTo("/api/v2/aggregation_profiles/2744247f-40f8-4dd6-b22c-76a3b38334d8?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonAgg)));
+ stubFor(get(urlEqualTo("/api/v2/operations_profiles/ea62ff1e-c6e1-438b-83c7-9262b3a4f179?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonOps)));
+ stubFor(get(urlEqualTo("/api/v2/thresholds_profiles/3345c3c1-322a-47f1-982c-1d9df1fc065e?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonThresholds)));
+ stubFor(get(urlEqualTo("/api/v2/topology/endpoints/by_report/Critical?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonTopoEnd)));
+ stubFor(get(urlEqualTo("/api/v2/topology/groups/by_report/Critical?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonTopoGroups)));
+ stubFor(get(urlEqualTo("/api/v2/downtimes?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonDowntimes)));
+ stubFor(get(urlEqualTo("/api/v2/weights/3b9602ed-49ec-42f3-8df7-7c35331ebf69?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonWeights)));
+ stubFor(get(urlEqualTo("/api/v2/recomputations?date=2020-11-01"))
+ .willReturn(aResponse().withBody(jsonRecomp)));
+
+ ApiResourceManager amr = new ApiResourceManager("localhost:8443", "s3cr3t");
+ amr.setDate("2020-11-01");
+ amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2");
+ amr.setToken("s3cr3t");
+ amr.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69");
+ amr.setVerify(false);
+
+ // Get the report configuration first and parse it
+ amr.getRemoteConfig();
+ amr.parseReport();
+
+ assertEquals("report name retrieved","Critical",amr.getReportName());
+ assertEquals("metric id retrieved","92fa5d74-015c-4122-b8b9-7b344f3154d4",amr.getMetricID());
+ assertEquals("ops id retrieved","ea62ff1e-c6e1-438b-83c7-9262b3a4f179",amr.getOpsID());
+ assertEquals("aggregations id retrieved","2744247f-40f8-4dd6-b22c-76a3b38334d8",amr.getAggregationID());
+ assertEquals("thresholds id retrieved","3345c3c1-322a-47f1-982c-1d9df1fc065e",amr.getThresholdsID());
+
+ assertEquals("retrieved config data",dataConfig,amr.getResourceJSON(ApiResource.CONFIG));
+
+
+ // get the profiles metric, aggregation, ops and thresholds
+ amr.getRemoteMetric();
+ amr.getRemoteAggregation();
+ amr.getRemoteOps();
+ amr.getRemoteThresholds();
+
+ assertEquals("retrieved metric profile data",dataMetric,amr.getResourceJSON(ApiResource.METRIC));
+ assertEquals("retrieved aggregation profile data",dataAggr,amr.getResourceJSON(ApiResource.AGGREGATION));
+ assertEquals("retrieved ops profile data",dataOps,amr.getResourceJSON(ApiResource.OPS));
+ assertEquals("retrieved thresholds profile data",dataThresh,amr.getResourceJSON(ApiResource.THRESHOLDS));
+
+ // get remote topology
+
+ amr.getRemoteTopoEndpoints();
+ amr.getRemoteTopoGroups();
+
+ assertEquals("retrieved topology endpoints",dataTopoEnd,amr.getResourceJSON(ApiResource.TOPOENDPOINTS));
+ assertEquals("retrieved topology groups",dataTopoGroup,amr.getResourceJSON(ApiResource.TOPOGROUPS));
+
+
+ // get remote downtimes
+ amr.getRemoteDowntimes();
+ assertEquals("retrieved downtimes",dataDown,amr.getResourceJSON(ApiResource.DOWNTIMES));
+
+ // get weights
+ amr.getRemoteWeights();
+ assertEquals("retrieved downtimes",dataWeights,amr.getResourceJSON(ApiResource.WEIGHTS));
+
+ // get recomputations
+ amr.getRemoteRecomputations();
+ assertEquals("retrieved recomputations",dataRecomp,amr.getResourceJSON(ApiResource.RECOMPUTATIONS));
+
+ // initiate a second amr and check the getRemoteAll routine
+
+
+ ApiResourceManager amr2 = new ApiResourceManager("localhost:8443", "s3cr3t");
+ amr2.setDate("2020-11-01");
+ amr2.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2");
+ amr2.setToken("s3cr3t");
+ amr2.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69");
+ amr2.setVerify(false);
+
+ amr2.getRemoteAll();
+
+ // test amr2 downtime list
+ Downtime[] dtl = amr2.getListDowntimes();
+ assertEquals("downtime list size", 3, dtl.length);
+ assertEquals("downtime data", "WebPortal", dtl[0].getService());
+ assertEquals("downtime data", "hostA.foo", dtl[0].getHostname());
+ assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[0].getStartTime());
+ assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[0].getEndTime());
+ assertEquals("downtime data", "WebPortal", dtl[1].getService());
+ assertEquals("downtime data", "hostB.foo", dtl[1].getHostname());
+ assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[1].getStartTime());
+ assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[1].getEndTime());
+ assertEquals("downtime data", "WebPortald", dtl[2].getService());
+ assertEquals("downtime data", "hostB.foo", dtl[2].getHostname());
+ assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[2].getStartTime());
+ assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[2].getEndTime());
+
+ // test amr2 group endpoint list
+ GroupEndpoint[] gel = amr2.getListGroupEndpoints();
+ assertEquals("group endpoint list size", 3, gel.length);
+ assertEquals("group endpoint data", "SERVICEGROUPS", gel[0].getType());
+ assertEquals("group endpoint data", "groupA", gel[0].getGroup());
+ assertEquals("group endpoint data", "webPortal", gel[0].getService());
+ assertEquals("group endpoint data", "host1.foo.bar", gel[0].getHostname());
+ assertEquals("group endpoint data", "1", gel[0].getTags().get("monitored"));
+ assertEquals("group endpoint data", "1", gel[0].getTags().get("production"));
+ assertEquals("group endpoint data", "FOO", gel[0].getTags().get("scope"));
+
+ assertEquals("group endpoint data", "SERVICEGROUPS", gel[1].getType());
+ assertEquals("group endpoint data", "groupB", gel[1].getGroup());
+ assertEquals("group endpoint data", "webPortal", gel[1].getService());
+ assertEquals("group endpoint data", "host3.foo.bar", gel[1].getHostname());
+ assertEquals("group endpoint data", "1", gel[1].getTags().get("monitored"));
+ assertEquals("group endpoint data", "1", gel[1].getTags().get("production"));
+ assertEquals("group endpoint data", "FOO", gel[1].getTags().get("scope"));
+
+ assertEquals("group endpoint data", "SERVICEGROUPS", gel[2].getType());
+ assertEquals("group endpoint data", "groupA", gel[2].getGroup());
+ assertEquals("group endpoint data", "webPortal", gel[2].getService());
+ assertEquals("group endpoint data", "host2.foo.bar", gel[2].getHostname());
+ assertEquals("group endpoint data", "1", gel[2].getTags().get("monitored"));
+ assertEquals("group endpoint data", "1", gel[2].getTags().get("production"));
+ assertEquals("group endpoint data", "FOO", gel[2].getTags().get("scope"));
+
+ // test amr2 group groups list
+ GroupGroup[] ggl = amr2.getListGroupGroups();
+ assertEquals("group endpoint list size", 2, ggl.length);
+ assertEquals("group endpoint data", "PROJECT", ggl[0].getType());
+ assertEquals("group endpoint data", "ORG-A", ggl[0].getGroup());
+ assertEquals("group endpoint data", "GROUP-101", ggl[0].getSubgroup());
+ assertEquals("group endpoint data", "0", ggl[0].getTags().get("monitored"));
+ assertEquals("group endpoint data", "Local", ggl[0].getTags().get("scope"));
+
+ assertEquals("group endpoint data", "PROJECT", ggl[1].getType());
+ assertEquals("group endpoint data", "ORG-A", ggl[1].getGroup());
+ assertEquals("group endpoint data", "GROUP-202", ggl[1].getSubgroup());
+ assertEquals("group endpoint data", "1", ggl[1].getTags().get("monitored"));
+ assertEquals("group endpoint data", "Local", ggl[1].getTags().get("scope"));
+
+ // test amr2 weights list
+ Weight[] wl = amr2.getListWeights();
+ assertEquals("group endpoint list size", 4, wl.length);
+ assertEquals("group endpoint data", "computationpower", wl[0].getType());
+ assertEquals("group endpoint data", "GROUP-A", wl[0].getSite());
+ assertEquals("group endpoint data", "366", wl[0].getWeight());
+
+ assertEquals("group endpoint data", "computationpower", wl[1].getType());
+ assertEquals("group endpoint data", "GROUP-B", wl[1].getSite());
+ assertEquals("group endpoint data", "4000", wl[1].getWeight());
+
+ assertEquals("group endpoint data", "computationpower", wl[2].getType());
+ assertEquals("group endpoint data", "GROUP-C", wl[2].getSite());
+ assertEquals("group endpoint data", "19838", wl[2].getWeight());
+
+ assertEquals("group endpoint data", "computationpower", wl[3].getType());
+ assertEquals("group endpoint data", "GROUP-D", wl[3].getSite());
+ assertEquals("group endpoint data", "19838", wl[3].getWeight());
+
+ // test amr2 metric profile list
+ MetricProfile[] mpl = amr2.getListMetrics();
+ assertEquals("group endpoint list size", 1, mpl.length);
+ assertEquals("group endpoint data", "test-mon", mpl[0].getProfile());
+ assertEquals("group endpoint data", "WebPortal", mpl[0].getService());
+ assertEquals("group endpoint data", "org.nagios.WebCheck", mpl[0].getMetric());
+ assertEquals("group endpoint data", 0, mpl[0].getTags().size());
+
+
+
+
+ }
+
+}
diff --git a/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java b/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java
index 25d00ecf..96bcfd11 100644
--- a/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java
+++ b/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java
@@ -88,7 +88,7 @@ public void test() throws URISyntaxException, IOException, ParseException {
int runDateInt = Integer.parseInt(runDate.replace("-", ""));
- EndpointAR result = new EndpointAR(runDateInt,report,item.getHostname(),item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f);
+ EndpointAR result = new EndpointAR(runDateInt,report,item.getHostname(),item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f,"URL:https://example.foo");
resultDS.add(result);
}
diff --git a/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java b/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java
index 6374c17f..be1e5f9c 100644
--- a/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java
+++ b/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java
@@ -126,7 +126,6 @@ public void test2() throws URISyntaxException, ParseException, IOException {
dAgg.settleAll(opsMgr.getIntStatus("MISSING"));
dAgg.aggregate("AND", opsMgr);
- System.out.println(Arrays.toString(dAgg.aggregation.samples));
assertArrayEquals("Aggregation test 3", expected, dAgg.aggregation.samples);
}
diff --git a/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java b/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java
index 776a794b..2926b9fb 100644
--- a/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java
+++ b/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java
@@ -59,7 +59,7 @@ public void test() throws URISyntaxException, IOException {
assertEquals("DOWNTIME (AND) UNKNOWN = DOWNTIME", opsMgr.op("AND", "DOWNTIME", "UNKNOWN"), "DOWNTIME");
assertEquals("Default Downtime Status = DOWNTIME", opsMgr.getDefaultDown(), "DOWNTIME");
- System.out.println(opsMgr.getDefaultMissingInt());
+
}
}
diff --git a/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java b/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java
index 99093f48..dac10812 100644
--- a/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java
+++ b/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java
@@ -19,6 +19,7 @@ public class EndpointGroupManagerTest {
public static void setUpBeforeClass() throws Exception {
// Assert that files are present
assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro"));
+ assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro"));
}
@Test
@@ -55,7 +56,29 @@ public void test() throws URISyntaxException, IOException {
// Check non-existent groups
assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false);
assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false);
-
+
+ // Prepare Resource File with extra information in tags
+ URL resAvroFile2 = EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro");
+ File avroFile2 = new File(resAvroFile2.toURI());
+ // Instantiate class
+ EndpointGroupManager ge2 = new EndpointGroupManager();
+ // Test loading file
+ ge2.loadAvro(avroFile2);
+ assertNotNull("File Loaded", ge);
+
+ String exp1 = "URL:host1.example.foo/path/to/service1,DN:foo DN";
+ String exp2 = "URL:host1.example.foo/path/to/service2";
+ String exp3 = "URL:host2.example.foo/path/to/service1";
+ String exp4 = "ext.Value:extension1,URL:host2.example.foo/path/to/service2";
+ String exp5 = "";
+ String exp6 = "URL:host4.example.foo/path/to/service1";
+
+ assertEquals("wrong tags", exp1,ge2.getInfo("groupA", "SERVICEGROUPS", "host1.example.foo_11", "services.url"));
+ assertEquals("wrong tags", exp2,ge2.getInfo("groupB", "SERVICEGROUPS", "host1.example.foo_22", "services.url"));
+ assertEquals("wrong tags", exp3,ge2.getInfo("groupC", "SERVICEGROUPS", "host2.example.foo_33", "services.url"));
+ assertEquals("wrong tags", exp4,ge2.getInfo("groupD", "SERVICEGROUPS", "host2.example.foo_44", "services.url"));
+ assertEquals("wrong tags", exp5,ge2.getInfo("groupE", "SERVICEGROUPS", "host3.example.foo_55", "services.url"));
+ assertEquals("wrong tags", exp6,ge2.getInfo("groupF", "SERVICEGROUPS", "host4.example.foo_66", "services.url"));
}
}
diff --git a/flink_jobs/batch_status/.gitignore b/flink_jobs/batch_status/.gitignore
index ce1b0b79..6c4e323f 100644
--- a/flink_jobs/batch_status/.gitignore
+++ b/flink_jobs/batch_status/.gitignore
@@ -1,7 +1,8 @@
/target/
-
-# Eclipse related
-.classpath
.project
.settings/
+.classpath/
+.classpath
+/nbproject
+nbactions.xml
diff --git a/flink_jobs/batch_status/pom.xml b/flink_jobs/batch_status/pom.xml
index 6562ed49..64be44b6 100644
--- a/flink_jobs/batch_status/pom.xml
+++ b/flink_jobs/batch_status/pom.xml
@@ -8,7 +8,8 @@
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
-
	<modelVersion>4.0.0</modelVersion>
@@ -105,6 +106,18 @@
			<artifactId>gson</artifactId>
			<version>2.2.4</version>
+
+
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>httpclient</artifactId>
+			<version>4.5.13</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>fluent-hc</artifactId>
+			<version>4.5.13</version>
+		</dependency>
@@ -126,7 +139,7 @@
			<artifactId>log4j</artifactId>
			<version>${log4j.version}</version>
-
+
			<groupId>junit-addons</groupId>
			<artifactId>junit-addons</artifactId>
@@ -136,9 +149,22 @@
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
-			<version>4.11</version>
+			<version>4.13.1</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-test-utils_2.11</artifactId>
+			<version>1.8.0</version>
			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>com.github.tomakehurst</groupId>
+			<artifactId>wiremock</artifactId>
+			<version>1.58</version>
+			<scope>test</scope>
+		</dependency>
@@ -205,6 +231,17 @@
			<version>3.2.2</version>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>httpclient</artifactId>
+			<version>4.5.13</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>fluent-hc</artifactId>
+			<version>4.5.13</version>
+		</dependency>
+
diff --git a/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java
new file mode 100644
index 00000000..d8cb13b5
--- /dev/null
+++ b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java
@@ -0,0 +1,5 @@
+package argo.amr;
+
+public enum ApiResource {
+ CONFIG, OPS, METRIC, AGGREGATION, THRESHOLDS, TOPOENDPOINTS, TOPOGROUPS, WEIGHTS, DOWNTIMES, RECOMPUTATIONS
+}
\ No newline at end of file
diff --git a/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java
new file mode 100644
index 00000000..68d14277
--- /dev/null
+++ b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java
@@ -0,0 +1,644 @@
+package argo.amr;
+
+import java.io.IOException;
+import java.security.KeyManagementException;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+
+
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.ssl.SSLContextBuilder;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+
+import argo.avro.Downtime;
+import argo.avro.GroupEndpoint;
+import argo.avro.GroupGroup;
+import argo.avro.MetricProfile;
+import argo.avro.Weight;
+
+
+/**
+ * APIResourceManager class fetches remote argo-web-api resources such as
+ * report configuration, profiles, topology and weights, in JSON format
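+ *
+ * Illustrative usage sketch (the endpoint, token, report id and date below are
+ * placeholder values, not part of this change):
+ *
+ *   ApiResourceManager amr = new ApiResourceManager("api.example.foo", "secret-token");
+ *   amr.setReportID("report-uuid");
+ *   amr.setDate("2021-01-01");
+ *   amr.getRemoteAll();
+ *   MetricProfile[] mps = amr.getListMetrics();
+ *   GroupEndpoint[] egp = amr.getListGroupEndpoints();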
+ */
+
+
+public class ApiResourceManager {
+
+ private EnumMap<ApiResource, String> data = new EnumMap<>(ApiResource.class);
+
+ private String endpoint;
+ private String token;
+ private String reportID;
+ private String date;
+ private String proxy;
+
+ private String metricID;
+ private String aggregationID;
+ private String opsID;
+ private String threshID;
+ private String reportName;
+ private String weightsID;
+ private boolean verify;
+
+
+ public ApiResourceManager(String endpoint, String token) {
+ this.endpoint = endpoint;
+ this.token = token;
+ this.metricID = "";
+ this.aggregationID = "";
+ this.opsID = "";
+ this.threshID = "";
+ this.reportName = "";
+ this.reportID = "";
+ this.date = "";
+ this.proxy = "";
+ this.weightsID = "";
+ this.verify = true;
+
+ }
+
+ public boolean getVerify() {
+ return verify;
+ }
+
+ public void setVerify(boolean verify) {
+ this.verify = verify;
+ }
+
+ public String getEndpoint() {
+ return endpoint;
+ }
+
+ public void setEndpoint(String endpoint) {
+ this.endpoint = endpoint;
+ }
+
+ public String getToken() {
+ return token;
+ }
+
+ public void setToken(String token) {
+ this.token = token;
+ }
+
+ public String getReportID() {
+ return reportID;
+ }
+
+ public void setReportID(String reportID) {
+ this.reportID = reportID;
+ }
+
+ public String getReportName() {
+ return this.reportName;
+ }
+
+ public String getOpsID() {
+ return this.opsID;
+ }
+
+
+ public String getAggregationID() {
+ return this.aggregationID;
+ }
+
+ public String getMetricID() {
+ return this.metricID;
+ }
+
+ public String getThresholdsID() {
+ return this.threshID;
+ }
+
+
+ public String getDate() {
+ return date;
+ }
+
+ public void setDate(String date) {
+ this.date = date;
+ }
+
+ public String getProxy() {
+ return proxy;
+ }
+
+ public void setProxy(String proxy) {
+ this.proxy = proxy;
+ }
+
+ public String getWeightsID() {
+ return weightsID;
+ }
+
+ public void setWeightsID(String weightsID) {
+ this.weightsID = weightsID;
+ }
+
+ /**
+ * Create an SSL Connection Socket Factory with a strategy to trust self signed
+ * certificates
+ */
+ private SSLConnectionSocketFactory selfSignedSSLF()
+ throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
+ SSLContextBuilder sslBuild = new SSLContextBuilder();
+ sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy());
+ return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE);
+ }
+
+ /**
+ * Contacts the remote argo-web-api at the full url of a resource and returns its content (expected in json format)
+ *
+ * @param fullURL String containing the full url representation of the argo-web-api resource
+ * @return A string representation of the resource json content
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ private String getResource(String fullURL) {
+
+
+ Request r = Request.Get(fullURL).addHeader("Accept", "application/json").addHeader("Content-type",
+ "application/json").addHeader("x-api-key",this.token);
+ if (!this.proxy.isEmpty()) {
+ r = r.viaProxy(proxy);
+ }
+
+ r = r.connectTimeout(1000).socketTimeout(1000);
+
+ String content = "{}";
+
+ try {
+ if (this.verify == false) {
+ CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build();
+ Executor executor = Executor.newInstance(httpClient);
+ content = executor.execute(r).returnContent().asString();
+ } else {
+
+ content = r.execute().returnContent().asString();
+ }
+ } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException | IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+
+ return content;
+ }
+
+ /**
+ * Retrieves the remote report configuration based on reportID main class attribute and
+ * stores the content in the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteConfig() {
+ String path = "https://%s/api/v2/reports/%s";
+ String fullURL = String.format(path, this.endpoint, this.reportID);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.CONFIG, getJsonData(content, false));
+ }
+
+
+ /**
+ * Retrieves the metric profile content based on the metric_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteMetric() {
+
+ String path = "https://%s/api/v2/metric_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.metricID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.METRIC, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the aggregation profile content based on the aggregation_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteAggregation() {
+
+ String path = "https://%s/api/v2/aggregation_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.aggregationID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.AGGREGATION, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the ops profile content based on the ops_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteOps() {
+
+ String path = "https://%s/api/v2/operations_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.opsID, this.date);
+
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.OPS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the thresholds profile content based on the thresh_id attribute and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteThresholds() {
+
+ String path = "https://%s/api/v2/thresholds_profiles/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.threshID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.THRESHOLDS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the topology endpoint content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteTopoEndpoints() {
+ String path = "https://%s/api/v2/topology/endpoints/by_report/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.reportName, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.TOPOENDPOINTS, getJsonData(content, true));
+ }
+
+ /**
+ * Retrieves the topology groups content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteTopoGroups() {
+ String path = "https://%s/api/v2/topology/groups/by_report/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.reportName, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.TOPOGROUPS, getJsonData(content, true));
+ }
+
+ /**
+ * Retrieves the weights content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteWeights() {
+ String path = "https://%s/api/v2/weights/%s?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.weightsID, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.WEIGHTS, getJsonData(content, false));
+ }
+
+ /**
+ * Retrieves the downtimes content and stores it to the enum map
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteDowntimes() {
+ String path = "https://%s/api/v2/downtimes?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.DOWNTIMES, getJsonData(content, false));
+ }
+
+ public void getRemoteRecomputations() {
+ String path = "https://%s/api/v2/recomputations?date=%s";
+ String fullURL = String.format(path, this.endpoint, this.date);
+ String content = getResource(fullURL);
+ this.data.put(ApiResource.RECOMPUTATIONS, getJsonData(content, true));
+ }
+
+ /**
+ * Returns the locally stored resource content (after it has been retrieved) based on the resource type
+ *
+ * @param res
+ * @return The extracted items JSON value as string
+ */
+ public String getResourceJSON(ApiResource res) {
+ return this.data.get(res);
+ }
+
+ /**
+ * Executes all steps to retrieve the complete set of available profile,
+ * topology, weights and downtime information from argo-web-api
+ *
+ * @throws ClientProtocolException
+ * @throws IOException
+ * @throws KeyStoreException
+ * @throws NoSuchAlgorithmException
+ * @throws KeyManagementException
+ */
+ public void getRemoteAll() {
+ // Start with report and configuration
+ this.getRemoteConfig();
+ // parse remote report config to be able to get the other profiles
+ this.parseReport();
+ // Go on to the profiles
+ this.getRemoteMetric();
+ this.getRemoteOps();
+ this.getRemoteAggregation();
+ if (!this.threshID.equals("")) this.getRemoteThresholds();
+ // Go to topology
+ this.getRemoteTopoEndpoints();
+ this.getRemoteTopoGroups();
+ // get weights
+ if (!this.weightsID.equals("")) this.getRemoteWeights();
+ // get downtimes
+ this.getRemoteDowntimes();
+ // get recomputations
+ this.getRemoteRecomputations();
+
+ }
+
+ /**
+ * Parses the report content to extract the report's name and the various profile IDs
+ */
+ public void parseReport() {
+ // check if report configuration has been retrieved
+ if (!this.data.containsKey(ApiResource.CONFIG))
+ return;
+
+ String content = this.data.get(ApiResource.CONFIG);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jProfiles = jRoot.get("profiles").getAsJsonArray();
+
+ JsonObject jInfo = jRoot.get("info").getAsJsonObject();
+ this.reportName = jInfo.get("name").getAsString();
+
+ // for each profile iterate and store its id in the profile manager for later
+ // reference
+ for (int i = 0; i < jProfiles.size(); i++) {
+ JsonObject jProf = jProfiles.get(i).getAsJsonObject();
+ String profType = jProf.get("type").getAsString();
+ String profID = jProf.get("id").getAsString();
+ if (profType.equalsIgnoreCase("metric")) {
+ this.metricID = profID;
+ } else if (profType.equalsIgnoreCase("aggregation")) {
+ this.aggregationID = profID;
+ } else if (profType.equalsIgnoreCase("operations")) {
+ this.opsID = profID;
+ } else if (profType.equalsIgnoreCase("thresholds")) {
+ this.threshID = profID;
+ }
+
+ }
+
+ }
+
+ /**
+ * Parses the Downtime content retrieved from argo-web-api and provides a list of Downtime avro objects
+ * to be used in the next steps of the pipeline
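+ *
+ * The content is expected to be a JSON object holding an "endpoints" array whose items
+ * carry hostname, service, start_time and end_time fields, roughly of the form
+ * (values below are illustrative):
+ *   {"endpoints": [{"hostname": "host1.example.foo", "service": "service1",
+ *                   "start_time": "2021-01-01T00:00:00Z", "end_time": "2021-01-01T23:59:59Z"}]}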
+ */
+ public Downtime[] getListDowntimes() {
+ List<Downtime> results = new ArrayList<Downtime>();
+ if (!this.data.containsKey(ApiResource.DOWNTIMES)) {
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.DOWNTIMES);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jElements = jRoot.get("endpoints").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String hostname = jItem.get("hostname").getAsString();
+ String service = jItem.get("service").getAsString();
+ String startTime = jItem.get("start_time").getAsString();
+ String endTime = jItem.get("end_time").getAsString();
+
+ Downtime d = new Downtime(hostname,service,startTime,endTime);
+ results.add(d);
+ }
+
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Topology endpoint content retrieved from argo-web-api and provides a list of GroupEndpoint avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public GroupEndpoint[] getListGroupEndpoints() {
+ List<GroupEndpoint> results = new ArrayList<GroupEndpoint>();
+ if (!this.data.containsKey(ApiResource.TOPOENDPOINTS)) {
+ GroupEndpoint[] rArr = new GroupEndpoint[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.TOPOENDPOINTS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonArray jRoot = jElement.getAsJsonArray();
+ for (int i = 0; i < jRoot.size(); i++) {
+ JsonObject jItem= jRoot.get(i).getAsJsonObject();
+ String group = jItem.get("group").getAsString();
+ String gType = jItem.get("type").getAsString();
+ String service = jItem.get("service").getAsString();
+ String hostname = jItem.get("hostname").getAsString();
+ JsonObject jTags = jItem.get("tags").getAsJsonObject();
+ Map<String, String> tags = new HashMap<String, String>();
+ for (Entry<String, JsonElement> kv : jTags.entrySet()) {
+ tags.put(kv.getKey(), kv.getValue().getAsString());
+ }
+ GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags);
+ results.add(ge);
+ }
+
+ GroupEndpoint[] rArr = new GroupEndpoint[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Topology Groups content retrieved from argo-web-api and provides a list of GroupGroup avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public GroupGroup[] getListGroupGroups() {
+ List<GroupGroup> results = new ArrayList<GroupGroup>();
+ if (!this.data.containsKey(ApiResource.TOPOGROUPS)){
+ GroupGroup[] rArr = new GroupGroup[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ String content = this.data.get(ApiResource.TOPOGROUPS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonArray jRoot = jElement.getAsJsonArray();
+ for (int i = 0; i < jRoot.size(); i++) {
+ JsonObject jItem= jRoot.get(i).getAsJsonObject();
+ String group = jItem.get("group").getAsString();
+ String gType = jItem.get("type").getAsString();
+ String subgroup = jItem.get("subgroup").getAsString();
+ JsonObject jTags = jItem.get("tags").getAsJsonObject();
+ Map<String, String> tags = new HashMap<String, String>();
+ for (Entry<String, JsonElement> kv : jTags.entrySet()) {
+ tags.put(kv.getKey(), kv.getValue().getAsString());
+ }
+ GroupGroup gg = new GroupGroup(gType,group,subgroup,tags);
+ results.add(gg);
+ }
+
+ GroupGroup[] rArr = new GroupGroup[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Weights content retrieved from argo-web-api and provides a list of Weights avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public Weight[] getListWeights() {
+ List<Weight> results = new ArrayList<Weight>();
+ if (!this.data.containsKey(ApiResource.WEIGHTS)) {
+ Weight[] rArr = new Weight[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.WEIGHTS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ String wType = jRoot.get("weight_type").getAsString();
+ JsonArray jElements = jRoot.get("groups").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String group = jItem.get("name").getAsString();
+ String weight = jItem.get("value").getAsString();
+
+ Weight w = new Weight(wType,group,weight);
+ results.add(w);
+ }
+
+ Weight[] rArr = new Weight[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Metric profile content retrieved from argo-web-api and provides a list of MetricProfile avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public MetricProfile[] getListMetrics() {
+ List<MetricProfile> results = new ArrayList<MetricProfile>();
+ if (!this.data.containsKey(ApiResource.METRIC)) {
+ MetricProfile[] rArr = new MetricProfile[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.METRIC);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ String profileName = jRoot.get("name").getAsString();
+ JsonArray jElements = jRoot.get("services").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String service = jItem.get("service").getAsString();
+ JsonArray jMetrics = jItem.get("metrics").getAsJsonArray();
+ for (int j=0; j < jMetrics.size(); j++) {
+ String metric = jMetrics.get(j).getAsString();
+
+ Map<String, String> tags = new HashMap<String, String>();
+ MetricProfile mp = new MetricProfile(profileName,service,metric,tags);
+ results.add(mp);
+ }
+
+ }
+
+ MetricProfile[] rArr = new MetricProfile[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Extract first JSON item from data JSON array in api response
+ *
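+ * For example, given a response envelope of the form {"status": {...}, "data": [{...}, ...]}
+ * (shape assumed from the parsing below), the first element of "data" is returned,
+ * or the whole "data" array as a string when asArray is true.
+ *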
+ * @param content JSON content of the full response (status + data)
+ * @return First available item in data array as JSON string representation
+ *
+ */
+ private String getJsonData(String content, boolean asArray) {
+ JsonParser jsonParser = new JsonParser();
+ // Grab the first - and only line of json from ops data
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ // Get the data array and the first item
+ if (asArray) {
+ return jRoot.get("data").toString();
+ }
+ JsonArray jData = jRoot.get("data").getAsJsonArray();
+ JsonElement jItem = jData.get(0);
+ return jItem.toString();
+ }
+
+}
diff --git a/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java b/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java
new file mode 100644
index 00000000..b73e100d
--- /dev/null
+++ b/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java
@@ -0,0 +1,286 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
+package argo.avro;
+@SuppressWarnings("all")
+@org.apache.avro.specific.AvroGenerated
+public class Downtime extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+ public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Downtime\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"start_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"end_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}");
+ public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
+ @Deprecated public java.lang.String hostname;
+ @Deprecated public java.lang.String service;
+ @Deprecated public java.lang.String start_time;
+ @Deprecated public java.lang.String end_time;
+
+ /**
+ * Default constructor.
+ */
+ public Downtime() {}
+
+ /**
+ * All-args constructor.
+ */
+ public Downtime(java.lang.String hostname, java.lang.String service, java.lang.String start_time, java.lang.String end_time) {
+ this.hostname = hostname;
+ this.service = service;
+ this.start_time = start_time;
+ this.end_time = end_time;
+ }
+
+ public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
+ public java.lang.Object get(int field$) {
+ switch (field$) {
+ case 0: return hostname;
+ case 1: return service;
+ case 2: return start_time;
+ case 3: return end_time;
+ default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+ }
+ }
+ // Used by DatumReader. Applications should not call.
+ @SuppressWarnings(value="unchecked")
+ public void put(int field$, java.lang.Object value$) {
+ switch (field$) {
+ case 0: hostname = (java.lang.String)value$; break;
+ case 1: service = (java.lang.String)value$; break;
+ case 2: start_time = (java.lang.String)value$; break;
+ case 3: end_time = (java.lang.String)value$; break;
+ default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+ }
+ }
+
+ /**
+ * Gets the value of the 'hostname' field.
+ */
+ public java.lang.String getHostname() {
+ return hostname;
+ }
+
+ /**
+ * Sets the value of the 'hostname' field.
+ * @param value the value to set.
+ */
+ public void setHostname(java.lang.String value) {
+ this.hostname = value;
+ }
+
+ /**
+ * Gets the value of the 'service' field.
+ */
+ public java.lang.String getService() {
+ return service;
+ }
+
+ /**
+ * Sets the value of the 'service' field.
+ * @param value the value to set.
+ */
+ public void setService(java.lang.String value) {
+ this.service = value;
+ }
+
+ /**
+ * Gets the value of the 'start_time' field.
+ */
+ public java.lang.String getStartTime() {
+ return start_time;
+ }
+
+ /**
+ * Sets the value of the 'start_time' field.
+ * @param value the value to set.
+ */
+ public void setStartTime(java.lang.String value) {
+ this.start_time = value;
+ }
+
+ /**
+ * Gets the value of the 'end_time' field.
+ */
+ public java.lang.String getEndTime() {
+ return end_time;
+ }
+
+ /**
+ * Sets the value of the 'end_time' field.
+ * @param value the value to set.
+ */
+ public void setEndTime(java.lang.String value) {
+ this.end_time = value;
+ }
+
+ /** Creates a new Downtime RecordBuilder */
+ public static argo.avro.Downtime.Builder newBuilder() {
+ return new argo.avro.Downtime.Builder();
+ }
+
+ /** Creates a new Downtime RecordBuilder by copying an existing Builder */
+ public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime.Builder other) {
+ return new argo.avro.Downtime.Builder(other);
+ }
+
+ /** Creates a new Downtime RecordBuilder by copying an existing Downtime instance */
+ public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime other) {
+ return new argo.avro.Downtime.Builder(other);
+ }
+
+ /**
+ * RecordBuilder for Downtime instances.
+ */
+ public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<Downtime>
+ implements org.apache.avro.data.RecordBuilder<Downtime> {
+
+ private java.lang.String hostname;
+ private java.lang.String service;
+ private java.lang.String start_time;
+ private java.lang.String end_time;
+
+ /** Creates a new Builder */
+ private Builder() {
+ super(argo.avro.Downtime.SCHEMA$);
+ }
+
+ /** Creates a Builder by copying an existing Builder */
+ private Builder(argo.avro.Downtime.Builder other) {
+ super(other);
+ }
+
+ /** Creates a Builder by copying an existing Downtime instance */
+ private Builder(argo.avro.Downtime other) {
+ super(argo.avro.Downtime.SCHEMA$);
+ if (isValidValue(fields()[0], other.hostname)) {
+ this.hostname = data().deepCopy(fields()[0].schema(), other.hostname);
+ fieldSetFlags()[0] = true;
+ }
+ if (isValidValue(fields()[1], other.service)) {
+ this.service = data().deepCopy(fields()[1].schema(), other.service);
+ fieldSetFlags()[1] = true;
+ }
+ if (isValidValue(fields()[2], other.start_time)) {
+ this.start_time = data().deepCopy(fields()[2].schema(), other.start_time);
+ fieldSetFlags()[2] = true;
+ }
+ if (isValidValue(fields()[3], other.end_time)) {
+ this.end_time = data().deepCopy(fields()[3].schema(), other.end_time);
+ fieldSetFlags()[3] = true;
+ }
+ }
+
+ /** Gets the value of the 'hostname' field */
+ public java.lang.String getHostname() {
+ return hostname;
+ }
+
+ /** Sets the value of the 'hostname' field */
+ public argo.avro.Downtime.Builder setHostname(java.lang.String value) {
+ validate(fields()[0], value);
+ this.hostname = value;
+ fieldSetFlags()[0] = true;
+ return this;
+ }
+
+ /** Checks whether the 'hostname' field has been set */
+ public boolean hasHostname() {
+ return fieldSetFlags()[0];
+ }
+
+ /** Clears the value of the 'hostname' field */
+ public argo.avro.Downtime.Builder clearHostname() {
+ hostname = null;
+ fieldSetFlags()[0] = false;
+ return this;
+ }
+
+ /** Gets the value of the 'service' field */
+ public java.lang.String getService() {
+ return service;
+ }
+
+ /** Sets the value of the 'service' field */
+ public argo.avro.Downtime.Builder setService(java.lang.String value) {
+ validate(fields()[1], value);
+ this.service = value;
+ fieldSetFlags()[1] = true;
+ return this;
+ }
+
+ /** Checks whether the 'service' field has been set */
+ public boolean hasService() {
+ return fieldSetFlags()[1];
+ }
+
+ /** Clears the value of the 'service' field */
+ public argo.avro.Downtime.Builder clearService() {
+ service = null;
+ fieldSetFlags()[1] = false;
+ return this;
+ }
+
+ /** Gets the value of the 'start_time' field */
+ public java.lang.String getStartTime() {
+ return start_time;
+ }
+
+ /** Sets the value of the 'start_time' field */
+ public argo.avro.Downtime.Builder setStartTime(java.lang.String value) {
+ validate(fields()[2], value);
+ this.start_time = value;
+ fieldSetFlags()[2] = true;
+ return this;
+ }
+
+ /** Checks whether the 'start_time' field has been set */
+ public boolean hasStartTime() {
+ return fieldSetFlags()[2];
+ }
+
+ /** Clears the value of the 'start_time' field */
+ public argo.avro.Downtime.Builder clearStartTime() {
+ start_time = null;
+ fieldSetFlags()[2] = false;
+ return this;
+ }
+
+ /** Gets the value of the 'end_time' field */
+ public java.lang.String getEndTime() {
+ return end_time;
+ }
+
+ /** Sets the value of the 'end_time' field */
+ public argo.avro.Downtime.Builder setEndTime(java.lang.String value) {
+ validate(fields()[3], value);
+ this.end_time = value;
+ fieldSetFlags()[3] = true;
+ return this;
+ }
+
+ /** Checks whether the 'end_time' field has been set */
+ public boolean hasEndTime() {
+ return fieldSetFlags()[3];
+ }
+
+ /** Clears the value of the 'end_time' field */
+ public argo.avro.Downtime.Builder clearEndTime() {
+ end_time = null;
+ fieldSetFlags()[3] = false;
+ return this;
+ }
+
+ @Override
+ public Downtime build() {
+ try {
+ Downtime record = new Downtime();
+ record.hostname = fieldSetFlags()[0] ? this.hostname : (java.lang.String) defaultValue(fields()[0]);
+ record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]);
+ record.start_time = fieldSetFlags()[2] ? this.start_time : (java.lang.String) defaultValue(fields()[2]);
+ record.end_time = fieldSetFlags()[3] ? this.end_time : (java.lang.String) defaultValue(fields()[3]);
+ return record;
+ } catch (Exception e) {
+ throw new org.apache.avro.AvroRuntimeException(e);
+ }
+ }
+ }
+}
diff --git a/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java b/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java
new file mode 100644
index 00000000..0238d7cf
--- /dev/null
+++ b/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java
@@ -0,0 +1,236 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
+package argo.avro;
+@SuppressWarnings("all")
+@org.apache.avro.specific.AvroGenerated
+public class Weight extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+ public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Weight\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"site\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"weight\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}");
+ public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
+ @Deprecated public java.lang.String type;
+ @Deprecated public java.lang.String site;
+ @Deprecated public java.lang.String weight;
+
+ /**
+ * Default constructor.
+ */
+ public Weight() {}
+
+ /**
+ * All-args constructor.
+ */
+ public Weight(java.lang.String type, java.lang.String site, java.lang.String weight) {
+ this.type = type;
+ this.site = site;
+ this.weight = weight;
+ }
+
+ public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
+ public java.lang.Object get(int field$) {
+ switch (field$) {
+ case 0: return type;
+ case 1: return site;
+ case 2: return weight;
+ default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+ }
+ }
+ // Used by DatumReader. Applications should not call.
+ @SuppressWarnings(value="unchecked")
+ public void put(int field$, java.lang.Object value$) {
+ switch (field$) {
+ case 0: type = (java.lang.String)value$; break;
+ case 1: site = (java.lang.String)value$; break;
+ case 2: weight = (java.lang.String)value$; break;
+ default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+ }
+ }
+
+ /**
+ * Gets the value of the 'type' field.
+ */
+ public java.lang.String getType() {
+ return type;
+ }
+
+ /**
+ * Sets the value of the 'type' field.
+ * @param value the value to set.
+ */
+ public void setType(java.lang.String value) {
+ this.type = value;
+ }
+
+ /**
+ * Gets the value of the 'site' field.
+ */
+ public java.lang.String getSite() {
+ return site;
+ }
+
+ /**
+ * Sets the value of the 'site' field.
+ * @param value the value to set.
+ */
+ public void setSite(java.lang.String value) {
+ this.site = value;
+ }
+
+ /**
+ * Gets the value of the 'weight' field.
+ */
+ public java.lang.String getWeight() {
+ return weight;
+ }
+
+ /**
+ * Sets the value of the 'weight' field.
+ * @param value the value to set.
+ */
+ public void setWeight(java.lang.String value) {
+ this.weight = value;
+ }
+
+ /** Creates a new Weight RecordBuilder */
+ public static argo.avro.Weight.Builder newBuilder() {
+ return new argo.avro.Weight.Builder();
+ }
+
+ /** Creates a new Weight RecordBuilder by copying an existing Builder */
+ public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight.Builder other) {
+ return new argo.avro.Weight.Builder(other);
+ }
+
+ /** Creates a new Weight RecordBuilder by copying an existing Weight instance */
+ public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight other) {
+ return new argo.avro.Weight.Builder(other);
+ }
+
+ /**
+ * RecordBuilder for Weight instances.
+ */
+ public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<Weight>
+ implements org.apache.avro.data.RecordBuilder<Weight> {
+
+ private java.lang.String type;
+ private java.lang.String site;
+ private java.lang.String weight;
+
+ /** Creates a new Builder */
+ private Builder() {
+ super(argo.avro.Weight.SCHEMA$);
+ }
+
+ /** Creates a Builder by copying an existing Builder */
+ private Builder(argo.avro.Weight.Builder other) {
+ super(other);
+ }
+
+ /** Creates a Builder by copying an existing Weight instance */
+ private Builder(argo.avro.Weight other) {
+ super(argo.avro.Weight.SCHEMA$);
+ if (isValidValue(fields()[0], other.type)) {
+ this.type = data().deepCopy(fields()[0].schema(), other.type);
+ fieldSetFlags()[0] = true;
+ }
+ if (isValidValue(fields()[1], other.site)) {
+ this.site = data().deepCopy(fields()[1].schema(), other.site);
+ fieldSetFlags()[1] = true;
+ }
+ if (isValidValue(fields()[2], other.weight)) {
+ this.weight = data().deepCopy(fields()[2].schema(), other.weight);
+ fieldSetFlags()[2] = true;
+ }
+ }
+
+ /** Gets the value of the 'type' field */
+ public java.lang.String getType() {
+ return type;
+ }
+
+ /** Sets the value of the 'type' field */
+ public argo.avro.Weight.Builder setType(java.lang.String value) {
+ validate(fields()[0], value);
+ this.type = value;
+ fieldSetFlags()[0] = true;
+ return this;
+ }
+
+ /** Checks whether the 'type' field has been set */
+ public boolean hasType() {
+ return fieldSetFlags()[0];
+ }
+
+ /** Clears the value of the 'type' field */
+ public argo.avro.Weight.Builder clearType() {
+ type = null;
+ fieldSetFlags()[0] = false;
+ return this;
+ }
+
+ /** Gets the value of the 'site' field */
+ public java.lang.String getSite() {
+ return site;
+ }
+
+ /** Sets the value of the 'site' field */
+ public argo.avro.Weight.Builder setSite(java.lang.String value) {
+ validate(fields()[1], value);
+ this.site = value;
+ fieldSetFlags()[1] = true;
+ return this;
+ }
+
+ /** Checks whether the 'site' field has been set */
+ public boolean hasSite() {
+ return fieldSetFlags()[1];
+ }
+
+ /** Clears the value of the 'site' field */
+ public argo.avro.Weight.Builder clearSite() {
+ site = null;
+ fieldSetFlags()[1] = false;
+ return this;
+ }
+
+ /** Gets the value of the 'weight' field */
+ public java.lang.String getWeight() {
+ return weight;
+ }
+
+ /** Sets the value of the 'weight' field */
+ public argo.avro.Weight.Builder setWeight(java.lang.String value) {
+ validate(fields()[2], value);
+ this.weight = value;
+ fieldSetFlags()[2] = true;
+ return this;
+ }
+
+ /** Checks whether the 'weight' field has been set */
+ public boolean hasWeight() {
+ return fieldSetFlags()[2];
+ }
+
+ /** Clears the value of the 'weight' field */
+ public argo.avro.Weight.Builder clearWeight() {
+ weight = null;
+ fieldSetFlags()[2] = false;
+ return this;
+ }
+
+ @Override
+ public Weight build() {
+ try {
+ Weight record = new Weight();
+ record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]);
+ record.site = fieldSetFlags()[1] ? this.site : (java.lang.String) defaultValue(fields()[1]);
+ record.weight = fieldSetFlags()[2] ? this.weight : (java.lang.String) defaultValue(fields()[2]);
+ return record;
+ } catch (Exception e) {
+ throw new org.apache.avro.AvroRuntimeException(e);
+ }
+ }
+ }
+}
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java b/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java
index 1e7b94b1..f96123a1 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java
@@ -2,6 +2,8 @@
import org.slf4j.LoggerFactory;
+import argo.amr.ApiResource;
+import argo.amr.ApiResourceManager;
import argo.avro.GroupEndpoint;
import argo.avro.GroupGroup;
import argo.avro.MetricData;
@@ -11,11 +13,8 @@
import org.slf4j.Logger;
import java.util.List;
-import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.operators.Order;
-import org.apache.flink.api.common.restartstrategy.RestartStrategies;
-import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.AvroInputFormat;
@@ -58,16 +57,27 @@ public static void main(String[] args) throws Exception {
env.getConfig().setGlobalJobParameters(params);
env.setParallelism(1);
- // sync data for input
- Path mps = new Path(params.getRequired("mps"));
- Path egp = new Path(params.getRequired("egp"));
- Path ggp = new Path(params.getRequired("ggp"));
+ String apiEndpoint = params.getRequired("api.endpoint");
+ String apiToken = params.getRequired("api.token");
+ String reportID = params.getRequired("report.id");
+
+ ApiResourceManager amr = new ApiResourceManager(apiEndpoint,apiToken);
+
+ // fetch
+
+ // set params
+ if (params.has("api.proxy")) {
+ amr.setProxy(params.get("api.proxy"));
+ }
+
+ amr.setReportID(reportID);
+ amr.getRemoteAll();
- DataSource<String> cfgDS = env.readTextFile(params.getRequired("conf"));
- DataSource<String> opsDS = env.readTextFile(params.getRequired("ops"));
- DataSource<String> apsDS = env.readTextFile(params.getRequired("apr"));
- DataSource<String> recDS = env.readTextFile(params.getRequired("rec"));
+ DataSource<String> cfgDS = env.fromElements(amr.getResourceJSON(ApiResource.CONFIG));
+ DataSource<String> opsDS = env.fromElements(amr.getResourceJSON(ApiResource.OPS));
+ DataSource<String> apsDS = env.fromElements(amr.getResourceJSON(ApiResource.AGGREGATION));
+ DataSource<String> recDS = env.fromElements(amr.getResourceJSON(ApiResource.RECOMPUTATIONS));
// begin with empty threshold datasource
DataSource<String> thrDS = env.fromElements("");
@@ -84,18 +94,15 @@ public static void main(String[] args) throws Exception {
List<String> confData = cfgDS.collect();
ConfigManager cfgMgr = new ConfigManager();
cfgMgr.loadJsonString(confData);
- // sync data input: metric profile in avro format
- AvroInputFormat<MetricProfile> mpsAvro = new AvroInputFormat<MetricProfile>(mps, MetricProfile.class);
- DataSet<MetricProfile> mpsDS = env.createInput(mpsAvro);
-
- // sync data input: endpoint group topology data in avro format
- AvroInputFormat<GroupEndpoint> egpAvro = new AvroInputFormat<GroupEndpoint>(egp, GroupEndpoint.class);
- DataSet<GroupEndpoint> egpDS = env.createInput(egpAvro);
-
- // sync data input: group of group topology data in avro format
- AvroInputFormat<GroupGroup> ggpAvro = new AvroInputFormat<GroupGroup>(ggp, GroupGroup.class);
- DataSet<GroupGroup> ggpDS = env.createInput(ggpAvro);
-
+
+ DataSet<MetricProfile> mpsDS = env.fromElements(amr.getListMetrics());
+ DataSet<GroupEndpoint> egpDS = env.fromElements(amr.getListGroupEndpoints());
+ DataSet<GroupGroup> ggpDS = env.fromElements(new GroupGroup());
+ GroupGroup[] listGroups = amr.getListGroupGroups();
+ if (listGroups.length > 0) ggpDS = env.fromElements(amr.getListGroupGroups());
+
+
+
// todays metric data
Path in = new Path(params.getRequired("mdata"));
AvroInputFormat<MetricData> mdataAvro = new AvroInputFormat<MetricData>(in, MetricData.class);
@@ -105,9 +112,11 @@ public static void main(String[] args) throws Exception {
Path pin = new Path(params.getRequired("pdata"));
AvroInputFormat<MetricData> pdataAvro = new AvroInputFormat<MetricData>(pin, MetricData.class);
DataSet<MetricData> pdataDS = env.createInput(pdataAvro);
+
+ DataSet<MetricData> pdataCleanDS = pdataDS.flatMap(new ExcludeMetricData(params)).withBroadcastSet(recDS, "rec");
// Find the latest day
- DataSet<MetricData> pdataMin = pdataDS.groupBy("service", "hostname", "metric")
+ DataSet<MetricData> pdataMin = pdataCleanDS.groupBy("service", "hostname", "metric")
.sortGroup("timestamp", Order.DESCENDING).first(1);
// Union todays data with the latest statuses from previous day
@@ -161,7 +170,7 @@ public static void main(String[] args) throws Exception {
String dbURI = params.getRequired("mongo.uri");
String dbMethod = params.getRequired("mongo.method");
- String reportID = cfgMgr.getReportID();
+
// Initialize four mongo outputs (metric,endpoint,service,endpoint_group)
MongoStatusOutput metricMongoOut = new MongoStatusOutput(dbURI,"status_metrics",dbMethod, MongoStatusOutput.StatusType.STATUS_METRIC, reportID);
MongoStatusOutput endpointMongoOut = new MongoStatusOutput(dbURI,"status_endpoints",dbMethod, MongoStatusOutput.StatusType.STATUS_ENDPOINT, reportID);
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java
index b36d3ec2..d77b760d 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java
@@ -14,9 +14,12 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import ops.CAggregator;
import ops.OpsManager;
+
+//import ops.OpsManager;
import sync.AggregationProfileManager;
+import timelines.TimelineAggregator;
+
/**
* Accepts a list o status metrics grouped by the fields: endpoint group
@@ -45,7 +48,7 @@ public CalcStatusEndGroup(ParameterTool params) {
private String runDate;
- public HashMap<String, CAggregator> groupEndpointAggr;
+ public HashMap<String, TimelineAggregator> groupEndpointAggr;
private boolean getGroup;
@@ -67,7 +70,7 @@ public void open(Configuration parameters) throws IOException {
// Initialize endpoint group type
this.runDate = params.getRequired("run.date");
// set the Structures
- this.groupEndpointAggr = new HashMap<String, CAggregator>();
+ this.groupEndpointAggr = new HashMap<String, TimelineAggregator>();
this.getGroup = true;
}
@@ -105,7 +108,7 @@ public void reduce(Iterable in, Collector out) throw
// if group doesn't exist yet create it
if (this.groupEndpointAggr.containsKey(group) == false) {
- this.groupEndpointAggr.put(group, new CAggregator());
+ this.groupEndpointAggr.put(group, new TimelineAggregator());
}
this.groupEndpointAggr.get(group).insert(service, ts, this.opsMgr.getIntStatus(status));
@@ -119,14 +122,26 @@ public void reduce(Iterable in, Collector out) throw
// Get group Operation
String gop = this.apsMgr.getProfileGroupOp(aProfile, group);
-
- this.groupEndpointAggr.get(group).aggregate(this.opsMgr, gop);
+
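+ // aggregate this group's timeline using the operations truth table and the
+ // integer id of the group operation (e.g. AND/OR) instead of passing the OpsManager itself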
+ this.groupEndpointAggr.get(group).aggregate(this.opsMgr.getTruthTable(), this.opsMgr.getIntOperation(gop));
}
// Aggregate all sites
- CAggregator totalSite = new CAggregator();
+ TimelineAggregator totalSite = new TimelineAggregator();
+
+ // Aggregate each group
+ for (String group : this.groupEndpointAggr.keySet()) {
+ for (Entry<DateTime, Integer> item : this.groupEndpointAggr.get(group).getSamples()) {
+ String ts = item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"));
+ totalSite.insert(group,ts, item.getValue());
+ }
+
+ }
+
+ totalSite.aggregate( this.opsMgr.getTruthTable(),this.opsMgr.getIntOperation(apsMgr.getTotalOp(aProfile)));
+
// Aggregate each group
for (String group : this.groupEndpointAggr.keySet()) {
for (Entry<DateTime, Integer> item : this.groupEndpointAggr.get(group).getSamples()) {
@@ -136,7 +151,7 @@ public void reduce(Iterable in, Collector out) throw
}
- totalSite.aggregate( this.opsMgr,apsMgr.getTotalOp(aProfile));
+ totalSite.aggregate( this.opsMgr.getTruthTable(),this.opsMgr.getIntOperation(apsMgr.getTotalOp(aProfile)));
// Append the timeline
for (Entry<DateTime, Integer> item : totalSite.getSamples()) {
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java
index c65f9e57..9d4dc938 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java
@@ -14,16 +14,13 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.esotericsoftware.minlog.Log;
-
-import argo.avro.GroupGroup;
import argo.avro.MetricProfile;
import ops.CAggregator;
import ops.OpsManager;
import sync.AggregationProfileManager;
-import sync.GroupGroupManager;
import sync.MetricProfileManager;
+import timelines.TimelineAggregator;
/**
@@ -50,7 +47,7 @@ public CalcStatusEndpoint(ParameterTool params) {
private AggregationProfileManager apsMgr;
private OpsManager opsMgr;
private String runDate;
- private CAggregator endpointAggr;
+ private TimelineAggregator endpointAggr;
private boolean fillMissing;
@@ -74,7 +71,7 @@ public void open(Configuration parameters) throws IOException {
this.opsMgr.loadJsonString(ops);
this.runDate = params.getRequired("run.date");
- this.endpointAggr = new CAggregator(); // Create aggregator
+ this.endpointAggr = new TimelineAggregator(); // Create aggregator
this.fillMissing = true;
}
@@ -98,6 +95,7 @@ public void reduce(Iterable in, Collector out) throw
String service ="";
String endpointGroup ="";
String hostname ="";
+ String info = "";
int dateInt = Integer.parseInt(this.runDate.replace("-", ""));
@@ -129,6 +127,7 @@ public void reduce(Iterable in, Collector out) throw
String ts = item.getTimestamp();
String status = item.getStatus();
String prevStatus = item.getPrevState();
+ info = item.getInfo();
// Check if we are in the switch of a new metric name
@@ -143,7 +142,7 @@ public void reduce(Iterable in, Collector out) throw
}
- this.endpointAggr.aggregate(this.opsMgr, this.apsMgr.getMetricOp(aprofile));
+ this.endpointAggr.aggregate(this.opsMgr.getTruthTable(), this.opsMgr.getIntOperation(this.apsMgr.getMetricOp(aprofile)));
// Append the timeline
@@ -156,6 +155,8 @@ public void reduce(Iterable in, Collector out) throw
cur.setGroup(endpointGroup);
cur.setHostname(hostname);
cur.setService(service);
+ cur.setInfo(info);
+
cur.setTimestamp(item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")));
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java
index 8105888a..ec9e9b92 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java
@@ -1,7 +1,6 @@
package argo.batch;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
@@ -12,19 +11,13 @@
import org.apache.flink.util.Collector;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
-import org.mortbay.log.Log;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import argo.avro.GroupEndpoint;
-import argo.avro.GroupGroup;
-
-import argo.avro.MetricProfile;
import ops.CAggregator;
import ops.OpsManager;
import sync.AggregationProfileManager;
-import sync.GroupGroupManager;
-import sync.MetricProfileManager;
+import timelines.TimelineAggregator;
/**
@@ -54,7 +47,7 @@ public CalcStatusService(ParameterTool params) {
private String runDate;
- private CAggregator serviceAggr;
+ private TimelineAggregator serviceAggr;
private boolean getService;
@@ -75,7 +68,7 @@ public void open(Configuration parameters) throws IOException {
// Initialize endpoint group type
this.runDate = params.getRequired("run.date");
- this.serviceAggr = new CAggregator(); // Create aggregator
+ this.serviceAggr = new TimelineAggregator(); // Create aggregator
this.getService = true;
}
@@ -121,7 +114,7 @@ public void reduce(Iterable in, Collector out) throw
avGroup = this.apsMgr.getGroupByService(aProfile, service);
String avOp = this.apsMgr.getProfileGroupServiceOp(aProfile, avGroup, service);
- this.serviceAggr.aggregate(this.opsMgr, avOp);
+ this.serviceAggr.aggregate(this.opsMgr.getTruthTable(), this.opsMgr.getIntOperation(avOp));
// Append the timeline
for (Entry<DateTime, Integer> item : this.serviceAggr.getSamples()) {
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java b/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java
new file mode 100644
index 00000000..f18a4743
--- /dev/null
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java
@@ -0,0 +1,70 @@
+package argo.batch;
+
+import java.io.IOException;
+import java.text.ParseException;
+
+import java.util.List;
+
+
+
+import org.apache.flink.api.common.functions.RichFlatMapFunction;
+
+import org.apache.flink.api.java.utils.ParameterTool;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.util.Collector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import argo.avro.MetricData;
+
+import sync.RecomputationsManager;
+
+/**
+ * Receives MetricData items and filters out those that must be excluded for a monitoring engine,
+ * based on recomputation information retrieved from the broadcast variable "rec" and handled by an internal recomputations manager
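+ *
+ * Typical wiring in the batch job (as used in ArgoStatusBatch):
+ *   DataSet<MetricData> pdataCleanDS = pdataDS.flatMap(new ExcludeMetricData(params)).withBroadcastSet(recDS, "rec");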
+ */
+public class ExcludeMetricData extends RichFlatMapFunction<MetricData, MetricData> {
+
+ private static final long serialVersionUID = 1L;
+
+ final ParameterTool params;
+
+ public ExcludeMetricData(ParameterTool params){
+ this.params = params;
+ }
+
+ static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class);
+
+ private List<String> rec;
+ private RecomputationsManager recMgr;
+
+ @Override
+ public void open(Configuration parameters) throws IOException, ParseException {
+ // Get recomputation data from broadcast variable
+ this.rec = getRuntimeContext().getBroadcastVariable("rec");
+
+ // Initialize Recomputation manager
+ this.recMgr = new RecomputationsManager();
+ this.recMgr.loadJsonString(rec);
+
+ }
+
+ @Override
+ public void flatMap(MetricData md, Collector<MetricData> out) throws Exception {
+
+ // Get monitoring host from input metric data
+ String monHost = md.getMonitoringHost();
+ // Get timestamp from input metric data
+ String ts = md.getTimestamp();
+
+ // Check if monitoring host and metric data coincide with exclusions by monitoring
+ // engine in the current available recomputations
+ if (recMgr.isMonExcluded(monHost, ts) == true) return;
+
+ // if not excluded collect the result in the output
+ out.collect(md);
+
+
+ }
+}
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java b/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java
index 65b52e98..aa13e71d 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java
@@ -109,8 +109,23 @@ private Document prepDoc(StatusMetric record) {
} else if (this.sType == StatusType.STATUS_ENDPOINT) {
+
doc.append("service", record.getService())
.append("host", record.getHostname());
+
+ String info = record.getInfo();
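+ // info is a comma-separated list of key:value pairs, e.g.
+ // "URL:host1.example.foo/path/to/service1,DN:foo DN" (values illustrative);
+ // each pair becomes a field of a nested "info" sub-document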
+ if (!info.equalsIgnoreCase("")) {
+ Document infoDoc = new Document();
+ String[] kvs = info.split(",");
+ for (String kv : kvs) {
+ String[] kvtok = kv.split(":",2);
+ if (kvtok.length == 2){
+ infoDoc.append(kvtok[0], kvtok[1]);
+ }
+ }
+
+ doc.append("info", infoDoc);
+ }
} else if (this.sType == StatusType.STATUS_METRIC) {
@@ -153,6 +168,7 @@ private Bson prepFilter(StatusMetric record) {
} else if (this.sType == StatusType.STATUS_ENDPOINT) {
+
return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()),
Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()),
Filters.eq("host", record.getHostname()), Filters.eq("timestamp", record.getTimestamp()));
@@ -181,7 +197,8 @@ public void writeRecord(StatusMetric record) throws IOException {
// Mongo Document to be prepared according to StatusType of input
Document doc = prepDoc(record);
-
+
+
if (this.method == MongoMethod.UPSERT) {
// Filter for upsert to be prepared according to StatusType of input
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java b/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java
index 717af10a..be2803f8 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java
@@ -129,22 +129,20 @@ public void flatMap(MetricData md, Collector out) throws Exception
String metric = md.getMetric();
String monHost = md.getMonitoringHost();
String ts = md.getTimestamp();
-
+
// Filter By monitoring engine
if (recMgr.isMonExcluded(monHost, ts) == true) return;
-
+
// Filter By aggregation profile
if (apsMgr.checkService(aprof, service) == false) return;
-
+
// Filter By metric profile
if (mpsMgr.checkProfileServiceMetric(prof, service, metric) == false) return;
-
-
-
// Filter By endpoint group if belongs to supergroup
ArrayList<String> groupnames = egpMgr.getGroup(egroupType, hostname, service);
+
for (String groupname : groupnames) {
if (ggpMgr.checkSubGroup(groupname) == true){
// Create a StatusMetric output
@@ -178,7 +176,12 @@ public void flatMap(MetricData md, Collector out) throws Exception
}
- StatusMetric sm = new StatusMetric(groupname,md.getService(),md.getHostname(),md.getMetric(), status,md.getTimestamp(),dateInt,timeInt,md.getSummary(),md.getMessage(),"","",actualData, ogStatus, ruleApplied);
+
+
+
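+ // attach any extra endpoint information (e.g. URL tags) exposed by the topology
+ // for this group/host/service so it can be stored alongside the status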
+ String info = this.egpMgr.getInfo(groupname, egroupType, md.getHostname(), md.getService());
+
+ StatusMetric sm = new StatusMetric(groupname,md.getService(),md.getHostname(),md.getMetric(), status,md.getTimestamp(),dateInt,timeInt,md.getSummary(),md.getMessage(),"","",actualData, ogStatus, ruleApplied,info);
out.collect(sm);
}
diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java b/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java
index 9bf147c1..df5bb583 100644
--- a/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java
+++ b/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java
@@ -19,6 +19,7 @@ public class StatusMetric {
private String actualData;
private String ogStatus; // original status from monitoring host
private String ruleApplied; // threshold rule applied - empty if not
+ private String info; // extra endpoint information provided by the topology
public StatusMetric(){
this.group = "";
@@ -36,10 +37,11 @@ public StatusMetric(){
this.actualData = "";
this.ogStatus = "";
this.ruleApplied = "";
+ this.info = "";
}
public StatusMetric(String group, String service, String hostname, String metric, String status, String timestamp,
- int dateInt, int timeInt, String summary, String message, String prevState, String prevTs, String actualData, String ogStatus, String ruleApplied) {
+ int dateInt, int timeInt, String summary, String message, String prevState, String prevTs, String actualData, String ogStatus, String ruleApplied, String info) {
this.group = group;
this.service = service;
@@ -56,6 +58,7 @@ public StatusMetric(String group, String service, String hostname, String metric
this.actualData = actualData;
this.ogStatus = ogStatus;
this.ruleApplied = ruleApplied;
+ this.info = info;
}
@@ -158,10 +161,18 @@ public void setRuleApplied(String ruleApplied) {
this.ruleApplied = ruleApplied;
}
+ public String getInfo() {
+ return this.info;
+ }
+
+ public void setInfo(String info) {
+ this.info = info;
+ }
+
@Override
public String toString() {
return "(" + this.group + "," + this.service + "," + this.hostname + "," + this.metric + "," + this.status + "," + this.timestamp + "," +
- this.dateInt + "," + this.timeInt + "," + this.prevState + "," + this.prevTs + "," + this.actualData + "," + this.ogStatus + "," + this.ruleApplied + ")";
+ this.dateInt + "," + this.timeInt + "," + this.prevState + "," + this.prevTs + "," + this.actualData + "," + this.ogStatus + "," + this.ruleApplied + "," + this.info + ")";
}
}
diff --git a/flink_jobs/batch_status/src/main/java/ops/OpsManager.java b/flink_jobs/batch_status/src/main/java/ops/OpsManager.java
index 341c9260..f0a262bc 100644
--- a/flink_jobs/batch_status/src/main/java/ops/OpsManager.java
+++ b/flink_jobs/batch_status/src/main/java/ops/OpsManager.java
@@ -22,290 +22,236 @@
public class OpsManager {
- private static final Logger LOG = Logger.getLogger(OpsManager.class.getName());
+ private static final Logger LOG = Logger.getLogger(OpsManager.class.getName());
- private HashMap<String, Integer> states;
- private HashMap<String, Integer> ops;
- private ArrayList<String> revStates;
- private ArrayList<String> revOps;
+ private HashMap<String, Integer> states;
+ private HashMap<String, Integer> ops;
+ private ArrayList<String> revStates;
+ private ArrayList<String> revOps;
- private int[][][] truthTable;
+ private int[][][] truthTable;
- private String defaultDownState;
- private String defaultMissingState;
- private String defaultUnknownState;
+ private String defaultDownState;
+ private String defaultMissingState;
+ private String defaultUnknownState;
- private boolean order;
+ private boolean order;
- public OpsManager() {
- this.states = new HashMap<String, Integer>();
- this.ops = new HashMap<String, Integer>();
- this.revStates = new ArrayList<String>();
- this.revOps = new ArrayList<String>();
+ public OpsManager() {
+ this.states = new HashMap<String, Integer>();
+ this.ops = new HashMap<String, Integer>();
+ this.revStates = new ArrayList<String>();
+ this.revOps = new ArrayList<String>();
- this.truthTable = null;
+ this.truthTable = null;
- this.order = false;
+ this.order = false;
- }
+ }
- public OpsManager(boolean _order) {
- this.states = new HashMap<String, Integer>();
- this.ops = new HashMap<String, Integer>();
- this.revStates = new ArrayList<String>();
- this.revOps = new ArrayList<String>();
- this.order = _order;
+ public OpsManager(boolean _order) {
+ this.states = new HashMap<String, Integer>();
+ this.ops = new HashMap<String, Integer>();
+ this.revStates = new ArrayList<String>();
+ this.revOps = new ArrayList<String>();
+ this.order = _order;
- this.truthTable = null;
- }
+ this.truthTable = null;
+ }
- public String getDefaultDown() {
- return this.defaultDownState;
- }
+ public String getDefaultDown() {
+ return this.defaultDownState;
+ }
- public String getDefaultUnknown() {
- return this.defaultUnknownState;
- }
+ public String getDefaultUnknown() {
+ return this.defaultUnknownState;
+ }
- public int getDefaultUnknownInt() {
- return this.getIntStatus(this.defaultUnknownState);
- }
+ public int getDefaultUnknownInt() {
+ return this.getIntStatus(this.defaultUnknownState);
+ }
- public int getDefaultDownInt() {
- return this.getIntStatus(this.defaultDownState);
- }
+ public int getDefaultDownInt() {
+ return this.getIntStatus(this.defaultDownState);
+ }
- public String getDefaultMissing() {
- return this.defaultMissingState;
- }
-
- public int getDefaultMissingInt() {
- return this.getIntStatus(this.defaultMissingState);
- }
-
- public void clear() {
- this.states = new HashMap<String, Integer>();
- this.ops = new HashMap