diff --git a/.gitignore b/.gitignore index 1ae7cfb7..a51fb346 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,8 @@ __pycache__ .DS_STORE conf.cfg .pytest_cache + +# Java related +.classpath +.project +.settings/ diff --git a/.travis.yml b/.travis.yml index 1723c6bc..e7163c60 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,17 @@ # Set-up a python centric enviroment in order to easily choose py version:2.7 # bonus: Java 7 and mvn also included language: python -# Target py version 2.7 +# Target py version 3.6 python: - - "2.7" + - "3.6" script: - pip install -r ./bin/requirements.txt - pytest - - cd flink_jobs/ams_ingest_metric/ && travis_wait mvn test - - cd ../batch_ar && travis_wait mvn test - - cd ../batch_status && travis_wait mvn test - - cd ../stream_status && travis_wait mvn test - - cd ../ams_ingest_sync && travis_wait mvn test + - cd flink_jobs/ams_ingest_metric/ && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test + - cd ../batch_ar && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test + - cd ../batch_status && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test + - cd ../stream_status && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test + - cd ../ams_ingest_sync && travis_wait mvn -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test + diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 00000000..c22985f4 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,79 @@ + +pipeline { + agent none + options { + checkoutToSubdirectory('argo-streaming') + newContainerPerStage() + } + environment { + PROJECT_DIR='argo-streaming' + REQUIREMENTS="${PROJECT_DIR}/bin/requirements.txt" + } + stages { + stage('Configuration scripts Tests') { + agent { + docker { + image 'argo.registry:5000/epel-7-py36' + args '-u jenkins:jenkins' + } + } + steps { + echo 'Testing compute engine auto configuration scripts' + sh """ + pip3 install -r ${REQUIREMENTS} --user + pytest --junit-xml=${PROJECT_DIR}/junit.xml --cov=${PROJECT_DIR} --cov-report=xml + """ + junit '**/junit.xml' + cobertura coberturaReportFile: '**/coverage.xml' + } + post { + always { + cleanWs() + } + } + } + stage('Flink Jobs Testing & Packaging') { + agent { + docker { + image 'argo.registry:5000/epel-7-java18' + args '-u jenkins:jenkins' + } + } + steps { + echo 'Packaging & Testing Flink Jobs' + sh """ + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/stream_status/pom.xml + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/batch_ar/pom.xml + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/batch_status/pom.xml + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/ams_ingest_metric/pom.xml + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/ams_ingest_sync/pom.xml + mvn clean package cobertura:cobertura -Dcobertura.report.format=xml -f ${PROJECT_DIR}/flink_jobs/status_trends/pom.xml + """ + junit '**/target/surefire-reports/*.xml' + cobertura coberturaReportFile: '**/target/site/cobertura/coverage.xml' + archiveArtifacts artifacts: '**/target/*.jar' + } + post { + always { + cleanWs() + } + } 
+ } + } + post { + success { + script{ + if ( env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'devel' ) { + slackSend( message: ":rocket: New version for <$BUILD_URL|$PROJECT_DIR>:$BRANCH_NAME Job: $JOB_NAME !") + } + } + } + failure { + script{ + if ( env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'devel' ) { + slackSend( message: ":rain_cloud: Build Failed for <$BUILD_URL|$PROJECT_DIR>:$BRANCH_NAME Job: $JOB_NAME") + } + } + } + } +} diff --git a/README.md b/README.md index 2ea45779..15b095a1 100644 --- a/README.md +++ b/README.md @@ -368,3 +368,82 @@ Ingest Sync | Ingesting sync data from `{{ams-endpoint}}`/v1/projects/`{{project}}`/subscriptions/`{{sync_subscription}}` Batch AR | Ar Batch job for tenant:`{{tenant}}` on day:`{{day}}` using report:`{{report}}` Batch Status | Status Batch job for tenant:`{{tenant}}` on day:`{{day}}` using report:`{{report}}` Streaming Status | Streaming status using data from `{{ams-endpoint}}`/v1/projects/`{{project}}`/subscriptions/`[`{{metric_subscription}}`,`{{sync_subscription}}`] + +## Status Trends +Flink batch job that calculates status trends for critical, warning, and unknown statuses +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents. If false or missing, the collections remain as they are + + +Flink batch job that calculates flip flop trends for service endpoint metrics +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents. If false or missing, the collections remain as they are + + +Flink batch job that calculates flip flop trends for service endpoints +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents. If false or missing, the collections remain as they are + + +Flink batch job that calculates flip flop trends for services +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents.
If false or missing, the collections remain as they are + + +Flink batch job that calculates flip flop trends for groups +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents. If false or missing, the collections remain as they are + +Flink batch job that calculates flip flop trends for all levels of groups +The job requires the following parameters: + +`--yesterdayData` : file location of previous day's data +`--todayData` : file location of today's data +`--N` : (optional) number of top results to display +`--mongoUri` : uri of the mongo db used to store results +`--apiUri` : uri of the web-api +`--key` : user's token, used for authentication +`--proxy` : (optional) proxy url +`--clearMongo` : (optional) defines whether the mongo collections will be cleared of previous documents. If false or missing, the collections remain as they are + + diff --git a/bin/ar_job_submit.py b/bin/ar_job_submit.py index b643d0e5..5743bc36 100755 --- a/bin/ar_job_submit.py +++ b/bin/ar_job_submit.py @@ -6,18 +6,31 @@ import datetime from snakebite.client import Client import logging -from urlparse import urlparse +from urllib.parse import urlparse from utils.argo_mongo import ArgoMongoClient from utils.common import cmd_to_string, date_rollback, flink_job_submit, hdfs_check_path, get_log_conf, get_config_paths from utils.update_profiles import ArgoProfileManager from utils.argo_config import ArgoConfig from utils.recomputations import upload_recomputations +from datetime import datetime log = logging.getLogger(__name__) def compose_hdfs_commands(year, month, day, args, config): + """Checks hdfs for available files back in time and prepares the correct hdfs arguments + + Args: + year (int): year part of the date to check for hdfs files + month (int): month part of the date to check for hdfs files + day (int): day part of the date to check for hdfs files + config (obj.): argo configuration object + + + Returns: + list: A list of all hdfs arguments to be used in flink job submission + """ # set up the hdfs client to be used in order to check the files namenode = config.get("HDFS", "namenode") @@ -27,12 +40,12 @@ def compose_hdfs_commands(year, month, day, args, config): hdfs_user = config.get("HDFS", "user") tenant = args.tenant - hdfs_sync = config.get("HDFS", "path_sync") - hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() + hdfs_metric = config.get("HDFS", "path_metric") - hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() + hdfs_metric = hdfs_metric.fill( + namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() # dictionary holding all the commands with their respective arguments' name hdfs_commands = dict() @@ -42,57 +55,26 @@ def compose_hdfs_commands(year, month, day, args, config): hdfs_metric + "/" + str(datetime.date(year, month, day) - datetime.timedelta(1)), client) # file location of target day's metric data (local or hdfs) - hdfs_commands["--mdata"] = hdfs_check_path(hdfs_metric + "/" + args.date, client) - - # file location of report configuration json file (local or hdfs) -
hdfs_commands["--conf"] = hdfs_check_path(hdfs_sync + "/" + args.tenant+"_"+args.report+"_cfg.json", client) - - # file location of metric profile (local or hdfs) - hdfs_commands["--mps"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config, - client) - - # file location of operations profile (local or hdfs) - hdfs_commands["--ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client) - - # file location of aggregations profile (local or hdfs) - hdfs_commands["--apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client) - - if args.thresholds: - # file location of thresholds rules file (local or hdfs) - hdfs_commands["--thr"] = hdfs_check_path( - os.path.join(hdfs_sync, "".join([args.tenant, "_", args.report, "_thresholds.json"])), client) - - # file location of endpoint group topology file (local or hdfs) - hdfs_commands["-egp"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config, - client) - - # file location of group of groups topology file (local or hdfs) - hdfs_commands["-ggp"] = date_rollback(hdfs_sync + "/" + args.report + "/" + "group_groups_" + "{{date}}" + ".avro", - year, month, day, config, client) - - # file location of weights file (local or hdfs) - hdfs_commands["--weights"] = date_rollback(hdfs_sync + "/" + args.report + "/weights_" + "{{date}}" + ".avro", year, - month, day, config, client) + hdfs_commands["--mdata"] = hdfs_check_path( + hdfs_metric + "/" + args.date, client) - # file location of downtimes file (local or hdfs) - hdfs_commands["--downtimes"] = hdfs_check_path( - hdfs_sync + "/" + args.report + "/downtimes_" + str(datetime.date(year, month, day)) + ".avro", client) + return hdfs_commands - # file location of recomputations file (local or hdfs) - # first check if there is a recomputations file for the given date - # recomputation lies in the hdfs in the form of - # /sync/recomp_TENANTNAME_ReportName_2018-08-02.json - if client.test(urlparse(hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json").path, exists=True): - hdfs_commands["--rec"] = hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json" - else: - hdfs_commands["--rec"] = hdfs_check_path(hdfs_sync+"/recomp.json", client) - return hdfs_commands +def compose_command(config, args, hdfs_commands, dry_run=False): + """Composes a command line execution string for submitting a flink job. Also calls mongodb + clean up procedure before composing the command + Args: + config (obj.): argo configuration object + args (dict): command line arguments of this script + hdfs_commands (list): a list of hdfs related arguments to be passed in flink job + dry_run (bool, optional): signifies a dry-run execution context, if yes no mongodb clean-up is perfomed. + Defaults to False. -def compose_command(config, args, hdfs_commands): + Returns: + list: A list of all command line arguments for performing the flink job submission + """ # job submission command cmd_command = [] @@ -120,13 +102,16 @@ def compose_command(config, args, hdfs_commands): # MongoDB uri for outputting the results to (e.g. 
mongodb://localhost:21017/example_db) cmd_command.append("--mongo.uri") group_tenant = "TENANTS:"+args.tenant - mongo_endpoint = config.get("MONGO","endpoint").geturl() - mongo_uri = config.get(group_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint, tenant=args.tenant) + mongo_endpoint = config.get("MONGO", "endpoint").geturl() + mongo_uri = config.get(group_tenant, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=args.tenant) cmd_command.append(mongo_uri.geturl()) - if args.method == "insert": - argo_mongo_client = ArgoMongoClient(args, config, ["service_ar", "endpoint_group_ar"]) - argo_mongo_client.mongo_clean_ar(mongo_uri) + # do action if method is insert and not dry run + if args.method == "insert" and dry_run == False: + argo_mongo_client = ArgoMongoClient( + args, config, ["endpoint_ar", "service_ar", "endpoint_group_ar"]) + argo_mongo_client.mongo_clean_ar(mongo_uri, dry_run) # MongoDB method to be used when storing the results, either insert or upsert cmd_command.append("--mongo.method") @@ -137,21 +122,28 @@ def compose_command(config, args, hdfs_commands): cmd_command.append(command) cmd_command.append(hdfs_commands[command]) - # get optional ams proxy - proxy = config.get("AMS", "proxy") + # get the api endpoint + api_endpoint = config.get("API","endpoint") + if api_endpoint: + cmd_command.append("--api.endpoint") + cmd_command.append(api_endpoint.hostname) + + # get the api token + cmd_command.append("--api.token") + cmd_command.append(config.get("API","access_token")) + + # get report id + + cmd_command.append("--report.id") + cmd_command.append(config.get("TENANTS:"+args.tenant,"report_"+args.report)) + + + # get optional api proxy + proxy = config.get("API", "proxy") if proxy is not None: - cmd_command.append("--ams.proxy") + cmd_command.append("--api.proxy") cmd_command.append(proxy.geturl()) - # ssl verify - cmd_command.append("--ams.verify") - ams_verify = config.get("AMS", "verify") - if ams_verify is not None: - cmd_command.append(str(ams_verify).lower()) - else: - # by default assume ams verify is always true - cmd_command.append("true") - return cmd_command @@ -173,47 +165,41 @@ def main(args=None): log.info("Tenant: "+args.tenant+" doesn't exist.") sys.exit(1) - # check and upload recomputations - upload_recomputations(args.tenant, args.report, args.date, config) - - # optional call to update profiles - if args.profile_check: - profile_mgr = ArgoProfileManager(config) - profile_type_checklist = ["operations", "aggregations", "reports", "thresholds"] - for profile_type in profile_type_checklist: - profile_mgr.profile_update_check(args.tenant, args.report, profile_type) # dictionary containing the argument's name and the command assosciated with each name hdfs_commands = compose_hdfs_commands(year, month, day, args, config) - cmd_command = compose_command(config, args, hdfs_commands) - - log.info("Getting ready to submit job") - log.info(cmd_to_string(cmd_command)+"\n") + cmd_command = compose_command(config, args, hdfs_commands, args.dry_run) # submit the script's command - flink_job_submit(config, cmd_command) + flink_job_submit(config, cmd_command, None, args.dry_run) if __name__ == "__main__": + today = datetime.today().strftime('%Y-%m-%d') + parser = argparse.ArgumentParser(description="Batch A/R Job submit script") parser.add_argument( "-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant") parser.add_argument( - "-r", "--report", metavar="STRING", help="Report status", required=True, dest="report") + "-r", 
"--report", metavar="STRING", help="Name of the report", required=True, dest="report") parser.add_argument( - "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date") + "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=False, dest="date", default=today) parser.add_argument( - "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method") + "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=False, dest="method", default="insert") parser.add_argument( - "-c", "--config", metavar="PATH", help="Path for the config file", dest="config") + "-c", "--config", metavar="PATH", help="Path for the config file", dest="config", required=True) parser.add_argument( "-u", "--sudo", help="Run the submition as superuser", action="store_true") parser.add_argument("--profile-check", help="check if profiles are up to date before running job", dest="profile_check", action="store_true") + parser.add_argument("--historic-profiles", help="use historic profiles", + dest="historic", action="store_true") parser.add_argument("--thresholds", help="check and use threshold rule file if exists", dest="thresholds", action="store_true") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") # Pass the arguments to main method sys.exit(main(parser.parse_args())) diff --git a/bin/metric_ingestion_submit.py b/bin/metric_ingestion_submit.py index 47ec738b..2d67e42d 100755 --- a/bin/metric_ingestion_submit.py +++ b/bin/metric_ingestion_submit.py @@ -10,6 +10,15 @@ def compose_command(config, args): + """Composes a command line execution string for submitting a flink job. 
+ + Args: + config (obj.): argo configuration object + args (dict): command line arguments of this script + + Returns: + list: A list of all command line arguments for performing the flink job submission + """ # job submission command cmd_command = [] @@ -48,7 +57,7 @@ def compose_command(config, args): # ams port ams_port = 443 if ams_endpoint.port is not None: - ams_port = ams_endpoint.port + ams_port = ams_endpoint.port cmd_command.append("--ams.port") cmd_command.append(str(ams_port)) @@ -73,9 +82,11 @@ def compose_command(config, args): hdfs_user = config.get("HDFS", "user") hdfs_metric = config.get("HDFS", "path_metric") - hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant) + hdfs_metric.fill(namenode=namenode.geturl(), + hdfs_user=hdfs_user, tenant=args.tenant) - hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant).geturl() + hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl( + ), hdfs_user=hdfs_user, tenant=args.tenant).geturl() cmd_command.append("--hdfs.path") cmd_command.append(hdfs_metric) @@ -86,7 +97,8 @@ def compose_command(config, args): # interval for checkpont in ms cmd_command.append("--check.interval") - cmd_command.append(str(config.get(section_tenant_job, "checkpoint_interval"))) + cmd_command.append( + str(config.get(section_tenant_job, "checkpoint_interval"))) # num of messages to be retrieved from AMS per request cmd_command.append("--ams.batch") @@ -110,7 +122,7 @@ def compose_command(config, args): else: # by default assume ams verify is always true cmd_command.append("true") - + return cmd_command, job_namespace @@ -131,20 +143,20 @@ def main(args=None): cmd_command, job_namespace = compose_command(config, args) - log.info("Getting ready to submit job") - log.info(cmd_to_string(cmd_command)+"\n") - # submit script's command - flink_job_submit(config, cmd_command, job_namespace) + flink_job_submit(config, cmd_command, job_namespace, args.dry_run) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="AMS Metric Ingestion submission script") + parser = argparse.ArgumentParser( + description="AMS Metric Ingestion submission script") parser.add_argument( "-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True) parser.add_argument( "-c", "--config", metavar="PATH", help="Path for the config file") parser.add_argument( "-u", "--sudo", help="Run the submition as superuser", action="store_true") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") sys.exit(main(parser.parse_args())) diff --git a/bin/requirements.txt b/bin/requirements.txt index 64fb15a3..eb5cbfef 100644 --- a/bin/requirements.txt +++ b/bin/requirements.txt @@ -1,5 +1,5 @@ -requests==2.20.0 -responses==0.6.0 -pytest==3.4.0 -snakebite==2.11.0 -pymongo==3.6.1 +requests==2.22.0 +responses==0.10.7 +pytest==5.3.1 +snakebite-py3==3.0.5 +pymongo==3.10.0 diff --git a/bin/status_job_submit.py b/bin/status_job_submit.py index 57acdda4..38c2803c 100755 --- a/bin/status_job_submit.py +++ b/bin/status_job_submit.py @@ -6,16 +6,30 @@ from snakebite.client import Client import logging -from urlparse import urlparse +from urllib.parse import urlparse from utils.argo_mongo import ArgoMongoClient from utils.common import cmd_to_string, date_rollback, flink_job_submit, hdfs_check_path, get_log_conf, get_config_paths from utils.update_profiles import ArgoProfileManager from utils.argo_config import ArgoConfig +from 
datetime import datetime + log = logging.getLogger(__name__) def compose_hdfs_commands(year, month, day, args, config): + """Checks hdfs for available files back in time and prepares the correct hdfs arguments + + Args: + year (int): year part of the date to check for hdfs files + month (int): month part of the date to check for hdfs files + day (int): day part of the date to check for hdfs files + config (obj.): argo configuration object + + + Returns: + list: A list of all hdfs arguments to be used in flink job submission + """ # set up the hdfs client to be used in order to check the files namenode = config.get("HDFS", "namenode") @@ -26,11 +40,13 @@ def compose_hdfs_commands(year, month, day, args, config): hdfs_user = config.get("HDFS", "user") tenant = args.tenant hdfs_sync = config.get("HDFS", "path_sync") - hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() + hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl( + ), hdfs_user=hdfs_user, tenant=tenant).geturl() hdfs_metric = config.get("HDFS", "path_metric") - hdfs_metric = hdfs_metric.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() + hdfs_metric = hdfs_metric.fill( + namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() # dictionary holding all the commands with their respective arguments' name hdfs_commands = dict() @@ -40,49 +56,26 @@ def compose_hdfs_commands(year, month, day, args, config): hdfs_metric + "/" + str(datetime.date(year, month, day) - datetime.timedelta(1)), client) # file location of target day's metric data (local or hdfs) - hdfs_commands["--mdata"] = hdfs_check_path(hdfs_metric+"/"+args.date, client) - - # file location of report configuration json file (local or hdfs) - hdfs_commands["--conf"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_cfg.json", client) - - # file location of metric profile (local or hdfs) - hdfs_commands["--mps"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config, - client) - - # file location of operations profile (local or hdfs) - hdfs_commands["--ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client) - - # file location of aggregations profile (local or hdfs) - hdfs_commands["--apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client) - - if args.thresholds: - # file location of thresholds rules file (local or hdfs) - hdfs_commands["--thr"] = hdfs_check_path( - os.path.join(hdfs_sync, "".join([args.tenant, "_", args.report, "_thresholds.json"])), client) - - # file location of endpoint group topology file (local or hdfs) - hdfs_commands["-egp"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config, - client) + hdfs_commands["--mdata"] = hdfs_check_path( + hdfs_metric+"/"+args.date, client) - # file location of group of groups topology file (local or hdfs) - hdfs_commands["-ggp"] = date_rollback(hdfs_sync + "/" + args.report + "/" + "group_groups_" + "{{date}}" + ".avro", - year, month, day, config, client) + return hdfs_commands - # file location of recomputations file (local or hdfs) - # first check if there is a recomputations file for the given date - if client.test(urlparse(hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json").path, exists=True): - hdfs_commands["--rec"] = hdfs_sync+"/recomp_"+args.tenant+"_"+args.report+"_"+args.date+".json" - log.info("Using 
recomputations file for the given date") - else: - hdfs_commands["--rec"] = hdfs_check_path(hdfs_sync+"/recomp.json", client) - log.info("Recomputations file for the given date was not found. Using default.") - return hdfs_commands +def compose_command(config, args, hdfs_commands, dry_run=False): + """Composes a command line execution string for submitting a flink job. Also calls mongodb + clean up procedure before composing the command + Args: + config (obj.): argo configuration object + args (dict): command line arguments of this script + hdfs_commands (list): a list of hdfs related arguments to be passed in flink job + dry_run (bool, optional): signifies a dry-run execution context, if yes no mongodb clean-up is perfomed. + Defaults to False. -def compose_command(config, args, hdfs_commands): + Returns: + list: A list of all command line arguments for performing the flink job submission + """ # job sumbission command cmd_command = [] @@ -110,15 +103,16 @@ def compose_command(config, args, hdfs_commands): # MongoDB uri for outputting the results to (e.g. mongodb://localhost:21017/example_db) cmd_command.append("--mongo.uri") group_tenant = "TENANTS:" + args.tenant - mongo_endpoint = config.get("MONGO","endpoint").geturl() - mongo_uri = config.get(group_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant) + mongo_endpoint = config.get("MONGO", "endpoint").geturl() + mongo_uri = config.get(group_tenant, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=args.tenant) cmd_command.append(mongo_uri.geturl()) - if args.method == "insert": + if args.method == "insert" and dry_run == False: argo_mongo_client = ArgoMongoClient(args, config, ["status_metrics", "status_endpoints", "status_services", "status_endpoint_groups"]) - argo_mongo_client.mongo_clean_status(mongo_uri) + argo_mongo_client.mongo_clean_status(mongo_uri, dry_run) # MongoDB method to be used when storing the results, either insert or upsert cmd_command.append("--mongo.method") @@ -129,21 +123,27 @@ def compose_command(config, args, hdfs_commands): cmd_command.append(command) cmd_command.append(hdfs_commands[command]) - # get optional ams proxy - proxy = config.get("AMS", "proxy") + # get the api endpoint + api_endpoint = config.get("API","endpoint") + if api_endpoint: + cmd_command.append("--api.endpoint") + cmd_command.append(api_endpoint.hostname) + + # get the api token + cmd_command.append("--api.token") + cmd_command.append(config.get("API","access_token")) + + # get report id + + cmd_command.append("--report.id") + cmd_command.append(config.get("TENANTS:"+args.tenant,"report_"+args.report)) + + # get optional api proxy + proxy = config.get("API", "proxy") if proxy is not None: - cmd_command.append("--ams.proxy") + cmd_command.append("--api.proxy") cmd_command.append(proxy.geturl()) - # ssl verify - cmd_command.append("--ams.verify") - ams_verify = config.get("AMS", "verify") - if ams_verify is not None: - cmd_command.append(str(ams_verify).lower()) - else: - # by default assume ams verify is always true - cmd_command.append("true") - return cmd_command @@ -165,44 +165,41 @@ def main(args=None): year, month, day = [int(x) for x in args.date.split("-")] - # optional call to update profiles - if args.profile_check: - profile_mgr = ArgoProfileManager(config) - profile_type_checklist = ["operations", "aggregations", "reports", "thresholds"] - for profile_type in profile_type_checklist: - profile_mgr.profile_update_check(args.tenant, args.report, profile_type) - # dictionary containing the argument's 
name and the command associated with each name hdfs_commands = compose_hdfs_commands(year, month, day, args, config) - cmd_command = compose_command(config, args, hdfs_commands) - - log.info("Getting ready to submit job") - log.info(cmd_to_string(cmd_command)+"\n") + cmd_command = compose_command(config, args, hdfs_commands, args.dry_run) # submit the script's command - flink_job_submit(config, cmd_command) + flink_job_submit(config, cmd_command, None, args.dry_run) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Batch Status Job submit script") + today = datetime.today().strftime('%Y-%m-%d') + + parser = argparse.ArgumentParser( + description="Batch Status Job submit script") parser.add_argument( "-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant") parser.add_argument( "-r", "--report", metavar="STRING", help="Report status", required=True, dest="report") parser.add_argument( - "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date") + "-d", "--date", metavar="DATE(YYYY-MM-DD)", help="Date to run the job for", required=True, dest="date", default=today) parser.add_argument( - "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method") + "-m", "--method", metavar="KEYWORD(insert|upsert)", help="Insert or Upsert data in mongoDB", required=True, dest="method", default="insert") parser.add_argument( "-c", "--config", metavar="PATH", help="Path for the config file", dest="config") parser.add_argument( "-u", "--sudo", help="Run the submit job as superuser", action="store_true") + parser.add_argument("--historic-profiles", help="use historic profiles", + dest="historic", action="store_true") parser.add_argument("--profile-check", help="check if profiles are up to date before running job", dest="profile_check", action="store_true") parser.add_argument("--thresholds", help="check and use threshold rule file if exists", dest="thresholds", action="store_true") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") # Pass the arguments to main method sys.exit(main(parser.parse_args())) diff --git a/bin/stream_status_job_submit.py b/bin/stream_status_job_submit.py index 144dae44..c7042332 100755 --- a/bin/stream_status_job_submit.py +++ b/bin/stream_status_job_submit.py @@ -11,6 +11,19 @@ def compose_hdfs_commands(year, month, day, args, config): + """Checks hdfs for available files back in time and prepares the correct hdfs arguments + + Args: + year (int): year part of the date to check for hdfs files + month (int): month part of the date to check for hdfs files + day (int): day part of the date to check for hdfs files + config (obj.): argo configuration object + + + Returns: + list: A list of all hdfs arguments to be used in flink job submission + """ + # set up the hdfs client to be used in order to check the files namenode = config.get("HDFS", "namenode") client = Client(namenode.hostname, namenode.port, use_trash=False) @@ -20,31 +33,77 @@ def compose_hdfs_commands(year, month, day, args, config): hdfs_user = config.get("HDFS", "user") tenant = args.tenant hdfs_sync = config.get("HDFS", "path_sync") - hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).geturl() + hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl( + ), hdfs_user=hdfs_user, tenant=tenant).geturl() # dictionary holding all the commands 
with their respective arguments' name hdfs_commands = dict() - # file location of metric profile (local or hdfs) - hdfs_commands["--sync.mps"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "metric_profile_" + "{{date}}" + ".avro", year, month, day, config, - client) + # if profile historic mode is used reference profiles by date + if args.historic: + # file location of historic operations profile (local or hdfs) + hdfs_commands["--ops"] = hdfs_check_path( + hdfs_sync+"/"+args.tenant+"_ops_" + args.date + ".json", client) + + # file location of historic aggregations profile (local or hdfs) + hdfs_commands["--apr"] = hdfs_check_path( + hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap_" + args.date + ".json", client) + + # TODO: Don't Use YET metric profiles from api in json form until status computation jobs are updated + # accordingly - After that uncomment the following + # #file location of historic metric profile (local or hdfs) which is in json format + # hdfs_commands["--mps"] = hdfs_check_path( + # hdfs_sync+"/"+args.tenant+"_"+args.report+"_metric_" + args.date + ".json", client) + + # TODO: when compute jobs are updated to use metric profiles in json format comment the following: + # file location of metric profile (local or hdfs) + hdfs_commands["--mps"] = date_rollback( + hdfs_sync + "/" + args.report + "/" + "metric_profile_" + + "{{date}}" + ".avro", year, month, day, config, + client) + else: + + # file location of operations profile (local or hdfs) + hdfs_commands["--ops"] = hdfs_check_path( + hdfs_sync+"/"+args.tenant+"_ops.json", client) - # file location of operations profile (local or hdfs) - hdfs_commands["--sync.ops"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_ops.json", client) + # file location of aggregations profile (local or hdfs) + hdfs_commands["--apr"] = hdfs_check_path( + hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client) - # file location of aggregations profile (local or hdfs) - hdfs_commands["--sync.apr"] = hdfs_check_path(hdfs_sync+"/"+args.tenant+"_"+args.report+"_ap.json", client) + # file location of metric profile (local or hdfs) + hdfs_commands["--mps"] = date_rollback( + hdfs_sync + "/" + args.report + "/" + "metric_profile_" + + "{{date}}" + ".avro", year, month, day, config, + client) + + # get downtime + # file location of metric profile (local or hdfs) + hdfs_commands["--sync.downtime"] = date_rollback( + hdfs_sync + "/" + args.report + "/" + "downtimes_" + + "{{date}}" + ".avro", year, month, day, config, + client) # file location of endpoint group topology file (local or hdfs) hdfs_commands["-sync.egp"] = date_rollback( - hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + "{{date}}" + ".avro", year, month, day, config, + hdfs_sync + "/" + args.report + "/" + "group_endpoints_" + + "{{date}}" + ".avro", year, month, day, config, client) return hdfs_commands def compose_command(config, args, hdfs_commands): + """Composes a command line execution string for submitting a flink job. 
+ + Args: + config (obj.): argo configuration object + args (dict): command line arguments of this script + hdfs_commands (list): a list of hdfs related arguments to be passed in flink job + + Returns: + list: A list of all command line arguments for performing the flink job submission + """ # job submission command cmd_command = [] @@ -55,12 +114,16 @@ def compose_command(config, args, hdfs_commands): # get needed config params section_tenant = "TENANTS:" + args.tenant section_tenant_job = "TENANTS:" + args.tenant + ":stream-status" - job_namespace = config.get("JOB-NAMESPACE", "stream-status-namespace") + ams_endpoint = config.get("AMS", "endpoint") ams_project = config.get(section_tenant, "ams_project") - ams_sub_metric = config.get(section_tenant_job, "ams_sub_metric") - ams_sub_sync = config.get(section_tenant_job, "ams_sub_sync") + if args.report.lower() == "critical": + ams_sub_metric = config.get(section_tenant_job, "ams_sub_metric") + ams_sub_sync = config.get(section_tenant_job, "ams_sub_sync") + else: + ams_sub_metric = "stream_metric_" + report.lower() + ams_sub_sync = "stream_sync_" + report.lower() # flink executable cmd_command.append(config.get("FLINK", "path")) @@ -80,8 +143,10 @@ def compose_command(config, args, hdfs_commands): cmd_command.append(ams_endpoint.hostname) # ams port - cmd_command.append("--ams.port") - cmd_command.append(ams_endpoint.port) + ams_port = cmd_command.append("--ams.port") + if not ams_port: + ams_port = 443 + cmd_command.append(str(ams_port)) # tenant's token for ams cmd_command.append("--ams.token") @@ -100,8 +165,9 @@ def compose_command(config, args, hdfs_commands): cmd_command.append(ams_sub_sync) # fill job namespace template with the required arguments - job_namespace.fill(ams_endpoint=ams_endpoint.hostname, ams_port=ams_endpoint.port, ams_project=ams_project, - ams_sub_metric=ams_sub_metric, ams_sub_sync=ams_sub_sync) + job_namespace = config.get("JOB-NAMESPACE", "stream-status-namespace") + job_namespace = job_namespace.fill(ams_endpoint=ams_endpoint.hostname, ams_port=ams_port, ams_project=ams_project, + ams_sub_metric=ams_sub_metric, ams_sub_sync=ams_sub_sync) # add the hdfs commands for command in hdfs_commands: @@ -112,9 +178,16 @@ def compose_command(config, args, hdfs_commands): cmd_command.append("--run.date") cmd_command.append(args.date) + # report + cmd_command.append("--report") + cmd_command.append(args.report) + # flink parallelism cmd_command.append("--p") - cmd_command.append(config.get(section_tenant_job, "flink_parallelism")) + flink_parallelism = config.get(section_tenant_job, "flink_parallelism") + if not flink_parallelism: + flink_parallelism = "1" + cmd_command.append(flink_parallelism) # grab tenant configuration section for stream-status @@ -128,63 +201,80 @@ def compose_command(config, args, hdfs_commands): # hbase endpoint if config.has(section_tenant_job, "hbase_master"): cmd_command.append("--hbase.master") - cmd_command.append(config.get(section_tenant_job, "hbase_master").hostname) + cmd_command.append(config.get( + section_tenant_job, "hbase_master").hostname) # hbase endpoint port if config.has(section_tenant_job, "hbase_master"): cmd_command.append("--hbase.port") - cmd_command.append(config.get(section_tenant_job, "hbase_master").port) + cmd_command.append(config.get( + section_tenant_job, "hbase_master").port) # comma separate list of zookeeper servers if config.has(section_tenant_job, "hbase_zk_quorum"): cmd_command.append("--hbase.zk.quorum") - cmd_command.append(config.get(section_tenant_job, 
"hbase_zk_quorum")) + cmd_command.append(config.get( + section_tenant_job, "hbase_zk_quorum")) # port used by zookeeper servers if config.has(section_tenant_job, "hbase_zk_port"): cmd_command.append("--hbase.zk.port") - cmd_command.append(config.get(section_tenant_job, "hbase_zk_port")) + cmd_command.append(config.get( + section_tenant_job, "hbase_zk_port")) # table namespace, usually tenant if config.has(section_tenant_job, "hbase_namespace"): cmd_command.append("--hbase.namespace") - cmd_command.append(config.get(section_tenant_job, "hbase_namespace")) + cmd_command.append(config.get( + section_tenant_job, "hbase_namespace")) # table name, usually metric data if config.has(section_tenant_job, "hbase_table"): cmd_command.append("--hbase.table") - cmd_command.append(config.get(section_tenant_job, "hbase_table")) + cmd_command.append(config.get( + section_tenant_job, "hbase_table")) elif output == "kafka": # kafka list of servers if config.has(section_tenant_job, "kafka_servers"): cmd_command.append("--kafka.servers") - kafka_servers = ','.join(config.get(section_tenant_job, "kafka_servers")) + kafka_servers = ','.join(config.get( + section_tenant_job, "kafka_servers")) cmd_command.append(kafka_servers) # kafka topic to send status events to if config.has(section_tenant_job, "kafka_topic"): cmd_command.append("--kafka.topic") - cmd_command.append(config.get(section_tenant_job, "kafka_topic")) + cmd_command.append(config.get( + section_tenant_job, "kafka_topic")) elif output == "fs": # filesystem path for output(use "hdfs://" for hdfs path) if config.has(section_tenant_job, "fs_output"): cmd_command.append("--fs.output") - cmd_command.append(config.get(section_tenant_job, "fs_output")) + cmd_command.append(config.get( + section_tenant_job, "fs_output")) elif output == "mongo": cmd_command.append("--mongo.uri") - mongo_endpoint = config.get("MONGO","endpoint").geturl() - mongo_uri = config.get(section_tenant, "mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant) + mongo_endpoint = config.get("MONGO", "endpoint").geturl() + mongo_uri = config.get(section_tenant, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=args.tenant) cmd_command.append(mongo_uri.geturl()) # mongo method + mongo_method = config.get("MONGO", "mongo_method") + if not mongo_method: + mongo_method = "insert" cmd_command.append("--mongo.method") - cmd_command.append(config.get(section_tenant_job, "mongo_method")) + cmd_command.append(mongo_method) + # report id + report_id = config.get(section_tenant, "report_" + args.report) + cmd_command.append("--report-id") + cmd_command.append(report_id) # num of messages to be retrieved from AMS per request cmd_command.append("--ams.batch") - cmd_command.append(config.get(section_tenant_job, "ams_batch")) + cmd_command.append(str(config.get(section_tenant_job, "ams_batch"))) # interval in ms betweeb AMS service requests cmd_command.append("--ams.interval") - cmd_command.append(config.get(section_tenant_job, "ams_interval")) + cmd_command.append(str(config.get(section_tenant_job, "ams_interval"))) # get optional ams proxy proxy = config.get("AMS", "proxy") @@ -226,33 +316,50 @@ def main(args=None): year, month, day = [int(x) for x in args.date.split("T")[0].split("-")] + # optional call to update profiles + if args.profile_check: + dateParam = None + if args.historic: + dateParam = args.date + profile_mgr = ArgoProfileManager(config) + profile_type_checklist = [ + "operations", "aggregations", "reports", "thresholds", "metrics"] + for profile_type in 
profile_type_checklist: + profile_mgr.profile_update_check( + args.tenant, args.report, profile_type, dateParam) + # dictionary containing the argument's name and the command assosciated with each name hdfs_commands = compose_hdfs_commands(year, month, day, args, config) cmd_command, job_namespace = compose_command(config, args, hdfs_commands) - log.info("Getting ready to submit job") - log.info(cmd_to_string(cmd_command)+"\n") - # submit the script's command - flink_job_submit(config, cmd_command, job_namespace) + flink_job_submit(config, cmd_command, job_namespace, args.dry_run) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Stream Status Job submit script") + parser = argparse.ArgumentParser( + description="Stream Status Job submit script") parser.add_argument( "-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True, dest="tenant") parser.add_argument( "-d", "--date", metavar="DATE(ISO-8601)", - default=str(datetime.datetime.utcnow().replace(microsecond=0).isoformat()) + "Z", + default=str(datetime.datetime.utcnow().replace( + microsecond=0).isoformat()) + "Z", help="Date in ISO-8601 format", dest="date") parser.add_argument( "-r", "--report", metavar="STRING", help="Report status", required=True, dest="report") parser.add_argument( "-c", "--config", metavar="PATH", help="Path for the config file", dest="config") parser.add_argument( - "-u", "--sudo", help="Run the submition as superuser", action="store_true", dest="sudo") + "-u", "--sudo", help="Run the submission as superuser", action="store_true", dest="sudo") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") + parser.add_argument("--historic-profiles", help="use historic profiles", + dest="historic", action="store_true") + parser.add_argument("--profile-check", help="check if profiles are up to date before running job", + dest="profile_check", action="store_true") parser.add_argument( "-timeout", "--timeout", metavar="INT", help="Controls default timeout for event regeneration (used in notifications)", dest="timeout") diff --git a/bin/sync_ingestion_submit.py b/bin/sync_ingestion_submit.py index 48b52b78..6146c9c6 100755 --- a/bin/sync_ingestion_submit.py +++ b/bin/sync_ingestion_submit.py @@ -10,6 +10,15 @@ def compose_command(config, args): + """Composes a command line execution string for submitting a flink job. 
+ + Args: + config (obj.): argo configuration object + args (dict): command line arguments of this script + + Returns: + list: A list of all command line arguments for performing the flink job submission + """ # job submission command cmd_command = [] @@ -49,7 +58,7 @@ def compose_command(config, args): # ams port ams_port = 443 if ams_endpoint.port is not None: - ams_port = ams_endpoint.port + ams_port = ams_endpoint.port cmd_command.append("--ams.port") cmd_command.append(str(ams_port)) @@ -74,9 +83,11 @@ def compose_command(config, args): hdfs_user = config.get("HDFS", "user") hdfs_sync = config.get("HDFS", "path_sync") - hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant) + hdfs_sync.fill(namenode=namenode.geturl(), + hdfs_user=hdfs_user, tenant=args.tenant) - hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl(), hdfs_user=hdfs_user, tenant=args.tenant).geturl() + hdfs_sync = hdfs_sync.fill(namenode=namenode.geturl( + ), hdfs_user=hdfs_user, tenant=args.tenant).geturl() # append hdfs sync base path to the submit command cmd_command.append("--hdfs.path") @@ -125,21 +136,21 @@ def main(args=None): cmd_command, job_namespace = compose_command(config, args) - log.info("Getting ready to submit job") - log.info(cmd_to_string(cmd_command)+"\n") - # submit the job - - flink_job_submit(config, cmd_command, job_namespace) + + flink_job_submit(config, cmd_command, job_namespace, args.dry_run) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="AMS Sync Ingestion submission script") + parser = argparse.ArgumentParser( + description="AMS Sync Ingestion submission script") parser.add_argument( "-t", "--tenant", metavar="STRING", help="Name of the tenant", required=True) parser.add_argument( "-c", "--config", metavar="PATH", help="Path for the config file") parser.add_argument( "-u", "--sudo", help="Run the submission as superuser", action="store_true") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") sys.exit(main(parser.parse_args())) diff --git a/bin/test_ar_job_submit.py b/bin/test_ar_job_submit.py index 7a1d6dcf..0cf4d1a6 100644 --- a/bin/test_ar_job_submit.py +++ b/bin/test_ar_job_submit.py @@ -5,32 +5,22 @@ from utils.common import cmd_to_string from utils.argo_config import ArgoConfig -CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template') -CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json') +CONF_TEMPLATE = os.path.join( + os.path.dirname(__file__), '../conf/conf.template') +CONF_SCHEMA = os.path.join(os.path.dirname( + __file__), '../conf/config.schema.json') + # This is the command that the submission script is expected to compose based on given args and config -expected_result = """flink_path run -c test_class test.jar --run.date 2018-02-11 \ ---mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert \ +expected_result = """flink_path run -c test_class test.jar --run.date 2018-02-11 --mongo.uri mongodb://localhost:21017/argo_TENANTA \ +--mongo.method upsert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \ --mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-11 \ ---rec hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/recomp.json \ ---downtimes hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ 
-TENANTA/sync/Critical/downtimes_2018-02-11.avro \ ---mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/metric_profile_2018-02-11.avro \ ---apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/TENANTA_Critical_ap.json \ ---ggp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/group_groups_2018-02-11.avro \ ---conf hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/TENANTA_Critical_cfg.json \ ---egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/group_endpoints_2018-02-11.avro \ ---pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/mdata/2018-02-10 --weights hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/weights_2018-02-11.avro \ ---ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \ ---ams.proxy test_proxy --ams.verify true""" +--api.endpoint api.foo --api.token key0 --report.id report_uuid""" +expected_result2 = """flink_path run -c test_class test.jar --run.date 2021-01-01 --mongo.uri mongodb://localhost:21017/argo_TENANTA \ +--mongo.method insert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2020-12-31 \ +--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2021-01-01 \ +--api.endpoint api.foo --api.token key0 --report.id report_uuid""" class TestClass(unittest.TestCase): @@ -46,23 +36,39 @@ def test_compose_command(self): parser.add_argument('--sudo', action='store_true') parser.add_argument('--method') args = parser.parse_args( - ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'Critical', '--method', 'upsert']) + ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'report_name', '--method', 'upsert']) hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata" - hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync" - + test_hdfs_commands = dict() test_hdfs_commands["--pdata"] = hdfs_metric+"/2018-02-10" test_hdfs_commands["--mdata"] = hdfs_metric+"/2018-02-11" - test_hdfs_commands["--conf"] = hdfs_sync+"/TENANTA_Critical_cfg.json" - test_hdfs_commands["--mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-02-11.avro" - test_hdfs_commands["--ops"] = hdfs_sync+"/TENANTA_ops.json" - test_hdfs_commands["--apr"] = hdfs_sync+"/TENANTA_Critical_ap.json" - test_hdfs_commands["--egp"] = hdfs_sync+"/Critical/group_endpoints_2018-02-11.avro" - test_hdfs_commands["--ggp"] = hdfs_sync+"/Critical/group_groups_2018-02-11.avro" - test_hdfs_commands["--weights"] = hdfs_sync+"/Critical/weights_2018-02-11.avro" - test_hdfs_commands["--downtimes"] = hdfs_sync+"/Critical/downtimes_2018-02-11.avro" - test_hdfs_commands["--rec"] = hdfs_sync+"/recomp.json" - - self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands))) + + self.assertEqual(expected_result, cmd_to_string( + compose_command(config, args, test_hdfs_commands))) + + def test_compose_second_command(self): + + # set up the config parser + config = ArgoConfig(CONF_TEMPLATE, CONF_SCHEMA) + + parser = argparse.ArgumentParser() + parser.add_argument('--tenant') + parser.add_argument('--date', required=False, default="2021-01-01") + parser.add_argument('--report') + 
parser.add_argument('--sudo', action='store_true') + parser.add_argument('--method', required=False, default="insert") + + args = parser.parse_args( + ['--tenant', 'TENANTA', '--report', 'report_name']) + + hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata" + + test_hdfs_commands = dict() + + test_hdfs_commands["--pdata"] = hdfs_metric+"/2020-12-31" + test_hdfs_commands["--mdata"] = hdfs_metric+"/2021-01-01" + + self.assertEqual(expected_result2, cmd_to_string( + compose_command(config, args, test_hdfs_commands, True))) diff --git a/bin/test_metric_ingestion_submit.py b/bin/test_metric_ingestion_submit.py index e6928bb1..4726bb95 100644 --- a/bin/test_metric_ingestion_submit.py +++ b/bin/test_metric_ingestion_submit.py @@ -5,8 +5,10 @@ import argparse import os -CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template') -CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json') +CONF_TEMPLATE = os.path.join( + os.path.dirname(__file__), '../conf/conf.template') +CONF_SCHEMA = os.path.join(os.path.dirname( + __file__), '../conf/config.schema.json') # This is the command that the submission script is expected to compose based on given args and config expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 8080 \ @@ -27,6 +29,5 @@ def test_compose_command(self): parser.add_argument('--sudo', action='store_true') args = parser.parse_args(['--tenant', 'TENANTA', '--sudo']) - print cmd_to_string(compose_command(config, args)[0]) - - self.assertEquals(expected_result, cmd_to_string(compose_command(config, args)[0])) + self.assertEqual(expected_result, cmd_to_string( + compose_command(config, args)[0])) diff --git a/bin/test_status_job_submit.py b/bin/test_status_job_submit.py index a0029abd..e90a2935 100644 --- a/bin/test_status_job_submit.py +++ b/bin/test_status_job_submit.py @@ -5,25 +5,21 @@ from status_job_submit import compose_command from utils.common import cmd_to_string -CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), '../conf/conf.template') -CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json') +CONF_TEMPLATE = os.path.join( + os.path.dirname(__file__), '../conf/conf.template') +CONF_SCHEMA = os.path.join(os.path.dirname( + __file__), '../conf/config.schema.json') # This is the command that the submission script is expected to compose based on given args and config -expected_result = """sudo flink_path run -c test_class test.jar --run.date 2018-02-11 \ ---mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert \ +expected_result = """sudo flink_path run -c test_class test.jar --run.date 2018-02-11 --mongo.uri mongodb://localhost:21017/argo_TENANTA \ +--mongo.method upsert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \ --mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-11 \ ---mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/metric_profile_2018-02-11.avro \ ---apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/TENANTA_Critical_ap.json \ ---ggp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/group_groups_2018-02-11.avro \ ---conf hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_Critical_cfg.json \ ---egp 
hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/group_endpoints_2018-02-11.avro \ ---pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2018-02-10 \ ---ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \ ---ams.proxy test_proxy --ams.verify true""" +--api.endpoint api.foo --api.token key0 --report.id report_uuid""" + +expected_result2 = """sudo flink_path run -c test_class test.jar --run.date 2021-01-01 --mongo.uri mongodb://localhost:21017/argo_TENANTA \ +--mongo.method insert --pdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2020-12-31 \ +--mdata hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata/2021-01-01 \ +--api.endpoint api.foo --api.token key0 --report.id report_uuid""" class TestClass(unittest.TestCase): @@ -39,20 +35,38 @@ def test_compose_command(self): parser.add_argument('--sudo', action='store_true') parser.add_argument('--method') args = parser.parse_args( - ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'Critical', '--method', 'upsert', '--sudo']) + ['--tenant', 'TENANTA', '--date', '2018-02-11', '--report', 'report_name', '--method', 'upsert', '--sudo']) hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata" - hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync" - + test_hdfs_commands = dict() test_hdfs_commands["--pdata"] = hdfs_metric+"/2018-02-10" test_hdfs_commands["--mdata"] = hdfs_metric+"/2018-02-11" - test_hdfs_commands["--conf"] = hdfs_sync+"/TENANTA_Critical_cfg.json" - test_hdfs_commands["--mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-02-11.avro" - test_hdfs_commands["--ops"] = hdfs_sync+"/TENANTA_ops.json" - test_hdfs_commands["--apr"] = hdfs_sync+"/TENANTA_Critical_ap.json" - test_hdfs_commands["--egp"] = hdfs_sync+"/Critical/group_endpoints_2018-02-11.avro" - test_hdfs_commands["--ggp"] = hdfs_sync+"/Critical/group_groups_2018-02-11.avro" - - self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands))) + + self.assertEqual(expected_result, cmd_to_string( + compose_command(config, args, test_hdfs_commands))) + + def test_compose_command2(self): + + config = ArgoConfig(CONF_TEMPLATE, CONF_SCHEMA) + + parser = argparse.ArgumentParser() + parser.add_argument('--tenant') + parser.add_argument('--date', default="2021-01-01") + parser.add_argument('--report') + parser.add_argument('--sudo', action='store_true') + parser.add_argument('--method', default="insert") + args = parser.parse_args( + ['--tenant', 'TENANTA', '--report', 'report_name', '--sudo']) + + hdfs_metric = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/mdata" + + + test_hdfs_commands = dict() + + test_hdfs_commands["--pdata"] = hdfs_metric+"/2020-12-31" + test_hdfs_commands["--mdata"] = hdfs_metric+"/2021-01-01" + + self.assertEqual(expected_result2, cmd_to_string( + compose_command(config, args, test_hdfs_commands, True))) diff --git a/bin/test_stream_status_job_submit.py b/bin/test_stream_status_job_submit.py index b2f1fd12..4fefa573 100644 --- a/bin/test_stream_status_job_submit.py +++ b/bin/test_stream_status_job_submit.py @@ -5,22 +5,23 @@ from stream_status_job_submit import compose_command from utils.common import cmd_to_string -CONF_TEMPLATE = os.path.join(os.path.dirname(__file__), 
'../conf/conf.template') -CONF_SCHEMA = os.path.join(os.path.dirname(__file__), '../conf/config.schema.json') +CONF_TEMPLATE = os.path.join( + os.path.dirname(__file__), '../conf/conf.template') +CONF_SCHEMA = os.path.join(os.path.dirname( + __file__), '../conf/config.schema.json') -expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 8080 \ + +expected_result = """sudo flink_path run -c test_class test.jar --ams.endpoint test_endpoint --ams.port 443 \ --ams.token test_token --ams.project test_project --ams.sub.metric metric_status --ams.sub.sync sync_status \ +--sync.mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/Critical/metric_profile_2018-03-01.avro \ +--sync.ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_ops.json \ --sync.apr hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/TENANTA_Critical_ap.json \ ---sync.egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/group_endpoints_2018-03-01.avro \ ---sync.mps hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/\ -TENANTA/sync/Critical/metric_profile_2018-03-01.avro \ ---sync.ops hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/\ -sync/TENANTA_ops.json --run.date 2018-03-05T00:00:00Z --p 1 \ ---hbase.master hbase.devel --hbase.port 8080 --hbase.zk.quorum ['test_zk_servers'] \ +--sync.egp hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync/Critical/group_endpoints_2018-03-01.avro \ +--run.date 2018-03-05T00:00:00Z --report Critical --p 1 --hbase.master hbase.devel --hbase.port 8080 --hbase.zk.quorum ['test_zk_servers'] \ --hbase.zk.port 8080 --hbase.namespace test_hbase_namespace --hbase.table metric_data \ ---kafka.servers kafka_server:9090,kafka_server2:9092 --kafka.topic test_kafka_topic --fs.output None --mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method upsert --ams.batch 10 --ams.interval 300 --ams.proxy test_proxy --ams.verify true --timeout 500""" - +--kafka.servers kafka_server:9090,kafka_server2:9092 --kafka.topic test_kafka_topic --fs.output None \ +--mongo.uri mongodb://localhost:21017/argo_TENANTA --mongo.method insert --report-id None \ +--ams.batch 10 --ams.interval 300 --ams.proxy test_proxy --ams.verify true --timeout 500""" class TestClass(unittest.TestCase): @@ -42,9 +43,13 @@ def test_compose_command(self): hdfs_sync = "hdfs://hdfs_test_host:hdfs_test_port/user/hdfs_test_user/argo/tenants/TENANTA/sync" test_hdfs_commands = dict() - test_hdfs_commands["--sync.mps"] = hdfs_sync+"/Critical/"+"metric_profile_2018-03-01.avro" + test_hdfs_commands["--sync.mps"] = hdfs_sync + \ + "/Critical/"+"metric_profile_2018-03-01.avro" test_hdfs_commands["--sync.ops"] = hdfs_sync+"/TENANTA_ops.json" - test_hdfs_commands["--sync.apr"] = hdfs_sync+"/TENANTA_Critical_ap.json" - test_hdfs_commands["--sync.egp"] = hdfs_sync+"/Critical/group_endpoints_2018-03-01.avro" + test_hdfs_commands["--sync.apr"] = hdfs_sync + \ + "/TENANTA_Critical_ap.json" + test_hdfs_commands["--sync.egp"] = hdfs_sync + \ + "/Critical/group_endpoints_2018-03-01.avro" - self.assertEquals(expected_result, cmd_to_string(compose_command(config, args, test_hdfs_commands)[0])) + self.assertEqual(expected_result, cmd_to_string( + compose_command(config, args, test_hdfs_commands)[0])) diff --git a/bin/test_sync_ingestion_submit.py b/bin/test_sync_ingestion_submit.py index 
d82c8d6e..2ec53b73 100644 --- a/bin/test_sync_ingestion_submit.py +++ b/bin/test_sync_ingestion_submit.py @@ -27,4 +27,4 @@ def test_compose_command(self): parser.add_argument('--sudo', action='store_true') args = parser.parse_args(['--tenant', 'TENANTA', '--sudo']) - self.assertEquals(expected_result, cmd_to_string(compose_command(config, args)[0])) + self.assertEqual(expected_result, cmd_to_string(compose_command(config, args)[0])) diff --git a/bin/update_engine.py b/bin/update_engine.py index d5308e7d..333f8e4e 100755 --- a/bin/update_engine.py +++ b/bin/update_engine.py @@ -52,6 +52,8 @@ def main(args): tenants = config.get("API","tenants") profile_type_checklist = ["operations", "aggregations", "reports", "thresholds", "recomputations"] for tenant in tenants: + if args.tenant and tenant != args.tenant: + continue reports = config.get("TENANTS:"+tenant,"reports") for report in reports: for profile_type in profile_type_checklist: @@ -65,6 +67,8 @@ def main(args): ams = ArgoAmsClient(ams_host, ams_token) for tenant in tenants: + if args.tenant and tenant != args.tenant: + continue ams.check_project_exists(tenant) missing = ams.check_tenant(tenant) if is_tenant_complete(missing): @@ -99,7 +103,7 @@ def tenant_ok_reports(status): for report_name in status["hdfs"]["sync_data"]: result = 1 report = status["hdfs"]["sync_data"][report_name] - for key in report.keys(): + for key in list(report.keys()): result = result * report[key] if result > 0: rep_list.append(report_name) @@ -109,6 +113,8 @@ def tenant_ok_reports(status): if __name__ == "__main__": parser = ArgumentParser(description="Update engine") + parser.add_argument( + "-t", "--tenant", help="tenant owner ", dest="tenant", metavar="STRING", required=False, default=None) parser.add_argument( "-b", "--backup-conf", help="backup current configuration", action="store_true", dest="backup") parser.add_argument( diff --git a/bin/utils/argo_config.py b/bin/utils/argo_config.py index 8a209d0d..6a79aebd 100755 --- a/bin/utils/argo_config.py +++ b/bin/utils/argo_config.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -from ConfigParser import SafeConfigParser -from urlparse import urlparse +from configparser import ConfigParser +from urllib.parse import urlparse import json import re import logging @@ -26,7 +26,6 @@ def __repr__(self): return self.tmpl def get_args(self): - """ Get arguments used in template @@ -49,7 +48,8 @@ def fill(self, **args_new): # If provided arguments fill the needed ones we are ok (extra arguments will be ingored) if not set(args).issubset(set(args_new.keys())): - raise RuntimeError("Argument mismatch, needed arguments:"+str(args)) + raise RuntimeError( + "Argument mismatch, needed arguments:"+str(args)) for arg in args: txt = re.sub(r"{{\s*"+str(arg)+r"\s*}}", str(args_new[arg]), txt) @@ -68,9 +68,10 @@ def partial_fill(self, **args_new): args = self.get_args() for arg in args: - if arg not in args_new.keys(): + if arg not in list(args_new.keys()): continue - txt = re.sub(r"{{\s*" + str(arg) + r"\s*}}", str(args_new[arg]), txt) + txt = re.sub(r"{{\s*" + str(arg) + r"\s*}}", + str(args_new[arg]), txt) return txt @@ -109,7 +110,7 @@ def __init__(self, config=None, schema=None): self.log_changes = True self.conf_path = None self.schema_path = None - self.conf = SafeConfigParser() + self.conf = ConfigParser() self.schema = dict() self.fix = dict() self.var = dict() @@ -131,19 +132,20 @@ def set(self, group, item, value): log.info("config section added [{}]".format(group)) if self.conf.has_option(group, item): old_val = 
self.conf.get(group, item) - else: + else: old_val = None if old_val != value: self.conf.set(group, item, value) if self.log_changes: - log.info("config option changed [{}]{}={} (from:{})".format(group, item, value, old_val)) + log.info("config option changed [{}]{}={} (from:{})".format( + group, item, value, old_val)) def set_default(self, group, item_name): - self.set(group,item_name,str(self.get_default(group, item_name))) + self.set(group, item_name, str(self.get_default(group, item_name))) def get_var_origin(self, group_name, ): - # reverse keys alphabetically - keys = sorted(self.schema.keys(), reverse=True) + # reverse keys alphabetically + keys = sorted(list(self.schema.keys()), reverse=True) for item in keys: if "~" in item: @@ -158,18 +160,18 @@ def get_var_origin(self, group_name, ): return "" if group_name.startswith(item_prefix) and group_name.endswith(item_postfix): - return item + return item return "" def get_default(self, group, item_name): group_og = self.get_var_origin(group) - + item = self.schema[group_og][item_name] - if "default" not in item.keys(): + if "default" not in list(item.keys()): return "" item_type = item["type"] if item_type == "string": - result =item["default"] + result = item["default"] elif item_type == "int" or item_type == "long": result = int(item["default"]) elif item_type == "bool": @@ -190,8 +192,8 @@ def get_default(self, group, item_name): sub_type = "string" result = Template(item["default"], sub_type) - return result + def get(self, group, item=None): """ Given a group and an item return its value @@ -221,11 +223,11 @@ def get(self, group, item=None): r = re.compile(item.replace('*', '.*')) results = {} if group in self.fix: - items = filter(r.match, self.fix[group].keys()) + items = list(filter(r.match, list(self.fix[group].keys()))) for item in items: results[item] = self.fix[group][item] if group in self.var: - items = filter(r.match, self.var[group].keys()) + items = list(filter(r.match, list(self.var[group].keys()))) for item in items: results[item] = self.var[group][item] return results @@ -236,13 +238,13 @@ def get(self, group, item=None): return self.fix[group][item]["value"] if group in self.var: if item in self.var[group]: - if self.var[group][item] is not None: - return self.var[group][item]["value"] + if self.var[group][item] is not None: + return self.var[group][item]["value"] return None def load_conf(self, conf_path): """ - Load configuration from file using a SafeConfigParser + Load configuration from file using a ConfigParser """ self.conf.read(conf_path) self.conf_path = conf_path @@ -274,8 +276,8 @@ def get_as(self, group, item, item_type, og_item): dict: result dictionary with value and optional reference to original item in schema """ pack = dict() - - try: + + try: result = None if item_type == "string": result = self.conf.get(group, item) @@ -303,10 +305,10 @@ def get_as(self, group, item, item_type, og_item): if og_item != item: pack["og_item"] = og_item - except Exception, e: - log.error("Not found [{}][{}]".format(group,item)) + except Exception as e: + log.error("Not found [{}][{}]".format(group, item)) self.valid = False - return + return return pack def add_config_item(self, group, item, og_item, dest, og_group): @@ -325,7 +327,7 @@ def add_config_item(self, group, item, og_item, dest, og_group): else: schema_group = group - if "optional" in self.schema[schema_group][og_item].keys(): + if "optional" in list(self.schema[schema_group][og_item].keys()): if self.schema[schema_group][og_item]["optional"]: if not 
self.conf.has_option(group, item): return @@ -335,7 +337,8 @@ def add_config_item(self, group, item, og_item, dest, og_group): if og_group is not None: dest[group]["og_group"] = og_group - dest[group][item] = self.get_as(group, item, self.schema[schema_group][og_item]["type"], og_item) + dest[group][item] = self.get_as( + group, item, self.schema[schema_group][og_item]["type"], og_item) def add_group_items(self, group, items, var, og_group): """ @@ -358,7 +361,8 @@ def add_group_items(self, group, items, var, og_group): self.add_config_item(group, item, item, dest, og_group) else: for sub_item in item["vars"]: - self.add_config_item(group, sub_item, item["item"], dest, og_group) + self.add_config_item( + group, sub_item, item["item"], dest, og_group) @staticmethod def is_var(name): @@ -398,9 +402,9 @@ def get_item_variations(self, group, item, ogroup): name_pool = self.conf.get(map_pool[0], map_pool[1]).split(",") if name_pool == [""]: return None - except Exception, e: - log.error("Not found [{}]{}".format(map_pool[0],map_pool[1])) - self.valid=False + except Exception as e: + log.error("Not found [{}]{}".format(map_pool[0], map_pool[1])) + self.valid = False return None for name in name_pool: variations["vars"].append(item.replace("~", name)) @@ -435,56 +439,55 @@ def get_group_variations(self, group): return variations def check_conf(self): - """ Validate schema and configuration file. Iterate and extract all configuration parameters """ self.valid = True - fix_groups = self.schema.keys() + fix_groups = list(self.schema.keys()) var_groups = list() - for group in fix_groups: if self.is_var(group): - + var_group = self.get_group_variations(group) if var_group is not None: var_groups.append(var_group) - + continue fix_items = list() var_items = list() - for item in self.schema[group].keys(): + for item in list(self.schema[group].keys()): if self.is_var(item): - group_vars = self.get_item_variations(group,item,None) + group_vars = self.get_item_variations(group, item, None) if group_vars is not None: - var_items.append(self.get_item_variations(group, item, None)) + var_items.append( + self.get_item_variations(group, item, None)) continue fix_items.append(item) self.add_group_items(group, fix_items, False, None) self.add_group_items(group, var_items, True, None) - for group in var_groups: - + for sub_group in group["vars"]: fix_items = list() var_items = list() - for item in self.schema[group["group"]].keys(): + for item in list(self.schema[group["group"]].keys()): if item == "~": continue if self.is_var(item): - item_vars = self.get_item_variations(sub_group, item, group["group"]) + item_vars = self.get_item_variations( + sub_group, item, group["group"]) if item_vars is not None: var_items.append(item_vars) continue fix_items.append(item) # Both fix and var items are in a var group so are considered var - self.add_group_items(sub_group, fix_items, True, group["group"]) - self.add_group_items(sub_group, var_items, True, group["group"]) - - + self.add_group_items(sub_group, fix_items, + True, group["group"]) + self.add_group_items(sub_group, var_items, + True, group["group"]) diff --git a/bin/utils/argo_mongo.py b/bin/utils/argo_mongo.py index 1f143d49..81482f1a 100755 --- a/bin/utils/argo_mongo.py +++ b/bin/utils/argo_mongo.py @@ -5,9 +5,9 @@ import pymongo from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError -from argo_config import ArgoConfig -from common import get_config_paths -from common import get_log_conf +from .argo_config import ArgoConfig +from 
.common import get_config_paths +from .common import get_log_conf log = logging.getLogger(__name__) @@ -19,55 +19,94 @@ def __init__(self, args, config, cols): self.config = config self.cols = cols - def ensure_status_indexes(self, db): """Checks if required indexes exist in specific argo status-related collections in mongodb - + Args: db (obj): pymongo database object - + """ log.info("Checking required indexes in status collections...") + def is_index_included(index_set, index): """gets a set of mongodb indexes and checks if specified mongodb index exists in this set - + Args: index_set (dict): pymongo mongodb index object index (obj): pymongo index object - + Returns: bool: If index exists in set return true """ - for name in index_set.keys(): + for name in list(index_set.keys()): if index_set[name]['key'] == index: - return True + return True return False # Used in all status collections - index_report_date = [("report",pymongo.ASCENDING), ("date_integer",pymongo.ASCENDING)] + index_report_date = [("report", pymongo.ASCENDING), + ("date_integer", pymongo.ASCENDING)] # Used only in status_metrics collection - index_date_host = [("date_integer",pymongo.ASCENDING), ("report",pymongo.ASCENDING)] - status_collections = ["status_metrics","status_endpoints","status_services","status_endpoint_groups"] + index_date_host = [("date_integer", pymongo.ASCENDING), + ("report", pymongo.ASCENDING)] + status_collections = ["status_metrics", "status_endpoints", + "status_services", "status_endpoint_groups"] + + index_history = [("date_integer",pymongo.DESCENDING), + ("id",pymongo.ASCENDING)] + + index_downtimes = [("date_integer",pymongo.DESCENDING)] + + # Check indexes in sync collections + for col_name in ["topology_endpoints", "topology_groups", "weights"]: + col = db[col_name] + indexes = col.index_information() + if not is_index_included(indexes, index_history): + # ensure index + col.create_index(index_history, background=True) + log.info("Created (date_integer,id) index in %s.%s", + col.database.name, col.name) + + # Check for index in downtimes + col = db["downtimes"] + indexes = col.index_information() + if not is_index_included(indexes, index_downtimes): + col.create_index(index_downtimes, background=True) + log.info("Created (date_integer) index in %s.%s", + col.database.name, col.name) + + + # Check first for index report,date for status_name in status_collections: col = db[status_name] indexes = col.index_information() - if not is_index_included(indexes,index_report_date): + if not is_index_included(indexes, index_report_date): # ensure index - col.create_index(index_report_date,background=True) - log.info("Created (report,date) index in %s.%s",col.database.name,col.name) - + col.create_index(index_report_date, background=True) + log.info("Created (report,date) index in %s.%s", + col.database.name, col.name) + # Check for index date,host in status_metrics col = db["status_metrics"] - if not is_index_included(indexes,index_date_host): - col.create_index(index_date_host,background=True) - log.info("Created (report,date) index in %s.%s",col.database.name,col.name) + indexes = col.index_information() + if not is_index_included(indexes, index_date_host): + col.create_index(index_date_host, background=True) + log.info("Created (report,date) index in %s.%s", + col.database.name, col.name) + def mongo_clean_ar(self, uri, dry_run=False): + """Gets a mongo database reference as a uri string and performs + a/r data removal for a specific date - def mongo_clean_ar(self, uri): + Args: + uri (str.): uri 
string pointing to a specific mongodb database + dry_run (bool, optional): Optional flag that specifies if the execution is in dry-run mode. + If yes no data removal is performed. Defaults to False. + """ tenant_report = None @@ -76,7 +115,8 @@ def mongo_clean_ar(self, uri): report_name = self.args.report tenant_group = "TENANTS:" + self.args.tenant if report_name in self.config.get(tenant_group, "reports"): - tenant_report = self.config.get(tenant_group, "report_"+report_name) + tenant_report = self.config.get( + tenant_group, "report_"+report_name) else: log.critical("Report %s not found", report_name) sys.exit(1) @@ -101,34 +141,46 @@ def mongo_clean_ar(self, uri): # iterate over the specified collections for col in self.cols: if tenant_report is not None: - num_of_rows = db[col].find({"date": date_int, "report": tenant_report}).count() + num_of_rows = db[col].find( + {"date": date_int, "report": tenant_report}).count() log.info("Collection: " + col + " -> Found " + str( num_of_rows) + " entries for date: " + self.args.date + " and report: " + self.args.report) else: num_of_rows = db[col].find({"date": date_int}).count() log.info("Collection: " + col + " -> Found " + str( num_of_rows) + " entries for date: " + self.args.date + ". No report specified!") - - if num_of_rows > 0: - - if tenant_report is not None: - # response returned from the delete operation - res = db[col].delete_many({"date": date_int, "report": tenant_report}) - log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) + - " entries for date: " + self.args.date + " and report: " + self.args.report) - else: - # response returned from the delete operation - res = db[col].delete_many({"date": date_int, "report": tenant_report}) - log.info("Collection: " + col + " -> Removed " + str( - res.deleted_count) + " entries for date: " + self.args.date + ". No report specified!") - log.info("Entries removed successfully") + if dry_run: + log.info("Results won't be removed due to dry-run mode") else: - log.info("Zero entries found. Nothing to remove.") + if num_of_rows > 0: + if tenant_report is not None: + # response returned from the delete operation + res = db[col].delete_many( + {"date": date_int, "report": tenant_report}) + log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) + + " entries for date: " + self.args.date + " and report: " + self.args.report) + else: + # response returned from the delete operation + res = db[col].delete_many( + {"date": date_int, "report": tenant_report}) + log.info("Collection: " + col + " -> Removed " + str( + res.deleted_count) + " entries for date: " + self.args.date + ". No report specified!") + log.info("Entries removed successfully") + else: + log.info("Zero entries found. Nothing to remove.") # close the connection with mongo client.close() - def mongo_clean_status(self, uri): + def mongo_clean_status(self, uri, dry_run=False): + """Gets a mongo database reference as a uri string and performs + status data removal for a specific date + + Args: + uri (str.): uri string pointing to a specific mongodb database + dry_run (bool, optional): Optional flag that specifies if the execution is in dry-run mode. + If yes no data removal is performed. Defaults to False. 
+ """ tenant_report = None @@ -137,7 +189,8 @@ def mongo_clean_status(self, uri): report_name = self.args.report tenant_group = "TENANTS:" + self.args.tenant if report_name in self.config.get(tenant_group, "reports"): - tenant_report = self.config.get(tenant_group, "report_"+report_name) + tenant_report = self.config.get( + tenant_group, "report_"+report_name) else: log.critical("Report %s not found", report_name) sys.exit(1) @@ -165,7 +218,8 @@ def mongo_clean_status(self, uri): # iterate over the specified collections for col in self.cols: if tenant_report is not None: - num_of_rows = db[col].find({"date_integer": date_int, "report": tenant_report}).count() + num_of_rows = db[col].find( + {"date_integer": date_int, "report": tenant_report}).count() log.info("Collection: " + col + " -> Found " + str( num_of_rows) + " entries for date: " + self.args.date + " and report: " + self.args.report) else: @@ -173,21 +227,25 @@ def mongo_clean_status(self, uri): log.info("Collection: " + col + " -> Found " + str( num_of_rows) + " entries for date: " + self.args.date + ". No report specified!") - if num_of_rows > 0: - - if tenant_report is not None: - # response returned from the delete operation - res = db[col].delete_many({"date_integer": date_int, "report": tenant_report}) - log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) + - " entries for date: " + self.args.date + " and report: " + self.args.report) - else: - # response returned from the delete operation - res = db[col].delete_many({"date_integer": date_int, "report": tenant_report}) - log.info("Collection: " + col + " -> Removed " + str( - res.deleted_count) + " entries for date: " + self.args.Date + ". No report specified!") - log.info("Entries removed successfully") + if dry_run: + log.info("Results won't be removed to dry-run mode") else: - log.info("Zero entries found. Nothing to remove.") + if num_of_rows > 0: + if tenant_report is not None: + # response returned from the delete operation + res = db[col].delete_many( + {"date_integer": date_int, "report": tenant_report}) + log.info("Collection: " + col + " -> Removed " + str(res.deleted_count) + + " entries for date: " + self.args.date + " and report: " + self.args.report) + else: + # response returned from the delete operation + res = db[col].delete_many( + {"date_integer": date_int, "report": tenant_report}) + log.info("Collection: " + col + " -> Removed " + str( + res.deleted_count) + " entries for date: " + self.args.Date + ". No report specified!") + log.info("Entries removed successfully") + else: + log.info("Zero entries found. 
Nothing to remove.") # close the connection with mongo client.close() @@ -205,20 +263,21 @@ def main_clean(args=None): # Get main configuration and schema config = ArgoConfig(conf_paths["main"], conf_paths["schema"]) - # set up the mongo uri + # set up the mongo uri section_tenant = "TENANTS:" + args.tenant - mongo_endpoint = config.get("MONGO","endpoint") - mongo_uri = config.get(section_tenant,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=args.tenant) - + mongo_endpoint = config.get("MONGO", "endpoint") + mongo_uri = config.get(section_tenant, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=args.tenant) if args.job == "clean_ar": - argo_mongo_client = ArgoMongoClient(args, config, ["service_ar", "endpoint_group_ar"]) - argo_mongo_client.mongo_clean_ar(mongo_uri) + argo_mongo_client = ArgoMongoClient( + args, config, ["endpoint_ar", "service_ar", "endpoint_group_ar"]) + argo_mongo_client.mongo_clean_ar(mongo_uri, args.dry_run) elif args.job == "clean_status": argo_mongo_client = ArgoMongoClient(args, config, ["status_metrics", "status_endpoints", "status_services", "status_endpoint_groups"]) - argo_mongo_client.mongo_clean_status(mongo_uri) + argo_mongo_client.mongo_clean_status(mongo_uri, args.dry_run) # Provide the ability to the script, to run as a standalone module @@ -234,6 +293,8 @@ def main_clean(args=None): "-c", "--config", metavar="STRING", help="Path for the config file", dest="config") parser.add_argument( "-j", "--job", metavar="STRING", help="Stand alone method we wish to run", required=True, dest="job") + parser.add_argument("--dry-run", help="Runs in test mode without actually submitting the job", + action="store_true", dest="dry_run") # Parse the arguments sys.exit(main_clean(parser.parse_args())) diff --git a/bin/utils/check_tenant.py b/bin/utils/check_tenant.py index 1d1b9f3d..46b3eb3e 100755 --- a/bin/utils/check_tenant.py +++ b/bin/utils/check_tenant.py @@ -1,14 +1,14 @@ #!/usr/bin/env python from argparse import ArgumentParser -from common import get_config_paths, get_log_conf -from argo_config import ArgoConfig +from .common import get_config_paths, get_log_conf +from .argo_config import ArgoConfig import sys import logging import json from snakebite.client import Client from datetime import datetime, timedelta -from update_ams import ArgoAmsClient +from .update_ams import ArgoAmsClient import requests @@ -112,7 +112,7 @@ def check_tenant_hdfs(tenant, target_date, days_back, namenode, hdfs_user, clien except Exception: sync_result[report][item] = False - for item in report_profiles.keys(): + for item in list(report_profiles.keys()): profile_path = "".join([hdfs_sync.path,"/",report_profiles[item].format(tenant,report)]) try: client.test(profile_path) @@ -158,13 +158,13 @@ def check_tenant_ams(tenant, target_date, ams, config): if ams.check_project_exists(tenant): tenant_topics = ams.get_tenant_topics(tenant) - topic_types = tenant_topics.keys() + topic_types = list(tenant_topics.keys()) if "metric_data" in topic_types: ams_tenant["metric_data"]["publishing"] = True if "sync_data" in topic_types: ams_tenant["sync_data"]["publishing"] = True - sub_types = ams.get_tenant_subs(tenant,tenant_topics).keys() + sub_types = list(ams.get_tenant_subs(tenant,tenant_topics).keys()) if "ingest_metric" in sub_types: ams_tenant["metric_data"]["ingestion"] = True if "status_metric" in sub_types: @@ -204,15 +204,24 @@ def check_tenants(tenants, target_date, days_back, config): # ams client init ams_token = config.get("AMS", "access_token") ams_host = 
config.get("AMS", "endpoint").hostname - ams = ArgoAmsClient(ams_host, ams_token) + ams_proxy = config.get("AMS","proxy") + if ams_proxy: + ams_proxy = ams_proxy.geturl() + ams_verify = config.get("AMS","verify") + + ams = ArgoAmsClient(ams_host, ams_token, ams_verify, ams_proxy) log.info("connecting to AMS: {}".format(ams_host)) # Upload tenant statuses in argo web api api_endpoint = config.get("API","endpoint").netloc api_token = config.get("API","access_token") + api_proxy = config.get("API","proxy") + if api_proxy: + api_proxy = api_proxy.geturl() + api_verify = config.get("API","verify") # Get tenant uuids - tenant_uuids = get_tenant_uuids(api_endpoint, api_token) + tenant_uuids = get_tenant_uuids(api_endpoint, api_token, api_verify, api_proxy) if not tenant_uuids: log.error("Without tenant uuids service is unable to check and upload tenant status") sys.exit(1) @@ -235,7 +244,7 @@ def check_tenants(tenants, target_date, days_back, config): log.info("Status for tenant[{}] = {}".format(tenant,json.dumps(status_tenant))) # Upload tenant status to argo-web-api complete_status.append(status_tenant) - upload_tenant_status(api_endpoint,api_token,tenant,tenant_uuids[tenant],status_tenant) + upload_tenant_status(api_endpoint,api_token,tenant,tenant_uuids[tenant],status_tenant,api_verify,api_proxy) return complete_status @@ -280,12 +289,14 @@ def run_tenant_check(args): -def get_tenant_uuids(api_endpoint, api_token): +def get_tenant_uuids(api_endpoint, api_token, verify=False, http_proxy_url=None): """Get tenant uuids from remote argo-web-api endpoint Args: api_endpoint (str.): hostname of the remote argo-web-api endpoint api_token (str.): access token for the remote argo-web-api endpoint + verify (boolean): flag if the remote web api host should be verified + http_proxy_url (str.): optional url for local http proxy to be used Returns: dict.: dictionary with mappings of tenant names to tenant uuidss @@ -294,12 +305,15 @@ def get_tenant_uuids(api_endpoint, api_token): log.info("Retrieving tenant uuids from api: {}".format(api_endpoint)) result = dict() url = "https://{}/api/v2/admin/tenants".format(api_endpoint) + proxies = None + if http_proxy_url: + proxies = {'http':http_proxy_url,'https':http_proxy_url} headers = dict() headers.update({ 'Accept': 'application/json', 'x-api-key': api_token }) - r = requests.get(url, headers=headers, verify=False) + r = requests.get(url, headers=headers, verify=verify, proxies=proxies) if 200 == r.status_code: @@ -313,7 +327,7 @@ def get_tenant_uuids(api_endpoint, api_token): return result -def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_status): +def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_status, verify=False, http_proxy_url=None): """Uploads tenant's status to a remote argo-web-api endpoint Args: @@ -322,6 +336,8 @@ def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_stat tenant (str.): tenant name tenant_id (str.): tenant uuid tenant_status (obj.): json representation of tenant's status report + verify (boolean): flag if the remote web api host should be verified + http_proxy_url (str.): optional url for local http proxy to be used Returns: bool: true if upload is successfull @@ -334,7 +350,10 @@ def upload_tenant_status(api_endpoint, api_token, tenant, tenant_id, tenant_stat 'Accept': 'application/json', 'x-api-key': api_token }) - r = requests.put(url, headers=headers, data=json.dumps(tenant_status), verify=False) + proxies = None + if http_proxy_url: + 
proxies={'http':http_proxy_url, 'https':http_proxy_url} + r = requests.put(url, headers=headers, data=json.dumps(tenant_status), verify=verify, proxies=proxies) if 200 == r.status_code: log.info("Tenant's {} status upload successful to {}".format(tenant, api_endpoint)) return True diff --git a/bin/utils/common.py b/bin/utils/common.py index bd8b4fcf..16e57b37 100644 --- a/bin/utils/common.py +++ b/bin/utils/common.py @@ -3,7 +3,7 @@ import json import subprocess from subprocess import check_call -from urlparse import urlparse, urlsplit, urlunsplit +from urllib.parse import urlparse, urlsplit, urlunsplit import logging.config import logging import os.path @@ -78,7 +78,7 @@ def date_rollback(path, year, month, day, config, client): sys.exit(1) -def flink_job_submit(config, cmd_command, job_namespace=None): +def flink_job_submit(config, cmd_command, job_namespace=None, dry_run=False): """Method that takes a command and executes it, after checking for flink being up and running. If the job_namespace is defined, then it will also check for the specific job if it's already running. If flink is not running or the job is already submitted, it will execute. @@ -86,30 +86,48 @@ Args: config(ConfigParser): script's configuration cmd_command(list): list containing the command to be submitted job_namespace(string): the job's name + dry_run(boolean, optional): signifies a dry-run execution - no submission is performed """ # check if flink is up and running + if dry_run: + log.info("This is a dry run. Job won't be submitted") + else: + log.info("Getting ready to submit job") + + log.info(cmd_to_string(cmd_command)+"\n") try: flink_response = requests.get(config.get("FLINK", "job_manager").geturl()+"/joboverview/running") - + issues = False + job_already_runs = False if job_namespace is not None: # if the job's already running then exit, else submit the command for job in json.loads(flink_response.text)["jobs"]: if job["name"] == job_namespace: log.critical("Job: "+"'"+job_namespace+"' is already running") - sys.exit(1) + job_already_runs = True + issues = True log.info("Everything is ok") try: - check_call(cmd_command) + if not dry_run and not job_already_runs: + check_call(cmd_command) except subprocess.CalledProcessError as esp: log.fatal("Job was not submitted. Error exit code: "+str(esp.returncode)) + issues = True sys.exit(1) except requests.exceptions.ConnectionError: log.fatal("Flink is not currently running. Tried to communicate with job manager at: " + config.get("FLINK", "job_manager").geturl()) - sys.exit(1) - + issues = True + + # print dry-run message if needed + if dry_run: + # print output in green and exit + print(("\033[92m" + cmd_to_string(cmd_command) + "\033[0m")) + # if issues exit + if issues: + exit(1) def hdfs_check_path(uri, client): """Method that checks if a path in hdfs exists. 
If it exists it will return the path, diff --git a/bin/utils/recomputations.py b/bin/utils/recomputations.py index 14e06153..cfffc726 100755 --- a/bin/utils/recomputations.py +++ b/bin/utils/recomputations.py @@ -7,24 +7,25 @@ from pymongo import MongoClient from bson import json_util import logging -from common import get_config_paths -from common import get_log_conf -from argo_config import ArgoConfig +from .common import get_config_paths +from .common import get_log_conf +from .argo_config import ArgoConfig import subprocess log = logging.getLogger(__name__) + def write_output(results, tenant, report, target_date, config): """Write recomputation output to hdfs - + Args: results (list(obj)): List of recomputation definitions tenant (str.): tenant name report (str.): report name target_date ([type]): target date config ([type]): argo configuration object - + Returns: bool: False if upload had errors """ @@ -33,24 +34,27 @@ def write_output(results, tenant, report, target_date, config): log.info("No recomputations found skipping") return True # create a temporary recalculation file in the ar-sync folder - recomp_name = "".join(["recomp", "_", tenant, "_", report, "_", target_date, ".json"]) + recomp_name = "".join( + ["recomp", "_", tenant, "_", report, "_", target_date, ".json"]) recomp_filepath = os.path.join("/tmp/", recomp_name) # write output file to the correct job path with open(recomp_filepath, 'w') as output_file: json.dump(results, output_file, default=json_util.default) - + # upload file to hdfs hdfs_writer = config.get("HDFS", "writer_bin") hdfs_namenode = config.get("HDFS", "namenode") hdfs_user = config.get("HDFS", "user") - hdfs_sync = config.get("HDFS", "path_sync").fill(namenode=hdfs_namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).path - - status = subprocess.check_call([hdfs_writer, "put", recomp_filepath, hdfs_sync]) + hdfs_sync = config.get("HDFS", "path_sync").fill( + namenode=hdfs_namenode.geturl(), hdfs_user=hdfs_user, tenant=tenant).path + + status = subprocess.check_call( + [hdfs_writer, "put", recomp_filepath, hdfs_sync]) # clear temp local file os.remove(recomp_filepath) if status == 0: - log.info("File uploaded successfully to hdfs: %s", hdfs_sync ) + log.info("File uploaded successfully to hdfs: %s", hdfs_sync) return True else: log.error("File uploaded unsuccessful to hdfs: %s", hdfs_sync) @@ -59,20 +63,19 @@ def write_output(results, tenant, report, target_date, config): def get_mongo_collection(mongo_uri, collection): """Return a pymongo collection object from a collection name - + Args: mongo_uri (obj.): mongodb uri collection (str.): collection name - + Returns: obj.: pymongo collection object """ - log.info ("Connecting to mongodb: %s", mongo_uri.geturl()) - print mongo_uri.geturl() + log.info("Connecting to mongodb: %s", mongo_uri.geturl()) client = MongoClient(mongo_uri.geturl()) log.info("Opening database: %s", mongo_uri.path[1:]) - db = client[mongo_uri.path[1:]] + db = client[mongo_uri.path[1:]] log.info("Opening collection: %s", collection) col = db[collection] @@ -81,12 +84,12 @@ def get_mongo_collection(mongo_uri, collection): def get_mongo_results(collection, target_date, report): """Get recomputation results from mongo collection for specific date and report - + Args: collection (obj.): pymongo collection object target_date (str.): date to target report (str.): report name - + Returns: list(dict): list of dictionaries containing recomputation definitions """ @@ -94,36 +97,38 @@ def get_mongo_results(collection, target_date, report): # 
Init results list results = [] # prepare the query to find requests that include the target date - query = "'%s' >= this.start_time.split('T')[0] && '%s' <= this.end_time.split('T')[0]" % (target_date, target_date) + query = "'%s' >= this.start_time.split('T')[0] && '%s' <= this.end_time.split('T')[0]" % ( + target_date, target_date) # run the query - for item in collection.find({"report":report,"$where": query}, {"_id": 0}): + for item in collection.find({"report": report, "$where": query}, {"_id": 0}): results.append(item) return results + def upload_recomputations(tenant, report, target_date, config): """Given a tenant, report and target date upload the relevant recomputations as an hdfs file - + Args: tenant (str.): tenant name report (str.): report name target_date (str.): target date config (obj.): argo configuration object - + Returns: bool: True if upload was succesfull """ - tenant_group = "TENANTS:" +tenant - mongo_endpoint = config.get("MONGO","endpoint").geturl() - mongo_location = config.get_default(tenant_group,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=tenant) - col = get_mongo_collection(mongo_location, "recomputations" ) + tenant_group = "TENANTS:" + tenant + mongo_endpoint = config.get("MONGO", "endpoint").geturl() + mongo_location = config.get_default(tenant_group, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=tenant) + col = get_mongo_collection(mongo_location, "recomputations") recomp_data = get_mongo_results(col, target_date, report) return write_output(recomp_data, tenant, report, target_date, config) - def main(args=None): # Get configuration paths conf_paths = get_config_paths(args.config) @@ -136,11 +141,13 @@ def main(args=None): if not res: sys.exit(1) + if __name__ == "__main__": # Feed Argument parser with the description of the 3 arguments we need # (input_file,output_file,schema_file) - arg_parser = ArgumentParser(description="Get relevant recomputation requests") + arg_parser = ArgumentParser( + description="Get relevant recomputation requests") arg_parser.add_argument( "-d", "--date", help="date", dest="date", metavar="DATE", required="TRUE") arg_parser.add_argument( @@ -151,4 +158,4 @@ def main(args=None): "-c", "--config", help="config ", dest="config", metavar="STRING", required="TRUE") # Parse the command line arguments accordingly and introduce them to # main... 
- sys.exit(main(arg_parser.parse_args())) \ No newline at end of file + sys.exit(main(arg_parser.parse_args())) diff --git a/bin/utils/test_argo_config.py b/bin/utils/test_argo_config.py index 8bf80ca2..397b9019 100644 --- a/bin/utils/test_argo_config.py +++ b/bin/utils/test_argo_config.py @@ -1,7 +1,7 @@ import unittest import os -from argo_config import ArgoConfig -from urlparse import urlparse +from .argo_config import ArgoConfig +from urllib.parse import urlparse CONF_FILE = os.path.join(os.path.dirname(__file__), '../../conf/argo-streaming.conf') diff --git a/bin/utils/test_update_ams.py b/bin/utils/test_update_ams.py index 20fe1b85..58da67ec 100644 --- a/bin/utils/test_update_ams.py +++ b/bin/utils/test_update_ams.py @@ -1,6 +1,6 @@ import unittest import responses -from update_ams import ArgoAmsClient +from .update_ams import ArgoAmsClient class TestClass(unittest.TestCase): @@ -40,7 +40,7 @@ def test_urls(self): actual = ams.get_url(test_case["resource"], test_case["item_uuid"], test_case["group_uuid"], test_case["action"]) expected = test_case["expected"] - self.assertEquals(expected, actual) + self.assertEqual(expected, actual) @responses.activate def test_basic_request(self): @@ -109,6 +109,24 @@ def test_basic_request(self): ], "name": "ams_projecta_consumer", + }, status=200) + responses.add(responses.GET, 'https://ams.foo/v1/users/ams_projecta_archiver?key=faketoken', + json={ + "uuid": "id02", + "projects": [ + { + "project": "PROJECTA", + "roles": [ + "consumer" + ], + "topics": [ + + ], + "subscriptions": ["archive_metric"] + } + ], + "name": "ams_projecta_archiver", + }, status=200) responses.add(responses.GET, 'https://ams.foo/v1/users/ams_projecta_publisher?key=faketoken', json={ @@ -153,22 +171,22 @@ def test_basic_request(self): ams = ArgoAmsClient("ams.foo", "faketoken") - self.assertEquals("PROJECTA", ams.get_project("PROJECTA")["name"]) + self.assertEqual("PROJECTA", ams.get_project("PROJECTA")["name"]) users = ams.get_users() - self.assertEquals("id01", users[0]["uuid"]) - self.assertEquals("id02", users[1]["uuid"]) + self.assertEqual("id01", users[0]["uuid"]) + self.assertEqual("id02", users[1]["uuid"]) user = ams.get_user("ams_projecta_consumer") - self.assertEquals("ams_projecta_consumer", user["name"]) + self.assertEqual("ams_projecta_consumer", user["name"]) - self.assertEquals(["sync_data", "metric_data"], ams.user_get_topics(users[0], "PROJECTA")) - self.assertEquals([], ams.user_get_subs(users[0], "PROJECTA")) - self.assertEquals([], ams.user_get_topics(users[1], "PROJECTA")) - self.assertEquals(["ingest_sync", "ingest_metric", "status_sync", "status_metric"], + self.assertEqual(["sync_data", "metric_data"], ams.user_get_topics(users[0], "PROJECTA")) + self.assertEqual([], ams.user_get_subs(users[0], "PROJECTA")) + self.assertEqual([], ams.user_get_topics(users[1], "PROJECTA")) + self.assertEqual(["ingest_sync", "ingest_metric", "status_sync", "status_metric"], ams.user_get_subs(users[1], "PROJECTA")) - self.assertEquals("PROJECTA", ams.check_project_exists("projectA")["name"]) + self.assertEqual("PROJECTA", ams.check_project_exists("projectA")["name"]) expected_missing = {'topics': ['sync_data'], 'topic_acls': [], - 'subs': ['ingest_sync', 'ingest_metric', 'status_sync', 'status_metric'], - 'sub_acls': ['ingest_sync'], 'users': ['project_admin']} + 'subs': ['ingest_sync', 'ingest_metric', 'status_sync', 'status_metric', 'archive_metric'], + 'sub_acls': ['ingest_sync', 'archive_metric'], 'users': ['project_admin']} - self.assertEquals(expected_missing, 
ams.check_tenant("projectA")) + self.assertEqual(expected_missing, ams.check_tenant("projectA")) diff --git a/bin/utils/test_update_cron.py b/bin/utils/test_update_cron.py index 753b5bcc..91ee64fc 100644 --- a/bin/utils/test_update_cron.py +++ b/bin/utils/test_update_cron.py @@ -1,7 +1,7 @@ import unittest import os -from update_cron import get_daily, get_hourly, gen_entry, gen_batch_ar, gen_batch_status, gen_tenant_all, gen_for_all -from argo_config import ArgoConfig +from .update_cron import get_daily, get_hourly, gen_entry, gen_batch_ar, gen_batch_status, gen_tenant_all, gen_for_all +from .argo_config import ArgoConfig CONF_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../conf/argo-streaming.conf')) SCHEMA_FILE = os.path.join(os.path.dirname(__file__), '../../conf/config.schema.json') @@ -23,24 +23,24 @@ def test_update_cron(self): config = ArgoConfig(CONF_FILE, SCHEMA_FILE) # Test get_hourly - self.assertEquals("8 * * * *", get_hourly(8)) - self.assertEquals("44 * * * *", get_hourly(44)) - self.assertEquals("32 * * * *", get_hourly(32)) - self.assertEquals("12 * * * *", get_hourly(12)) - self.assertEquals("5 * * * *", get_hourly()) + self.assertEqual("8 * * * *", get_hourly(8)) + self.assertEqual("44 * * * *", get_hourly(44)) + self.assertEqual("32 * * * *", get_hourly(32)) + self.assertEqual("12 * * * *", get_hourly(12)) + self.assertEqual("5 * * * *", get_hourly()) # Test get_daily - self.assertEquals("8 1 * * *", get_daily(1, 8)) - self.assertEquals("44 3 * * *", get_daily(3, 44)) - self.assertEquals("32 4 * * *", get_daily(4, 32)) - self.assertEquals("12 5 * * *", get_daily(5, 12)) - self.assertEquals("0 5 * * *", get_daily()) + self.assertEqual("8 1 * * *", get_daily(1, 8)) + self.assertEqual("44 3 * * *", get_daily(3, 44)) + self.assertEqual("32 4 * * *", get_daily(4, 32)) + self.assertEqual("12 5 * * *", get_daily(5, 12)) + self.assertEqual("0 5 * * *", get_daily()) # Test gen_entry - self.assertEquals("#simple command\n5 * * * * root echo 12\n", + self.assertEqual("#simple command\n5 * * * * root echo 12\n", gen_entry(get_hourly(), "echo 12", "root", "simple command")) - self.assertEquals("#foo command\n8 12 * * * foo echo 1+1\n", + self.assertEqual("#foo command\n8 12 * * * foo echo 1+1\n", gen_entry(get_daily(12, 8), "echo 1+1", "foo", "foo command")) # Test generation of ar cronjob for a specific tenant and report @@ -48,14 +48,14 @@ def test_update_cron(self): + "5 5 * * * foo " + BATCH_AR + " -t TENANT_A -r report1 -d " + YESTERDAY + " -m upsert " + "-c "\ + config.conf_path + "\n" - self.assertEquals(expected, gen_batch_ar(config, "TENANT_A", "report1", "daily", "foo", "upsert")) + self.assertEqual(expected, gen_batch_ar(config, "TENANT_A", "report1", "daily", "foo", "upsert")) # Test generation of ar cronjob for a specific tenant and report expected = "#TENANT_A:report1 hourly A/R\n"\ + "5 * * * * " + BATCH_AR + " -t TENANT_A -r report1 -d " + TODAY + " -m insert " + "-c "\ + config.conf_path + "\n" - self.assertEquals(expected, gen_batch_ar(config, "TENANT_A", "report1", "hourly")) + self.assertEqual(expected, gen_batch_ar(config, "TENANT_A", "report1", "hourly")) # Test generation of ar cronjob for a specific tenant and report expected = "#TENANT_B:report1 daily Status\n"\ @@ -63,14 +63,14 @@ def test_update_cron(self): + YESTERDAY + " -m upsert " + "-c "\ + config.conf_path + "\n" - self.assertEquals(expected, gen_batch_status(config, "TENANT_B", "report1", "daily", "foo", "upsert")) + self.assertEqual(expected, gen_batch_status(config, 
"TENANT_B", "report1", "daily", "foo", "upsert")) # Test generation of status cronjob for a specific tenant and report expected = "#TENANT_B:report1 hourly Status\n"\ + "5 * * * * " + BATCH_STATUS + " -t TENANT_B -r report1 -d " + TODAY + " -m insert " + "-c "\ + config.conf_path + "\n" - self.assertEquals(expected, gen_batch_status(config, "TENANT_B", "report1", "hourly")) + self.assertEqual(expected, gen_batch_status(config, "TENANT_B", "report1", "hourly")) # Test generation of cronjobs for a tenant's reports expected = "#Jobs for TENANT_A\n\n" \ @@ -100,7 +100,7 @@ def test_update_cron(self): + config.conf_path + "\n\n" \ + "\n" - self.assertEquals(expected, gen_tenant_all(config, "TENANT_A")) + self.assertEqual(expected, gen_tenant_all(config, "TENANT_A")) # Test generation of cronjobs for all tenants and all reports expected2 = "#Jobs for TENANT_B\n\n" \ @@ -131,4 +131,4 @@ def test_update_cron(self): + "\n" expected = expected + expected2 - self.assertEquals(expected, gen_for_all(config)) + self.assertEqual(expected, gen_for_all(config)) diff --git a/bin/utils/test_update_profiles.py b/bin/utils/test_update_profiles.py index c162d722..67b2f01a 100644 --- a/bin/utils/test_update_profiles.py +++ b/bin/utils/test_update_profiles.py @@ -1,6 +1,6 @@ import unittest -from update_profiles import HdfsReader -from update_profiles import ArgoApiClient +from .update_profiles import HdfsReader +from .update_profiles import ArgoApiClient class TestClass(unittest.TestCase): @@ -13,9 +13,9 @@ def test_hdfs_reader(self): test_cases = [ {"tenant": "TA", "report": "Critical", "profile_type": "operations", - "expected": "/user/foo/argo/tenants/TA/sync/TA_ops.json"}, + "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops.json"}, {"tenant": "TA", "report": "Super-Critical", "profile_type": "operations", - "expected": "/user/foo/argo/tenants/TA/sync/TA_ops.json"}, + "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops.json"}, {"tenant": "TA", "report": "Critical", "profile_type": "reports", "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"}, {"tenant": "TA", "report": "Critical", "profile_type": "aggregations", @@ -27,13 +27,44 @@ def test_hdfs_reader(self): {"tenant": "TB", "report": "Critical", "profile_type": "aggregations", "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap.json"}, {"tenant": "TB", "report": "Critical", "profile_type": "reports", - "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"} + "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"}, + {"tenant": "TB", "report": "Critical", "profile_type": "metrics", + "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics.json"} ] for test_case in test_cases: - actual = hdfs.gen_profile_path(test_case["tenant"], test_case["report"], test_case["profile_type"]) + actual = hdfs.gen_profile_path( + test_case["tenant"], test_case["report"], test_case["profile_type"]) expected = test_case["expected"] - self.assertEquals(expected, actual) + self.assertEqual(expected, actual) + + # Test with dates + test_cases_dates = [ + {"tenant": "TA", "report": "Critical", "profile_type": "operations", "date": "2019-12-11", + "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops_2019-12-11.json"}, + {"tenant": "TA", "report": "Super-Critical", "profile_type": "operations", "date": "2019-10-04", + "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops_2019-10-04.json"}, + {"tenant": "TA", "report": "Critical", "profile_type": "reports", "date": "2019-05-11", + 
"expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"}, + {"tenant": "TA", "report": "Critical", "profile_type": "aggregations", "date": "2019-06-06", + "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ap_2019-06-06.json"}, + {"tenant": "TA", "report": "Crit", "profile_type": "reports", "date": "2019-07-04", + "expected": "/user/foo/argo/tenants/TA/sync/TA_Crit_cfg.json"}, + {"tenant": "TA", "report": "Super-Critical", "profile_type": "aggregations", "date": "2019-03-04", + "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ap_2019-03-04.json"}, + {"tenant": "TB", "report": "Critical", "profile_type": "aggregations", "date": "2019-01-04", + "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap_2019-01-04.json"}, + {"tenant": "TB", "report": "Critical", "profile_type": "reports", "date": "2019-01-05", + "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"}, + {"tenant": "TB", "report": "Critical", "profile_type": "metrics", "date": "2019-02-24", + "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics_2019-02-24.json"} + ] + + for test_case_date in test_cases_dates: + actual = hdfs.gen_profile_path( + test_case_date["tenant"], test_case_date["report"], test_case_date["profile_type"], test_case_date["date"]) + expected = test_case_date["expected"] + self.assertEqual(expected, actual) def test_api(self): @@ -60,10 +91,15 @@ def test_api(self): {"resource": "tenants", "item_uuid": None, "expected": "https://foo.host/api/v2/admin/tenants"}, {"resource": "tenants", "item_uuid": "12", - "expected": "https://foo.host/api/v2/admin/tenants/12"} - ] + "expected": "https://foo.host/api/v2/admin/tenants/12"}, + {"resource": "metrics", "item_uuid": None, + "expected": "https://foo.host/api/v2/metric_profiles"}, + {"resource": "metrics", "item_uuid": "12", + "expected": "https://foo.host/api/v2/metric_profiles/12"} + ] for test_case in test_cases: - actual = argo_api.get_url(test_case["resource"], test_case["item_uuid"]) + actual = argo_api.get_url( + test_case["resource"], test_case["item_uuid"]) expected = test_case["expected"] - self.assertEquals(expected, actual) + self.assertEqual(expected, actual) diff --git a/bin/utils/update_ams.py b/bin/utils/update_ams.py index d9148830..6c9d88bf 100755 --- a/bin/utils/update_ams.py +++ b/bin/utils/update_ams.py @@ -2,8 +2,8 @@ import requests import json import logging -from common import get_config_paths, get_log_conf -from argo_config import ArgoConfig +from .common import get_config_paths, get_log_conf +from .argo_config import ArgoConfig from argparse import ArgumentParser import sys @@ -18,16 +18,23 @@ class ArgoAmsClient: It connects to an argo-messaging host and retrieves project/user/topic/subscription information """ - def __init__(self, host, admin_key, verify=True): + def __init__(self, host, admin_key, verify=True, http_proxy_url=None): """ Initialize ArgoAAmsClient Args: host: str. argo ams host admin_key: str. 
admin token + verify (boolean): flag if the remote web api host should be verified + http_proxy_url (str.): optional url for local http proxy to be used """ # flag to verify https connections or not self.verify = verify + # proxy configuration + if http_proxy_url: + self.proxies = {'http':http_proxy_url,'https':http_proxy_url} + else: + self.proxies = None # ams host self.host = host # admin key to access ams service @@ -88,7 +95,7 @@ def post_resource(self, url, data): 'Accept': 'application/json' }) # do the post requests - r = requests.post(url, headers=headers, verify=self.verify, data=json.dumps(data)) + r = requests.post(url, headers=headers, verify=self.verify, data=json.dumps(data), proxies=self.proxies) # if successful return data (or empty json) if 200 == r.status_code: if r.text == "": @@ -116,7 +123,7 @@ def put_resource(self, url, data): 'Accept': 'application/json' }) # do the put request - r = requests.put(url, headers=headers, verify=self.verify, data=json.dumps(data)) + r = requests.put(url, headers=headers, verify=self.verify, data=json.dumps(data), proxies=self.proxies) # if successful return json data (or empty json) if 200 == r.status_code: if r.text == "": @@ -143,7 +150,7 @@ def get_resource(self, url): 'Accept': 'application/json' }) # do the get resource - r = requests.get(url, headers=headers, verify=self.verify) + r = requests.get(url, headers=headers, verify=self.verify, proxies=self.proxies) # if successful return the json data or empty json if 200 == r.status_code: if r.text == "": @@ -306,8 +313,8 @@ def get_tenant_users(self, tenant): dict. json representation of list of AMS users """ - # tenant must have 3 users: project_admin, publisher, consumer - lookup = [("project_admin", "ams_{}_admin"), ("publisher", "ams_{}_publisher"), ("consumer", "ams_{}_consumer")] + # tenant must have 4 users: project_admin, publisher, consumer, archiver(consumer) + lookup = [("project_admin", "ams_{}_admin"), ("publisher", "ams_{}_publisher"), ("consumer", "ams_{}_consumer"), ("archiver", "ams_{}_archiver")] lookup = [(x, y.format(tenant.lower())) for (x, y) in lookup] users = dict() for (role, name) in lookup: @@ -372,6 +379,9 @@ def get_tenant_subs(self, tenant, topics): if name.endswith('status_metric'): if topics["metric_data"] == sub["topic"]: found["status_metric"] = name + if name.endswith('archive_metric'): + if topics["metric_data"] == sub["topic"]: + found["archive_metric"] = name return found @staticmethod @@ -518,10 +528,14 @@ def create_tenant_user(self, tenant, role): project_name = tenant.upper() if role == "project_admin": username = "ams_{}_admin".format(tenant.lower()) + elif role == "archiver": + username = "ams_{}_archiver".format(tenant.lower()) + # archiver is actually a consumer + role = "consumer" else: username = "ams_{}_{}".format(tenant.lower(), role) - print username, role + url = self.get_url("users", username) data = {"projects": [{"project": project_name, "roles": [role]}]} return self.post_resource(url, data) @@ -575,10 +589,10 @@ def check_tenant(self, tenant): # Things that sould be present in AMS definitions topics_lookup = ["sync_data", "metric_data"] - subs_lookup = ["ingest_sync", "ingest_metric", "status_sync", "status_metric"] - users_lookup = ["project_admin", "publisher", "consumer"] + subs_lookup = ["ingest_sync", "ingest_metric", "status_sync", "status_metric", "archive_metric"] + users_lookup = ["project_admin", "publisher", "consumer", "archiver"] topic_acl_lookup = ["sync_data", "metric_data"] - sub_acl_lookup = ["ingest_sync", 
"ingest_metric"] + sub_acl_lookup = ["ingest_sync", "ingest_metric", "archive_metric"] # Initialize a dictionary with missing definitions missing = dict() @@ -601,19 +615,21 @@ def check_tenant(self, tenant): if users is None: users = {} + + # For each expected topic check if it was indeed found in AMS or if it's missing for item in topics_lookup: - if item not in topics.keys(): + if item not in list(topics.keys()): missing["topics"].append(item) # For each expected sub check if it was indeed found in AMS or if it's missing for item in subs_lookup: - if item not in subs.keys(): + if item not in list(subs.keys()): missing["subs"].append(item) # For each expected user check if it was indeed found in AMS or if it's missing for item in users_lookup: - if item not in users.keys(): + if item not in list(users.keys()): missing["users"].append(item) user_topics = [] @@ -658,13 +674,17 @@ def fill_missing(self, tenant, missing): # For each missing sub attempt to create it in AMS for sub in missing["subs"]: # create sub + if sub.startswith("archive") and sub.endswith("metric"): + topic = "metric_data" if sub.endswith("metric"): topic = "metric_data" elif sub.endswith("sync"): topic = "sync_data" else: continue + sub_new = self.create_tenant_sub(tenant, topic, sub) + log.info("Tenant:{} - created missing subscription: {} on topic: {}".format(tenant, sub_new["name"], sub_new["topic"])) @@ -688,7 +708,10 @@ def fill_missing(self, tenant, missing): # For each missing subscription attempt to set it in AMS for sub_acl in missing["sub_acls"]: acl = self.get_sub_acl(tenant, sub_acl) - user_con = "ams_{}_consumer".format(tenant.lower()) + if sub_acl.startswith("archive"): + user_con = "ams_{}_archiver".format(tenant.lower()) + else: + user_con = "ams_{}_consumer".format(tenant.lower()) if user_con not in acl: acl.append(user_con) @@ -759,10 +782,14 @@ def run_ams_update(args): ams_token = config.get("AMS", "access_token") ams_host = config.get("AMS", "endpoint").hostname + ams_verify = config.get("AMS", "verify") + ams_proxy = config.get("AMS", "proxy") + if ams_proxy: + ams_proxy = ams_proxy.geturl() log.info("ams api used {}".format(ams_host)) tenant_list = config.get("API", "tenants") - ams = ArgoAmsClient(ams_host, ams_token) + ams = ArgoAmsClient(ams_host, ams_token, ams_verify, ams_proxy) if args.tenant is not None: # Check if tenant exists in argo configuarion diff --git a/bin/utils/update_cron.py b/bin/utils/update_cron.py index 3b219eed..835dbddd 100755 --- a/bin/utils/update_cron.py +++ b/bin/utils/update_cron.py @@ -6,8 +6,8 @@ from argparse import ArgumentParser from subprocess import check_output, CalledProcessError, check_call from datetime import datetime -from common import get_log_conf, get_config_paths -from argo_config import ArgoConfig +from .common import get_log_conf, get_config_paths +from .argo_config import ArgoConfig log = logging.getLogger(__name__) diff --git a/bin/utils/update_profiles.py b/bin/utils/update_profiles.py index e48e490d..5b1fc9e7 100755 --- a/bin/utils/update_profiles.py +++ b/bin/utils/update_profiles.py @@ -6,12 +6,12 @@ import logging import os import uuid -from urlparse import urlparse +from urllib.parse import urlparse from argparse import ArgumentParser import sys import subprocess -from common import get_log_conf, get_config_paths -from argo_config import ArgoConfig +from .common import get_log_conf, get_config_paths +from .argo_config import ArgoConfig log = logging.getLogger(__name__) @@ -23,39 +23,52 @@ class ArgoApiClient: It connects to an argo-web-api 
host and retrieves profile information per tenant and report """ - def __init__(self, host, tenant_keys): + def __init__(self, host, tenant_keys, verify=False, http_proxy_url=None): """ Initialize ArgoApiClient which is used to retrieve profiles from argo-web-api Args: host: str. argo-web-api host tenant_keys: dict. a dictionary of {tenant: api_token} entries + verify (boolean): flag if the remote web api host should be verified + http_proxy_url (str.): optional url for local http proxy to be used """ self.host = host + self.verify = verify + if http_proxy_url: + self.proxies = {'http': http_proxy_url, 'https': http_proxy_url} + else: + self.proxies = None + self.paths = dict() self.tenant_keys = tenant_keys self.paths.update({ 'reports': '/api/v2/reports', 'operations': '/api/v2/operations_profiles', + 'metrics': '/api/v2/metric_profiles', 'aggregations': '/api/v2/aggregation_profiles', 'thresholds': '/api/v2/thresholds_profiles', 'tenants': '/api/v2/admin/tenants' }) - def get_url(self, resource, item_uuid=None): + def get_url(self, resource, item_uuid=None, date=None): """ Constructs an argo-web-api url based on the resource and item_uuid Args: resource: str. resource to be retrieved (reports|ops) item_uuid: str. retrieve a specific item from the resource + date: str. returns the historic version of the resource Returns: str: url path """ + dateQuery = "" + if date: + dateQuery = "?date=" + date if item_uuid is None: - return "".join(["https://", self.host, self.paths[resource]]) + return "".join(["https://", self.host, self.paths[resource], dateQuery]) else: - return "".join(["https://", self.host, self.paths[resource], "/", item_uuid]) + return "".join(["https://", self.host, self.paths[resource], "/", item_uuid, dateQuery]) def get_resource(self, tenant, url): """ @@ -73,7 +86,8 @@ def get_resource(self, tenant, url): 'Accept': 'application/json', 'x-api-key': self.tenant_keys[tenant] }) - r = requests.get(url, headers=headers, verify=False) + r = requests.get(url, headers=headers, + verify=self.verify, proxies=self.proxies) if 200 == r.status_code: return json.loads(r.text)["data"] @@ -90,17 +104,16 @@ def get_tenants(self, token): dict: list of tenants and access keys """ + tenants = self.get_admin_resource(token, self.get_url("tenants")) tenant_keys = dict() for item in tenants: for user in item["users"]: if user["name"].startswith("argo_engine_") and user["api_key"]: - print len(user["api_key"]) tenant_keys[item["info"]["name"]] = user["api_key"] return tenant_keys - @staticmethod - def get_admin_resource(token, url): + def get_admin_resource(self, token, url): """ Returns an argo-web-api resource by tenant and url Args: @@ -116,14 +129,15 @@ def get_admin_resource(token, url): 'Accept': 'application/json', 'x-api-key': token }) - r = requests.get(url, headers=headers, verify=False) + r = requests.get(url, headers=headers, + verify=self.verify, proxies=self.proxies) if 200 == r.status_code: return json.loads(r.text)["data"] else: return None - def get_profile(self, tenant, report, profile_type): + def get_profile(self, tenant, report, profile_type, date=None): """ Gets an argo-web-api profile by tenant, report and profile type Args: @@ -140,9 +154,11 @@ def get_profile(self, tenant, report, profile_type): return [] item_uuid = self.find_profile_uuid(tenant, report, profile_type) + if item_uuid is None: return None - profiles = self.get_resource(tenant, self.get_url(profile_type, item_uuid)) + profiles = self.get_resource( + tenant, self.get_url(profile_type, item_uuid, date)) if 
profiles is not None: return profiles[0] @@ -171,6 +187,7 @@ def find_report_uuid(self, tenant, name): """ r = self.get_reports(tenant) + if r is None: return '' @@ -189,6 +206,7 @@ def get_report(self, tenant, item_uuid): obj: Returns an array of reports or one report """ + reports = self.get_resource(tenant, self.get_url("reports", item_uuid)) if reports is not None: return reports[0] @@ -204,13 +222,16 @@ def find_profile_uuid(self, tenant, report, profile_type): Returns: """ - if profile_type is "aggregations": - profile_type = "aggregation" - + if profile_type is "aggregations": + profile_type = "aggregation" + + if profile_type is "metrics": + profile_type = "metric" + report = self.get_report(tenant, self.find_report_uuid(tenant, report)) if profile_type == "reports": - return report["id"] - for profile in report["profiles"]: + return report["id"] + for profile in report["profiles"]: if profile["type"] == profile_type: return profile["id"] @@ -234,51 +255,56 @@ def __init__(self, namenode, port, base_path): self.client = Client(namenode, port) self.base_path = base_path - def gen_profile_path(self, tenant, report, profile_type): + def gen_profile_path(self, tenant, report, profile_type, date=None): """ Generates a valid hdfs path to a specific profile Args: tenant: str. tenant to be used report: str. report to be used - profile_type: str. profile_type (operations|reports|aggregations|thresholds) + profile_type: str. profile_type (operations|reports|aggregations|thresholds|metrics) Returns: str: hdfs path """ templates = dict() + if date: + date = "_" + date + else: + date = "" templates.update({ - 'operations': '{0}_ops.json', - 'aggregations': '{0}_{1}_ap.json', + 'operations': '{0}_{1}_ops{2}.json', + 'aggregations': '{0}_{1}_ap{2}.json', + 'metrics': '{0}_{1}_metrics{2}.json', 'reports': '{0}_{1}_cfg.json', - 'thresholds': '{0}_{1}_thresholds.json', + 'thresholds': '{0}_{1}_thresholds{2}.json', 'recomputations': 'recomp.json' }) sync_path = self.base_path.replace("{{tenant}}", tenant) - filename = templates[profile_type].format(tenant, report) + filename = templates[profile_type].format(tenant, report, date) return os.path.join(sync_path, filename) - def cat(self, tenant, report, profile_type): + def cat(self, tenant, report, profile_type, date=None): """ Returns the contents of a profile stored in hdfs Args: tenant: str. tenant name report: str. report name - profile_type: str. profile type (operations|reports|aggregations|thresholds) + profile_type: str. 
profile type (operations|reports|aggregations|thresholds|metric) Returns: """ - path = self.gen_profile_path(tenant, report, profile_type) + path = self.gen_profile_path(tenant, report, profile_type, date) try: txt = self.client.cat([path]) - j = json.loads(txt.next().next()) + j = json.loads(next(next(txt))) return j, True except FileNotFoundException: return None, False - def rem(self, tenant, report, profile_type): + def rem(self, tenant, report, profile_type, date=None): """ Removes a profile file that already exists in hdfs (in order to be replaced) Args: @@ -289,10 +315,10 @@ def rem(self, tenant, report, profile_type): Returns: """ - path = self.gen_profile_path(tenant, report, profile_type) + path = self.gen_profile_path(tenant, report, profile_type, date) try: - self.client.delete([path]).next() + next(self.client.delete([path])) return True except FileNotFoundException: return False @@ -320,7 +346,8 @@ def __init__(self, config): namenode = config.get("HDFS", "namenode") hdfs_user = config.get("HDFS", "user") full_path = config.get("HDFS", "path_sync") - full_path = full_path.partial_fill(namenode=namenode.geturl(), hdfs_user=hdfs_user) + full_path = full_path.partial_fill( + namenode=namenode.geturl(), hdfs_user=hdfs_user) short_path = urlparse(full_path).path @@ -330,12 +357,16 @@ def __init__(self, config): for tenant in tenant_list: tenant_key = config.get("API", tenant + "_key") tenant_keys[tenant] = tenant_key - - print namenode.hostname, namenode.port, short_path + + ams_proxy = config.get("API", "proxy") + if ams_proxy: + ams_proxy = ams_proxy.geturl() + self.hdfs = HdfsReader(namenode.hostname, namenode.port, short_path) - self.api = ArgoApiClient(config.get("API", "endpoint").netloc, tenant_keys) + self.api = ArgoApiClient(config.get("API", "endpoint").netloc, tenant_keys, config.get( + "API", "verify"), ams_proxy) - def profile_update_check(self, tenant, report, profile_type): + def profile_update_check(self, tenant, report, profile_type, date=None): """ Checks if latest api profiles are aligned with profile files stored in hdfs. If not the updated api profile are uploaded to hdfs @@ -343,20 +374,22 @@ def profile_update_check(self, tenant, report, profile_type): tenant: str. Tenant name to check profiles from report: str. Report name to check profiles from profile_type: str. Name of the profile type used (operations|aggregations|reports) + date: str. 
Optional date to retrieve historic version of the profile """ - prof_api = self.api.get_profile(tenant, report, profile_type) + prof_api = self.api.get_profile(tenant, report, profile_type, date) if prof_api is None: - log.info("profile type %s doesn't exist in report --skipping", profile_type) + log.info( + "profile type %s doesn't exist in report --skipping", profile_type) return log.info("retrieved %s profile(api): %s", profile_type, prof_api) - - prof_hdfs, exists = self.hdfs.cat(tenant, report, profile_type) - - + + prof_hdfs, exists = self.hdfs.cat(tenant, report, profile_type, date) + if exists: - log.info("retrieved %s profile(hdfs): %s ", profile_type, prof_hdfs) + log.info("retrieved %s profile(hdfs): %s ", + profile_type, prof_hdfs) prof_update = prof_api != prof_hdfs if prof_update: @@ -366,13 +399,15 @@ def profile_update_check(self, tenant, report, profile_type): else: # doesn't exist so it should be uploaded prof_update = True - log.info("%s profile doesn't exist in hdfs, should be uploaded", profile_type) + log.info( + "%s profile doesn't exist in hdfs, should be uploaded", profile_type) # Upload if it's deemed to be uploaded if prof_update: - self.upload_profile_to_hdfs(tenant, report, profile_type, prof_api, exists) + self.upload_profile_to_hdfs( + tenant, report, profile_type, prof_api, exists, date) - def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists): + def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists, date=None): """ Uploads an updated profile (from api) to the specified hdfs destination Args: @@ -389,9 +424,10 @@ def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists): # If file exists on hdfs should be removed first if exists: - is_removed = self.hdfs.rem(tenant, report, profile_type) + is_removed = self.hdfs.rem(tenant, report, profile_type, date) if not is_removed: - log.error("Could not remove old %s profile from hdfs", profile_type) + log.error( + "Could not remove old %s profile from hdfs", profile_type) return # If all ok continue with uploading the new file to hdfs @@ -403,29 +439,34 @@ def upload_profile_to_hdfs(self, tenant, report, profile_type, profile, exists): local_path = "/tmp/" + temp_fn with open(local_path, 'w') as outfile: json.dump(profile, outfile) - hdfs_host = self.cfg.get("HDFS","namenode").hostname - hdfs_path = self.hdfs.gen_profile_path(tenant, report, profile_type) - status = subprocess.check_call([hdfs_write_bin, hdfs_write_cmd, local_path, hdfs_path]) + hdfs_host = self.cfg.get("HDFS", "namenode").hostname + hdfs_path = self.hdfs.gen_profile_path( + tenant, report, profile_type, date) + status = subprocess.check_call( + [hdfs_write_bin, hdfs_write_cmd, local_path, hdfs_path]) if status == 0: - log.info("File uploaded successfully to hdfs host: %s path: %s", hdfs_host, hdfs_path) + log.info( + "File uploaded successfully to hdfs host: %s path: %s", hdfs_host, hdfs_path) return True else: - log.error("File uploaded unsuccessful to hdfs host: %s path: %s", hdfs_host, hdfs_path) + log.error( + "File uploaded unsuccessful to hdfs host: %s path: %s", hdfs_host, hdfs_path) return False def upload_tenant_reports_cfg(self, tenant): reports = self.api.get_reports(tenant) report_name_list = [] for report in reports: - + # double check if indeed report belongs to tenant if report["tenant"] == tenant: report_name = report["info"]["name"] report_name_list.append(report_name) report_uuid = report["id"] # Set report in configuration - self.cfg.set("TENANTS:"+tenant, 
"report_" + report_name, report_uuid) + self.cfg.set("TENANTS:"+tenant, "report_" + + report_name, report_uuid) # update tenant's report name list self.cfg.set("TENANTS:"+tenant, "reports", ",".join(report_name_list)) @@ -437,13 +478,13 @@ def upload_tenants_cfg(self): """ token = self.cfg.get("API", "access_token") tenant_keys = self.api.get_tenants(token) - self.api.tenant_keys=tenant_keys - tenant_names = ",".join(tenant_keys.keys()) - + self.api.tenant_keys = tenant_keys + tenant_names = ",".join(list(tenant_keys.keys())) + self.cfg.set("API", "tenants", tenant_names) # For each tenant update also it's report list - for tenant_name in tenant_keys.keys(): + for tenant_name in list(tenant_keys.keys()): self.cfg.set("API", tenant_name+"_key", tenant_keys[tenant_name]) # Update tenant's report definitions in configuration self.upload_tenant_reports_cfg(tenant_name) @@ -451,36 +492,46 @@ def upload_tenants_cfg(self): def upload_tenant_defaults(self, tenant): # check - section_tenant = "TENANTS:"+ tenant - section_metric = "TENANTS:"+ tenant + ":ingest-metric" - mongo_endpoint = self.cfg.get("MONGO","endpoint").geturl() - mongo_uri = self.cfg.get_default(section_tenant,"mongo_uri").fill(mongo_endpoint=mongo_endpoint,tenant=tenant).geturl() - hdfs_user = self.cfg.get("HDFS","user") - namenode = self.cfg.get("HDFS","namenode").netloc - hdfs_check = self.cfg.get_default(section_metric,"checkpoint_path").fill(namenode=namenode,hdfs_user=hdfs_user,tenant=tenant) - - - self.cfg.get("MONGO","endpoint") - - self.cfg.set(section_tenant,"mongo_uri",mongo_uri) - self.cfg.set_default(section_tenant,"mongo_method") - - - self.cfg.set_default(section_metric,"ams_interval") - self.cfg.set_default(section_metric,"ams_batch") - self.cfg.set(section_metric,"checkpoint_path",hdfs_check.geturl()) - self.cfg.set_default(section_metric,"checkpoint_interval") - section_sync = "TENANTS:"+ tenant + ":ingest-sync" - - self.cfg.set_default(section_sync,"ams_interval") - self.cfg.set_default(section_sync,"ams_batch") - section_stream = "TENANTS:"+ tenant + ":stream-status" - - self.cfg.set_default(section_stream,"ams_sub_sync") - self.cfg.set_default(section_stream,"ams_interval") - self.cfg.set_default(section_stream,"ams_batch") - - + section_tenant = "TENANTS:" + tenant + section_metric = "TENANTS:" + tenant + ":ingest-metric" + mongo_endpoint = self.cfg.get("MONGO", "endpoint").geturl() + mongo_uri = self.cfg.get_default(section_tenant, "mongo_uri").fill( + mongo_endpoint=mongo_endpoint, tenant=tenant).geturl() + hdfs_user = self.cfg.get("HDFS", "user") + namenode = self.cfg.get("HDFS", "namenode").netloc + hdfs_check = self.cfg.get_default(section_metric, "checkpoint_path").fill( + namenode=namenode, hdfs_user=hdfs_user, tenant=tenant) + + self.cfg.get("MONGO", "endpoint") + + self.cfg.set(section_tenant, "mongo_uri", mongo_uri) + self.cfg.set_default(section_tenant, "mongo_method") + + self.cfg.set_default(section_metric, "ams_interval") + self.cfg.set_default(section_metric, "ams_batch") + self.cfg.set(section_metric, "checkpoint_path", hdfs_check.geturl()) + self.cfg.set_default(section_metric, "checkpoint_interval") + section_sync = "TENANTS:" + tenant + ":ingest-sync" + + self.cfg.set_default(section_sync, "ams_interval") + self.cfg.set_default(section_sync, "ams_batch") + + section_stream = "TENANTS:" + tenant + ":stream-status" + streaming_kafka_servers = self.cfg.get("STREAMING", "kafka_servers") + if (streaming_kafka_servers): + streaming_kafka_servers = ",".join(streaming_kafka_servers) + 
self.cfg.set(section_stream, "kafka_servers", + streaming_kafka_servers) + else: + self.cfg.set_default(section_stream, "kafka_servers") + + self.cfg.set_default(section_stream, "ams_sub_sync") + self.cfg.set_default(section_stream, "ams_sub_metric") + self.cfg.set_default(section_stream, "ams_interval") + self.cfg.set_default(section_stream, "ams_batch") + self.cfg.set(section_stream, "output", "kafka,mongo") + + self.cfg.set(section_stream, "mongo_method", "insert") def save_config(self, file_path): """ @@ -514,18 +565,18 @@ def run_profile_update(args): if args.tenant is not None: # check for the following profile types - profile_type_checklist = ["operations", "aggregations", "reports", "thresholds", "recomputations"] - reports = [] - if args.report is not None: - reports.append(args.report) - else: - reports = config.get("TENANTS:"+args.tenant,"reports") - - for report in reports: - for profile_type in profile_type_checklist: - argo.profile_update_check(args.tenant, report, profile_type) - + profile_type_checklist = [ + "reports", "operations", "aggregations", "thresholds", "recomputations", "metrics"] + reports = [] + if args.report is not None: + reports.append(args.report) + else: + reports = config.get("TENANTS:"+args.tenant, "reports") + for report in reports: + for profile_type in profile_type_checklist: + argo.profile_update_check( + args.tenant, report, profile_type, args.date) else: argo.upload_tenants_cfg() argo.save_config(conf_paths["main"]) @@ -541,6 +592,8 @@ def run_profile_update(args): "-r", "--report", help="report", dest="report", metavar="STRING", required=False, default=None) arg_parser.add_argument( "-c", "--config", help="config", dest="config", metavar="STRING") + arg_parser.add_argument( + "-d", "--date", help="historic date", dest="date", metavar="STRING", required=False, default=None) # Parse the command line arguments accordingly and introduce them to the run method sys.exit(run_profile_update(arg_parser.parse_args())) diff --git a/conf/argo-streaming.conf b/conf/argo-streaming.conf index 3076ac36..c4bc4021 100644 --- a/conf/argo-streaming.conf +++ b/conf/argo-streaming.conf @@ -6,6 +6,9 @@ path_metric= hdfs://{{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/mdat path_sync= hdfs://{{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/sync writer_bin= /home/root/hdfs +[STREAMING] +kafka_servers=localhost:9092 + [API] endpoint=https://api_host access_token=token01 @@ -32,7 +35,7 @@ job_manager= http://localhost:8081 [JOB-NAMESPACE] ingest-metric-namespace= Ingesting metric data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/{{ams_sub}} ingest-sync-namespace= Ingesting sync data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/{{ams_sub}} -stream-status-namespace= Streaming status using data {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/[{{ams_sub_metric}}, {{ams_sub_sync}}] +stream-status-namespace= Streaming status using data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{project}}/subscriptions/[{{ams_sub_metric}},{{ams_sub_sync}}] [CLASSES] ams-ingest-metric= argo.streaming.AmsIngestMetric @@ -48,9 +51,6 @@ batch-ar= /path/to/ArgoArBatch-1.0.jar batch-status= /path/to/ArgoStatusBatch-1.0.jar stream-status= /path/to/streaming-status-multi2.jar -[AMS] -ams_endpoint= localhost:8080 -access_token= secret [TENANTS:TENANT_A] ams_project= TENANT_A diff --git a/conf/conf.template b/conf/conf.template index 1c71731b..eb380712 100644 --- a/conf/conf.template +++ b/conf/conf.template 
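Before the template changes that follow, here is a minimal standalone sketch of how the new optional `[API] proxy`/`verify` and `[STREAMING] kafka_servers` settings are meant to be consumed. It deliberately uses plain `configparser`, not the project's `ArgoConfig`/schema layer, so the accessors and fallbacks shown are illustrative assumptions rather than the project's API.

```python
from configparser import ConfigParser

# Standalone sketch (assumption: plain configparser, not the project's ArgoConfig)
# showing how the optional proxy/verify/kafka_servers settings turn into values
# the scripts can hand to requests and to the stream-status job.
cfg = ConfigParser()
cfg.read_string("""
[API]
endpoint = https://api.foo
verify = false
proxy = http://localhost:3128

[STREAMING]
kafka_servers = kafka1:9092,kafka2:9092
""")

proxy_url = cfg.get("API", "proxy", fallback=None)
# requests expects a scheme -> proxy mapping, or None for a direct connection
proxies = {"http": proxy_url, "https": proxy_url} if proxy_url else None
verify = cfg.getboolean("API", "verify", fallback=True)

# comma-separated server list -> python list; the fallback mirrors the schema default
kafka_servers = cfg.get("STREAMING", "kafka_servers", fallback="localhost:9092").split(",")

# the http client helpers forward these straight to requests, e.g.
# requests.get(url, headers=headers, verify=verify, proxies=proxies)
print(proxies, verify, kafka_servers)
```

Keeping the proxy as a single URL in configuration and expanding it into the `{'http': ..., 'https': ...}` mapping inside the client keeps the option truly optional: when it is unset, `proxies=None` lets `requests` connect directly.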
@@ -14,13 +14,16 @@ path_sync: {{namenode}}/user/{{hdfs_user}}/argo/tenants/{{tenant}}/sync writer_bin: /path/to/binary [API] -endpoint = api.foo +endpoint = https://api.foo tenants = TENANTA access_token = key0 TENANTA_key = key1 TENANTB_key = key2 TENANTC_key = key3 +[STREAMING] +kafka_servers: localhost:9092 + [MONGO] endpoint = mongodb://localhost:21017 @@ -36,7 +39,7 @@ ingest-metric-namespace: Ingesting data from {{ams_endpoint}}:{{ams_port}}/v1/pr # Template to check if a sync job with similar name already runs ingest-sync-namespace: Ingesting sync data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/{{ams_sub}} #Template to check if a stream status job with similar name already runs -stream-status-namespace: Streaming status using data {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/[{{ams_sub_metric}}, {{ams_sub_sync}}] +stream-status-namespace: Streaming status using data from {{ams_endpoint}}:{{ams_port}}/v1/projects/{{ams_project}}/subscriptions/[{{ams_sub_metric}},{{ams_sub_sync}}] [CLASSES] # Specify class to run during job submit diff --git a/conf/config.schema.json b/conf/config.schema.json index 30536502..b469e73e 100644 --- a/conf/config.schema.json +++ b/conf/config.schema.json @@ -31,6 +31,15 @@ } }, + "STREAMING": { + "kafka_servers":{ + "desc": "comma-separated list of kafka servers to send messages to", + "type": "list", + "optional": true, + "default": "localhost:9092" + } + }, + "MONGO": { "endpoint": { "desc": "mongodb core endpoint", @@ -73,6 +82,18 @@ "desc": "list of tenants", "type": "list" }, + "proxy": { + "desc": "ams proxy to be used", + "type": "uri", + "optional": true, + "default": "http://localhost:3128" + }, + "verify":{ + "desc":"ssl verify ams endpoint", + "type":"bool", + "optional": true, + "default": "true" + }, "~_key": { "~": "tenants", "desc": "tenants key", @@ -234,11 +255,13 @@ "~":"API.tenants", "ams_sub_metric":{ "desc": "subscription for ingesting metric data", - "type": "string" + "type": "string", + "default": "status_metric" }, "ams_sub_sync":{ "desc": "subscription for ingesting sync data", - "type": "string" + "type": "string", + "default": "status_sync" }, "ams_interval":{ "desc": "interval for polling ams for sync data", diff --git a/docs/submission-scripts.md b/docs/submission-scripts.md index 02c80866..81a7bea3 100644 --- a/docs/submission-scripts.md +++ b/docs/submission-scripts.md @@ -1,15 +1,17 @@ # Python utility scripts for easier flink job submission/handling -| Script | Description | Shortcut | -|--------|-------------|---------- | -| metric_ingestion_submit.py | Python wrapper over flink sumbit metric ingestion job.| [Details](#ingest-metric) | -| sync_ingestion_submit.py | Python wrapper over flink submit sync ingestion job.| [Details](#ingest-synbc) | -| ar_job_submit.py | Python wrapper over the flink batch AR job. | [Details](#batch-ar) | -| status_job_submit.py | Python wrapper over the flink batch Status jon. | [Details](#batch-status) | +| Script | Description | Shortcut | +| --------------------------- | ------------------------------------------------------ | ------------------------- | +| metric_ingestion_submit.py | Python wrapper over flink sumbit metric ingestion job. | [Details](#ingest-metric) | +| sync_ingestion_submit.py | Python wrapper over flink submit sync ingestion job. | [Details](#ingest-synbc) | +| ar_job_submit.py | Python wrapper over the flink batch AR job. 
| [Details](#batch-ar) | +| status_job_submit.py | Python wrapper over the flink batch Status jon. | [Details](#batch-status) | | stream_status_job_submit.py | Python wrapper over flink sumbit status streaming job. | [Details](#stream-status) | + ## Metric Ingestion Submit Script + Python wrapper over flink sumbit metric ingestion job. Metric Ingestion job receives metric data from an AMS endpoint subscription and stores them to a proper hdfs destination. @@ -22,7 +24,9 @@ Metric Ingestion job receives metric data from an AMS endpoint subscription and `-u : If specified the flink command will run without sudo` + ## Sync Ingestion Submit Script + Same as Metric Ingestion but for connector data This job connects to AMS and stores connector data (by report) in an hdfs destination @@ -35,7 +39,9 @@ This job connects to AMS and stores connector data (by report) in an hdfs destin `-u : If specified the flink command will run without sudo` + ## A/R Batch Job + A/R job submission is a batch job that will run and finish on the cluster `ar_job_submit.py -t -c -u -r -d -m` @@ -54,10 +60,14 @@ A/R job submission is a batch job that will run and finish on the cluster `--profile-check: (optional) Check if profiles used in computation are out of date and update them` +`--historic: (optional) Ar job submission script will use the historic versions of the available profiles according to the (-d) date parameter` + `--thresholds: (optional) Check if threshold rules are defined and use them during computations` + ## Status Batch Job + Status job submission is a batch job that will run and finish on the cluster `status_job_submit.py -t -c -u -r -d -m` @@ -76,10 +86,14 @@ Status job submission is a batch job that will run and finish on the cluster `--profile-check: (optional) Check if profiles used in computation are out of date and update them` +`--historic: (optional) status job submission script will use the historic versions of the available profiles according to the (-d) date parameter` + `--thresholds: (optional) Check if threshold rules are defined and use them during computations` + ## Status Stream Job + Status streaming job receives metric and sync data from AMS calculates and generates status events which are forwarded to kafka `stream_status_job_submit.py -t -c -u -r -d` @@ -96,14 +110,20 @@ Status streaming job receives metric and sync data from AMS calculates and gener `-t : long(ms) - controls default timeout for event regeneration (used in notifications)` -### Important +`--historic: (optional) status stream job submission script will use the historic versions of the available profiles according to the (-d) date parameter` -- Sometimes connector data (metric profiles,endpoint,group endpoints,weights) appear delayed (in comparison with the metric data) or might be missing. We have a check mechanism that looks back (up to three days) for connector data that might be missing and uses that. +`--profile-check: (optional) Check if profiles used in computation are out of date and update them` +`--thresholds: (optional) Check if threshold rules are defined and use them during computations` -- Flink job receives a parameter of insert or upsert when storing results. Give the ability to honor that parameter and when insert is used, call a clean mongo script for removing (if present) any mongo a/r report data for that very day +### Important + +- Sometimes connector data (metric profiles,endpoint,group endpoints,weights) appear delayed (in comparison with the metric data) or might be missing. 
We have a check mechanism that looks back (up to three days) for connector data that might be missing and uses that. + +* Flink job receives a parameter of insert or upsert when storing results. Give the ability to honor that parameter and when insert is used, call a clean mongo script for removing (if present) any mongo a/r report data for that very day ## Configuration file + ``` [HDFS] HDFS credentials diff --git a/docs/update_profiles.md b/docs/update_profiles.md index 77ff0a29..27c11405 100644 --- a/docs/update_profiles.md +++ b/docs/update_profiles.md @@ -3,14 +3,14 @@ Argo-streaming engine maintains profile files stored in flink shared storage per tenant and report. These profiles are essential for computing a/r and status results during flink-jobs and are not provided automatically by connectors. Profiles include: -- operations_profile: `TENANT_ops.json` which includes truth tables about the fundamental aggregation operations applied - on monitor status timelines (such as 'AND', 'OR' .etc between statuses of 'OK', 'WARNING', 'CRITICAL', 'MISSING' etc.) -- aggregation_profile: `TENANT_REPORT_aps.json` which includes information on what operations ('AND','OR') and how are - applied on different service levels -- report configuration profile: `TENANT_REPORT_cfg.json` which includes information on the report it self, what profiles -it uses and how filters data -- threhsolds_profile (optional): `TENANT_REPORT_thresholds.json` which includes thresholds rules to be applied during computation +- operations_profile: `TENANT_ops.json` which includes truth tables about the fundamental aggregation operations applied + on monitor status timelines (such as 'AND', 'OR' .etc between statuses of 'OK', 'WARNING', 'CRITICAL', 'MISSING' etc.) +- aggregation_profile: `TENANT_REPORT_aps.json` which includes information on what operations ('AND','OR') and how are + applied on different service levels +- report configuration profile: `TENANT_REPORT_cfg.json` which includes information on the report it self, what profiles + it uses and how filters data +- threhsolds_profile (optional): `TENANT_REPORT_thresholds.json` which includes thresholds rules to be applied during computation Each report uses an operations profile. The operation profile is defined also in argo-web-api instance at the following url `GET https://argo-web-api.host.example/api/v2/operations_profiles/{{profile_uuid}}` @@ -24,15 +24,15 @@ Each report optionally contains a thresholds profile. The thresholds profile is Each report contains a configuration profile. The report is defined also in argo-web-api instance at the following url `GET https://argo-web-api.host.example/api/v2/reports/{{report_uuid}}` - - -Providing a specific `tenant` and a specific `report`, script `update_profiles` checks corresponding profiles on hdfs against +Providing a specific `tenant` and a specific `report`, script `update_profiles` checks corresponding profiles on hdfs against latest profiles provided by argo-web-api. 
If they don't match it uploads the latest argo-web-api profile definition in hdfs # Submission scripts automatic invoke + Script logic is programmatically called in a/r and status job submission scripts # Invoke manually from command line + Script logic can be invoked from command line by issuing `$ ./update_profiles -t TENANT -r REPORT` @@ -52,9 +52,14 @@ optional arguments: report -c STRING, --config STRING config + -d STRING, --date STRING date ``` +If `-d` parameter is set, update profile script will check for historic version of the profiles and will update +them accordingly to HDFS + # Config file parameters used + Update_profiles script will search for the main argo-streaming.conf file but uses only the following configuration parameters: @@ -85,6 +90,7 @@ TENANT_B_key = secret2 ``` # Dependencies + Update_profiles script is deployed alongside the other scripts included in the `./bin/` folder of argo-streaming engine and relies on the same dependencies. Specifically it uses `requests` lib for contacting argo-web-api and python `snakebite` lib for checking hdfs files. Because `snakebite` lib lacks upload mechanism the script relies on a binary client wrapper to upload diff --git a/flink_jobs/OperationsManager/.gitignore b/flink_jobs/OperationsManager/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/OperationsManager/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/OperationsManager/pom.xml b/flink_jobs/OperationsManager/pom.xml new file mode 100644 index 00000000..e2d68601 --- /dev/null +++ b/flink_jobs/OperationsManager/pom.xml @@ -0,0 +1,52 @@ + + + 4.0.0 + operations.manager + OperationsManager + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 1.7.7 + 1.2.17 + + + + com.google.code.gson + gson + 2.2.4 + + + log4j + log4j + ${log4j.version} + + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + commons-io + commons-io + 2.10.0 + + + junit-addons + junit-addons + 1.4 + test + + + junit + junit + 4.13.1 + test + + + + + \ No newline at end of file diff --git a/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java b/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java new file mode 100644 index 00000000..8a89d992 --- /dev/null +++ b/flink_jobs/OperationsManager/src/main/java/operations/OperationsManager.java @@ -0,0 +1,395 @@ +package operations; + +//import argo.utils.RequestManager; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; +import java.io.Serializable; +import org.apache.commons.io.IOUtils; + +/** + * OperationsManager class implements objects that store the information parsed + * from a json object containing operation profile data or loaded from a json + * file * + * + * The OperationsManager keeps info of the defined statuses, the defined + * operations, creates a truth table containing all the combinations of statuses + * per operation , also it convert string operations and statuses to integer + * based on their position in the list storage + */ +public class OperationsManager 
implements Serializable { + + private static final Logger LOG = Logger.getLogger(OperationsManager.class.getName()); + + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; + + private int[][][] truthTable; + + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; + + private boolean order; + // private final String url = "/operations_profiles"; + + /** + * Constructs an OperationsManager object initializing fields + */ + public OperationsManager() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + + this.order = false; + } + + public String getDefaultDown() { + return this.defaultDownState; + } + + public String getDefaultUnknown() { + return this.defaultUnknownState; + } + + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } + + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } + + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + +/** + * Clears the OperationsManager fields + */ + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + +/** +* Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as an int +* @param op , the operation (e.g 0, 1) +* @param a , the 1st status (e.g 3 ) +* @param b , the 2nd status (e.g 2) +* @return the final status which is the combination of the two statuses retrieved from the operation's truth table +*/ + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } +/** + * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as an int + * @param op , the operation in the form of a string (e.g AND , OR) + * @param a , the 1st status in the form of a string (e.g OK , MISSING) + * @param b , the 2nd status in the form of a string (e.g OK, MISSING) + * @return . 
the final status which is the combination of the two statuses retrieved from the operation's truth table + */ + public int opInt(String op, String a, String b) { + + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } +/** + * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as a string + * @param op , the operation as an int (e.g 0, 1) + * @param a , the 1st status as an int (e.g 1, 3) + * @param b , the 2nd status as an int (e.g 1, 3) + * @return the final status which is the combination of the two statuses , as a string, + * retrieved from the operation's truth table + */ + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } +/** + * Retrieves the status which is a combination of 2 statuses based on the truth table of the operation, as a string + * @param op, the operation as a string (e.g AND, OR) + * @param a , the 1st status as a string (e.g OK, MISSING) + * @param b, the 1st status as a string (e.g OK , MISSING) + * @return the final status which is the combination of the two statuses , as a string, + * retrieved from the operation's truth table + */ + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } +/** +* Maps a status as string to an int based on the position of the status in the stored list of statuses + * @param status a status as an int (e.g 1 ,2) + * @return the status as a string + */ + public String getStrStatus(int status) { + return this.revStates.get(status); + } +/** + * Maps a status as string to an int + * @param status ,a status as a string (e.g OK,MISSING) + * @return the status as an int + */ + public int getIntStatus(String status) { + return this.states.get(status); + } +/** + * Maps an operation as int to a string based on the position of the operation in the stored list of operations + * @param op , an operation as an int + * @return the operation as a string + */ + public String getStrOperation(int op) { + return this.revOps.get(op); + } +/** + * Maps an operation as string to an int + * @param op, an operation as a string + * @return the operation as an int + */ + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } +/** + * reads from a json file and stores the necessary information to the OperationsManager object fields + * @param jsonFile a json file containing information about the operation profiles + * @throws IOException + */ + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + JsonObject jRoot = j_element.getAsJsonObject(); + JsonArray jData = jRoot.get("data").getAsJsonArray(); + JsonElement jItem = jData.get(0); + + readJson(jItem); + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered 
reader + IOUtils.closeQuietly(br); + } + + } +/** + * reads from a JsonElement and stores the necessary information to the OperationsManager object fields + * @param j_element , a JsonElement containing the operations profiles data + */ + private void readJson(JsonElement j_element) { + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } +/** + * Calls a JsonParser to read from a list of strings containing the operations profiles data , extracts the JsonElement and + * calls the readJson() to read and store the operations profiles data + * @param opsJson , a list of strings + * @throws JsonParseException + */ + public void loadJsonString(List opsJson) throws JsonParseException { + // Clear data + this.clear(); + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson.get(0)); + readJson(j_element); + } + + public int[][][] getTruthTable() { + return truthTable; + } + + public void setTruthTable(int[][][] truthTable) { + this.truthTable = truthTable; + } + + public HashMap getStates() { + return states; + } + + public void setStates(HashMap states) { + this.states = states; + } + + public HashMap getOps() { + return ops; + } + + public void setOps(HashMap ops) { + this.ops = ops; + } + + public ArrayList getRevStates() { + return revStates; + } + + public void setRevStates(ArrayList revStates) { + this.revStates = 
revStates; + } + + public ArrayList getRevOps() { + return revOps; + } + + public void setRevOps(ArrayList revOps) { + this.revOps = revOps; + } + + public String getDefaultDownState() { + return defaultDownState; + } + + public void setDefaultDownState(String defaultDownState) { + this.defaultDownState = defaultDownState; + } + + public String getDefaultMissingState() { + return defaultMissingState; + } + + public void setDefaultMissingState(String defaultMissingState) { + this.defaultMissingState = defaultMissingState; + } + + public String getDefaultUnknownState() { + return defaultUnknownState; + } + + public void setDefaultUnknownState(String defaultUnknownState) { + this.defaultUnknownState = defaultUnknownState; + } + + public boolean isOrder() { + return order; + } + + public void setOrder(boolean order) { + this.order = order; + } + + public static Logger getLOG() { + return LOG; + } + +// public String getUrl() { +// return url; +// } +} diff --git a/flink_jobs/OperationsManager/src/main/resources/log4j.properties b/flink_jobs/OperationsManager/src/main/resources/log4j.properties new file mode 100644 index 00000000..da32ea0f --- /dev/null +++ b/flink_jobs/OperationsManager/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/flink_jobs/OperationsManager/src/main/resources/operations/operations.json b/flink_jobs/OperationsManager/src/main/resources/operations/operations.json new file mode 100644 index 00000000..8486d509 --- /dev/null +++ b/flink_jobs/OperationsManager/src/main/resources/operations/operations.json @@ -0,0 +1 @@ +{"status":{"message":"Success","code":"200"},"data":[{"id":"8ce59c4d-3761-4f25-a364-f019e394bf8b","date":"2015-01-01","name":"egi_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]}]} \ No newline at end of file diff --git a/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json b/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json new file mode 100644 index 00000000..705de7f4 --- /dev/null +++ b/flink_jobs/OperationsManager/src/main/resources/operations/truthtable.json @@ -0,0 +1,587 @@ +{ "data":[{ + "operations": ["AND","OR"], + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + + "inputs": [ + { + "name":"AND", + "truth_table":[ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": 
"MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + },{ + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } +], +"output":[ + {"op":"1", + "a":"0", + "b":"0", + "x":"0"}, + {"op":"1", + "a":"0", + "b":"2", + "x":"0"}, + {"op":"1", + "a":"0", + "b":"3", + "x":"0"}, + {"op":"1", + "a":"0", + "b":"4", + "x":"0"}, + {"op":"1", + "a":"0", + "b":"5", + "x":"0"}, + + {"op":"1", + "a":"1", + "b":"0", + "x":"0"}, + {"op":"1", + "a":"1", + "b":"1", + "x":"1"}, + + + {"op":"1", + "a":"1", + "b":"2", + "x":"1"}, + + {"op":"1", + "a":"1", + "b":"3", + "x":"1"}, + {"op":"1", + "a":"1", + "b":"4", + "x":"1"}, + {"op":"1", + "a":"1", + "b":"5", + "x":"1"}, + + {"op":"1", + "a":"2", + "b":"0", + "x":"0"}, + + {"op":"1", + "a":"2", + "b":"1", + "x":"1"}, + {"op":"1", + "a":"2", + "b":"2", + "x":"2"}, + + {"op":"1", + "a":"2", + "b":"3", + "x":"2"}, + {"op":"1", + "a":"2", + "b":"4", + "x":"4"}, + + {"op":"1", + "a":"2", + "b":"5", + "x":"2"}, + + {"op":"1", + "a":"3", + "b":"0", + "x":"0"}, + + {"op":"1", + "a":"3", + "b":"1", + "x":"1"}, + + {"op":"1", + "a":"3", + "b":"2", + "x":"2"}, + + {"op":"1", + "a":"3", + "b":"3", + "x":"3"}, + + {"op":"1", + "a":"3", + "b":"4", + "x":"4"}, + + {"op":"1", + "a":"3", + "b":"5", + "x":"5"}, + + + {"op":"1", + "a":"4", + 
"b":"0", + "x":"0"}, + + {"op":"1", + "a":"4", + "b":"1", + "x":"1"}, + + {"op":"1", + "a":"4", + "b":"2", + "x":"4"}, + + {"op":"1", + "a":"4", + "b":"3", + "x":"4"}, + + + {"op":"1", + "a":"4", + "b":"4", + "x":"4"}, + + {"op":"1", + "a":"4", + "b":"5", + "x":"4"}, + + + {"op":"1", + "a":"5", + "b":"0", + "x":"0"}, + + + {"op":"1", + "a":"5", + "b":"1", + "x":"1"}, + + {"op":"1", + "a":"5", + "b":"2", + "x":"2"}, + + {"op":"1", + "a":"5", + "b":"3", + "x":"5"}, + + {"op":"1", + "a":"5", + "b":"4", + "x":"4"}, + + {"op":"1", + "a":"5", + "b":"5", + "x":"5"}, + + + {"op":"0", + "a":"0", + "b":"0", + "x":"0"}, + + + {"op":"0", + "a":"0", + "b":"1", + "x":"1"}, + + {"op":"0", + "a":"0", + "b":"2", + "x":"2"}, + + {"op":"0", + "a":"0", + "b":"3", + "x":"3"}, + + {"op":"0", + "a":"0", + "b":"4", + "x":"4"}, + + {"op":"0", + "a":"0", + "b":"5", + "x":"5"}, + + {"op":"0", + "a":"1", + "b":"0", + "x":"1"}, + + {"op":"0", + "a":"1", + "b":"1", + "x":"1"}, + + {"op":"0", + "a":"1", + "b":"2", + "x":"2"}, + + {"op":"0", + "a":"1", + "b":"3", + "x":"3"}, + + {"op":"0", + "a":"1", + "b":"4", + "x":"4"}, + + {"op":"0", + "a":"1", + "b":"5", + "x":"5"}, + + + {"op":"0", + "a":"2", + "b":"0", + "x":"2"}, + + {"op":"0", + "a":"2", + "b":"1", + "x":"2"}, + + {"op":"0", + "a":"2", + "b":"2", + "x":"2"}, + + {"op":"0", + "a":"2", + "b":"3", + "x":"3"}, + + {"op":"0", + "a":"2", + "b":"4", + "x":"4"}, + + {"op":"0", + "a":"2", + "b":"5", + "x":"5"}, + + + {"op":"0", + "a":"3", + "b":"0", + "x":"3"}, + {"op":"0", + "a":"3", + "b":"1", + "x":"3"}, + {"op":"0", + "a":"3", + "b":"2", + "x":"3"}, + {"op":"0", + "a":"3", + "b":"3", + "x":"3"}, + + {"op":"0", + "a":"3", + "b":"4", + "x":"4"}, + {"op":"0", + "a":"3", + "b":"5", + "x":"5"}, + {"op":"0", + "a":"4", + "b":"0", + "x":"4"}, + + {"op":"0", + "a":"4", + "b":"1", + "x":"4"}, + + {"op":"0", + "a":"4", + "b":"2", + "x":"4"}, + + {"op":"0", + "a":"4", + "b":"3", + "x":"4"}, + + {"op":"0", + "a":"4", + "b":"4", + "x":"4"}, + + {"op":"0", + "a":"4", + "b":"5", + "x":"4"}, + + {"op":"0", + "a":"5", + "b":"0", + "x":"5"}, + + {"op":"0", + "a":"5", + "b":"1", + "x":"5"}, + + {"op":"0", + "a":"5", + "b":"2", + "x":"5"}, + + {"op":"0", + "a":"5", + "b":"3", + "x":"5"}, + + {"op":"0", + "a":"5", + "b":"4", + "x":"4"}, + + {"op":"0", + "a":"5", + "b":"5", + "x":"5"} + ] + +} +] +} \ No newline at end of file diff --git a/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java b/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java new file mode 100644 index 00000000..1e7f2cdf --- /dev/null +++ b/flink_jobs/OperationsManager/src/test/java/operations/OperationsManagerTest.java @@ -0,0 +1,673 @@ +/*s + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package operations; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNotNull; +import org.junit.After; +import org.junit.AfterClass; +import static org.junit.Assert.assertArrayEquals; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/* + * + * A unit test class to test OperationsManager + */ +public class OperationsManagerTest { + + public OperationsManagerTest() { + } + + @BeforeClass + public static void setUpClass() { + assertNotNull("Test file missing", OperationsManagerTest.class.getResource("/operations/operations.json")); + } + + @AfterClass + public static void tearDownClass() { + } + + @Before + public void setUp() { + } + + @After + public void tearDown() { + } + + /** + * Test of loadOperationProfile method, of class OperationsParser. + */ + /** + * Test of getDefaultDown method, of class OperationsManager. + */ + @Test + public void testGetDefaultDown() throws IOException { + System.out.println("getDefaultDown"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + String expResult = "DOWNTIME"; + String result = instance.getDefaultDown(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultUnknown method, of class OperationsManager. + */ + @Test + public void testGetDefaultUnknown() throws IOException { + System.out.println("getDefaultUnknown"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "UNKNOWN"; + String result = instance.getDefaultUnknown(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultUnknownInt method, of class OperationsManager. + */ + @Test + public void testGetDefaultUnknownInt() throws IOException { + System.out.println("getDefaultUnknownInt"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 2; + int result = instance.getDefaultUnknownInt(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultDownInt method, of class OperationsManager. + */ + @Test + public void testGetDefaultDownInt() throws IOException { + System.out.println("getDefaultDownInt"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 5; + int result = instance.getDefaultDownInt(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultMissing method, of class OperationsManager. 
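+ * The fixture's "defaults" block maps missing -> "MISSING", down -> "DOWNTIME" and
+ * unknown -> "UNKNOWN", so getDefaultMissing() is expected to return "MISSING".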
+ */ + @Test + public void testGetDefaultMissing() throws IOException { + System.out.println("getDefaultMissing"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "MISSING"; + String result = instance.getDefaultMissing(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultMissingInt method, of class OperationsManager. + */ + @Test + public void testGetDefaultMissingInt() throws IOException { + System.out.println("getDefaultMissingInt"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 3; + int result = instance.getDefaultMissingInt(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); + } + + /** + * Test of clear method, of class OperationsManager. + */ + @Test + public void testClear() throws IOException { + System.out.println("clear"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + instance.clear(); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of opInt method, of class OperationsManager. + */ + @Test + public void testOpInt_3args_1() throws IOException { + System.out.println("opInt"); + int op = 0; + int a = 0; + int b = 0; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 0; + int result = instance.opInt(op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of opInt method, of class OperationsManager. + */ + @Test + public void testOpInt_3args_2() throws IOException { + System.out.println("opInt"); + String op = "AND"; + String a = "OK"; + String b = "OK"; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 0; + int result = instance.opInt(op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of op method, of class OperationsManager. + */ + @Test + public void testOp_3args_1() throws IOException { + System.out.println("op"); + int op = 0; + int a = 0; + int b = 0; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "OK"; + String result = instance.op(op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of op method, of class OperationsManager. 
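+     * For example, with the operations profile used by these tests,
+     * op("AND", "OK", "CRITICAL") is expected to return "CRITICAL" and
+     * op("OR", "CRITICAL", "WARNING") to return "WARNING" (illustrative values
+     * taken from the bundled truth table test resource).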
+ */ + @Test + public void testOp_3args_2() throws IOException { + System.out.println("op"); + String op = "AND"; + String a = "OK"; + String b = "OK"; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "OK"; + String result = instance.op(op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getStrStatus method, of class OperationsManager. + */ + @Test + public void testGetStrStatus() throws IOException { + System.out.println("getStrStatus"); + int status = 0; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "OK"; + String result = instance.getStrStatus(status); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getIntStatus method, of class OperationsManager. + */ + @Test + public void testGetIntStatus() throws IOException { + System.out.println("getIntStatus"); + String status = "WARNING"; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 1; + int result = instance.getIntStatus(status); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getStrOperation method, of class OperationsManager. + */ + @Test + public void testGetStrOperation() throws IOException { + System.out.println("getStrOperation"); + int op = 1; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "OR"; + String result = instance.getStrOperation(op); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getIntOperation method, of class OperationsManager. + */ + @Test + public void testGetIntOperation() throws IOException { + System.out.println("getIntOperation"); + String op = "OR"; + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + int expResult = 1; + int result = instance.getIntOperation(op); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of availableStates method, of class OperationsManager. 
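+     * With the profile used by these tests the expected order of states is
+     * OK, WARNING, UNKNOWN, MISSING, CRITICAL, DOWNTIME.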
+ */ + @Test + public void testAvailableStates() throws IOException { + System.out.println("availableStates"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + ArrayList expResult = new ArrayList<>(); + expResult.add("OK"); + expResult.add("WARNING"); + expResult.add("UNKNOWN"); + expResult.add("MISSING"); + expResult.add("CRITICAL"); + expResult.add("DOWNTIME"); + + ArrayList result = instance.availableStates(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of availableOps method, of class OperationsManager. + */ + @Test + public void testAvailableOps() throws IOException { + System.out.println("availableOps"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + ArrayList expResult = new ArrayList<>(); + expResult.add("AND"); + expResult.add("OR"); + ArrayList result = instance.availableOps(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of loadJson method, of class OperationsManager. + */ + @Test + public void testLoadJson() throws Exception { + System.out.println("loadJson"); + File jsonFile = new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile()); + OperationsManager instance = new OperationsManager(); + + instance.loadJson(jsonFile); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of loadJsonString method, of class OperationsManager. + */ +// @Test +// public void testLoadJsonString() { +// System.out.println("loadJsonString"); +// List opsJson = null; +// OperationsManager instance = new OperationsManager(); +// instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); +// +// instance.loadJsonString(opsJson); +// // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); +// } + /** + * Test of getTruthTable method, of class OperationsManager. + */ + @Test + public void testGetTruthTable() throws IOException, FileNotFoundException, ParseException { + System.out.println("getTruthTable"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + Utils utils=new Utils(); + + int[][][] expResult = utils.readTruthTable(); + int[][][] result = instance.getTruthTable(); + assertArrayEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setTruthTable method, of class OperationsManager. + */ + @Test + public void testSetTruthTable() throws IOException, FileNotFoundException, ParseException { + System.out.println("setTruthTable"); + Utils utils=new Utils(); + + + int[][][] truthTable = utils.readTruthTable(); + OperationsManager instance = new OperationsManager(); + instance.setTruthTable(truthTable); + // TODO review the generated test code and remove the default call to fail. 
+ // fail("The test case is a prototype."); + } + + /** + * Test of getStates method, of class OperationsManager. + */ + @Test + public void testGetStates() throws IOException { + System.out.println("getStates"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + HashMap expResult = new HashMap<>(); + expResult.put("OK", 0); + expResult.put("WARNING", 1); + expResult.put("UNKNOWN", 2); + expResult.put("MISSING", 3); + expResult.put("CRITICAL", 4); + expResult.put("DOWNTIME", 5); + HashMap result = instance.getStates(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setStates method, of class OperationsManager. + */ + @Test + public void testSetStates() { + System.out.println("setStates"); + HashMap states = new HashMap<>(); + states.put("OK", 0); + states.put("WARNING", 1); + states.put("UNKNOWN", 2); + states.put("MISSING", 3); + states.put("CRITICAL", 4); + states.put("DOWNTIME", 5); + OperationsManager instance = new OperationsManager(); + instance.setStates(states); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getOps method, of class OperationsManager. + */ + @Test + public void testGetOps() throws IOException { + System.out.println("getOps"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + HashMap expResult = new HashMap<>(); + expResult.put("AND", 0); + expResult.put("OR", 1); + HashMap result = instance.getOps(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setOps method, of class OperationsManager. + */ + @Test + public void testSetOps() { + System.out.println("setOps"); + HashMap ops = new HashMap<>(); + ops.put("AND", 0); + ops.put("OR", 1); + + OperationsManager instance = new OperationsManager(); + instance.setOps(ops); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getRevStates method, of class OperationsManager. + */ + @Test + public void testGetRevStates() throws IOException { + System.out.println("getRevStates"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + ArrayList expResult = new ArrayList<>(); + expResult.add("OK"); + expResult.add("WARNING"); + expResult.add("UNKNOWN"); + expResult.add("MISSING"); + expResult.add("CRITICAL"); + expResult.add("DOWNTIME"); + ArrayList result = instance.getRevStates(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setRevStates method, of class OperationsManager. 
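+     * The reverse-state list maps integer status codes back to state names, in
+     * the order the states appear in the profile.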
+ */ + @Test + public void testSetRevStates() { + System.out.println("setRevStates"); + ArrayList revStates = new ArrayList<>(); + revStates.add("OK"); + revStates.add("WARNING"); + revStates.add("UNKNWON"); + revStates.add("MISSING"); + revStates.add("CRITICAL"); + revStates.add("DOWNTIME"); + + OperationsManager instance = new OperationsManager(); + instance.setRevStates(revStates); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getRevOps method, of class OperationsManager. + */ + @Test + public void testGetRevOps() throws IOException { + System.out.println("getRevOps"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + ArrayList expResult = new ArrayList<>(); + expResult.add("AND"); + expResult.add("OR"); + ArrayList result = instance.getRevOps(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setRevOps method, of class OperationsManager. + */ + @Test + public void testSetRevOps() { + System.out.println("setRevOps"); + ArrayList revOps = new ArrayList<>(); + revOps.add("AND"); + revOps.add("OR"); + OperationsManager instance = new OperationsManager(); + instance.setRevOps(revOps); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getDefaultDownState method, of class OperationsManager. + */ + @Test + public void testGetDefaultDownState() throws IOException { + System.out.println("getDefaultDownState"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "DOWNTIME"; + String result = instance.getDefaultDownState(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setDefaultDownState method, of class OperationsManager. + */ + @Test + public void testSetDefaultDownState() { + System.out.println("setDefaultDownState"); + String defaultDownState = "DOWNTIME"; + OperationsManager instance = new OperationsManager(); + instance.setDefaultDownState(defaultDownState); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDefaultMissingState method, of class OperationsManager. + */ + @Test + public void testGetDefaultMissingState() throws IOException { + System.out.println("getDefaultMissingState"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + String expResult = "MISSING"; + String result = instance.getDefaultMissingState(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setDefaultMissingState method, of class OperationsManager. 
+ */ + @Test + public void testSetDefaultMissingState() { + System.out.println("setDefaultMissingState"); + String defaultMissingState = "MISSING"; + OperationsManager instance = new OperationsManager(); + instance.setDefaultMissingState(defaultMissingState); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getDefaultUnknownState method, of class OperationsManager. + */ + @Test + public void testGetDefaultUnknownState() throws IOException { + System.out.println("getDefaultUnknownState"); + + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + String expResult = "UNKNOWN"; + String result = instance.getDefaultUnknownState(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setDefaultUnknownState method, of class OperationsManager. + */ + @Test + public void testSetDefaultUnknownState() { + System.out.println("setDefaultUnknownState"); + String defaultUnknownState = "UNKNOWN"; + OperationsManager instance = new OperationsManager(); + instance.setDefaultUnknownState(defaultUnknownState); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of isOrder method, of class OperationsManager. + */ + @Test + public void testIsOrder() throws IOException { + System.out.println("isOrder"); + OperationsManager instance = new OperationsManager(); + instance.loadJson(new File(OperationsManagerTest.class.getResource("/operations/operations.json").getFile())); + + boolean expResult = false; + boolean result = instance.isOrder(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); + } + + /** + * Test of setOrder method, of class OperationsManager. + */ + @Test + public void testSetOrder() { + System.out.println("setOrder"); + boolean order = false; + OperationsManager instance = new OperationsManager(); + instance.setOrder(order); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + +} diff --git a/flink_jobs/OperationsManager/src/test/java/operations/Utils.java b/flink_jobs/OperationsManager/src/test/java/operations/Utils.java new file mode 100644 index 00000000..750b175b --- /dev/null +++ b/flink_jobs/OperationsManager/src/test/java/operations/Utils.java @@ -0,0 +1,124 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package operations; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; + +/** + * + * A utils class to process resource files for tests and provide the information + */ +public class Utils { + + public int[][][] readTruthTable() throws IOException, FileNotFoundException, ParseException, java.text.ParseException { + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(Utils.class.getResource("/operations/truthtable.json").getFile())); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + JsonObject jRoot = j_element.getAsJsonObject(); + JsonArray jData = jRoot.get("data").getAsJsonArray(); + JsonElement jItem = jData.get(0); + int[][][] truthTable = readJson(jItem); + return truthTable; + } catch (FileNotFoundException ex) { + + throw ex; + + } + + } + + private int[][][] readJson(JsonElement j_element) { + HashMap operations = new HashMap(); + ArrayList revOps = new ArrayList(); + HashMap states = new HashMap(); + ArrayList revStates = new ArrayList(); + + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + int i = 0; + for (JsonElement item : j_ops) { + String jObjItem = item.getAsString(); + operations.put(jObjItem, i); + revOps.add(jObjItem); + i++; + } + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + i = 0; + for (JsonElement item : j_states) { + String jObjItem = item.getAsString(); + states.put(jObjItem, i); + revStates.add(jObjItem); + i++; + } + + int num_ops = revOps.size(); + int num_states = revStates.size(); + int[][][] truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + JsonArray input = j_obj.getAsJsonArray("inputs"); + + // Fill the truth table + for (JsonElement item : input) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = revStates.indexOf(row.getAsJsonPrimitive("a").getAsString()); + int b_val = revStates.indexOf(row.getAsJsonPrimitive("b").getAsString()); + int x_val = revStates.indexOf(row.getAsJsonPrimitive("x").getAsString()); + int op_val = revOps.indexOf(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + truthTable[op_val][a_val][b_val] = x_val; + truthTable[op_val][b_val][a_val] = x_val; + + } + } + + int[][][] outputTruthTable = new int[num_ops][num_states][num_states]; + JsonArray output = j_obj.getAsJsonArray("output"); + + // Fill the truth table + for (JsonElement item : output) { + JsonObject jObjItem = item.getAsJsonObject(); + int op = jObjItem.getAsJsonPrimitive("op").getAsInt(); + int a = jObjItem.getAsJsonPrimitive("a").getAsInt(); + int b = jObjItem.getAsJsonPrimitive("b").getAsInt(); + int x = jObjItem.getAsJsonPrimitive("x").getAsInt(); + outputTruthTable[op][a][b] = x; + + } + return 
outputTruthTable; + } +} diff --git a/flink_jobs/Timelines/.gitignore b/flink_jobs/Timelines/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/Timelines/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/Timelines/pom.xml b/flink_jobs/Timelines/pom.xml new file mode 100644 index 00000000..78170300 --- /dev/null +++ b/flink_jobs/Timelines/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + timeline.manager + Timelines + 1.0-SNAPSHOT + jar + + + org.junit.jupiter + junit-jupiter-api + 5.6.0 + test + + + org.junit.jupiter + junit-jupiter-params + 5.6.0 + test + + + org.junit.jupiter + junit-jupiter-engine + 5.6.0 + test + + + joda-time + joda-time + 1.6 + + + log4j + log4j + ${log4j.version} + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + com.googlecode.json-simple + json-simple + 1.1.1 + + + + UTF-8 + 1.8 + 1.8 + 1.7.7 + 1.2.17 + + \ No newline at end of file diff --git a/flink_jobs/Timelines/src/main/java/timelines/Timeline.java b/flink_jobs/Timelines/src/main/java/timelines/Timeline.java new file mode 100644 index 00000000..576a5b48 --- /dev/null +++ b/flink_jobs/Timelines/src/main/java/timelines/Timeline.java @@ -0,0 +1,486 @@ +package timelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Timeline class implements objects that store a set of status per timestamp. + * The set of status/timestamp is stored ascending by the timestamp (in a + * TreeMap). The key of the map is the timestamp in the form of DateTime and the + * status is expressed as an integer , given as input. + * + * A timeline can be constructed empty and then the map could be passed as + * parameter and stored. + * + * Also a timeline could be constructed by giving a timestamp. The timestamp + * would define the timeline's date. + * + * Also a timeline could be constructed by giving a timestamp and a status. The + * timestamp would define the timeline's date and the status would be stored as + * the status of the 00:00:00 timestamp . 
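+ * A minimal usage sketch (illustrative only):
+ *   Timeline t = new Timeline("2021-01-15T00:00:00Z");
+ *   t.insert("2021-01-15T05:00:00Z", 1);
+ *   t.optimize();
+ *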
+ * + * + * Timeline supports insert of a pair of timestamp, status + * + */ +public class Timeline { + + private LocalDate date; + + static Logger LOG = LoggerFactory.getLogger(Timeline.class); + + private TreeMap samples; + + /** + * Constructs an empty timeline + */ + public Timeline() { + this.date = null; + this.samples = new TreeMap(); + + } + + /** + * + * @param timestamp a timestamp Constructs a timeline where the timestamp + * would define the date of the timeline * + * + */ + public Timeline(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + } + + /** + * + * @param timestamp a timestamp + * @param state , the status that pairs the timestamp Constructs a timeline + * , where the timestamp defines the timeline's date and the state is paired + * at a timestamp , describing midnight (00:00:00) + * + */ + Timeline(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + this.samples.put(tmp_date, state); + + } + + /** + * + * @param timestamp a timestamp + * @return the state for the input timestamp as is stored in the map + */ + public int get(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + if (this.samples.floorEntry(tmp_date) != null) { + return -1; + // throw new NullPointerException("no item found in timeline, size of timeline:" + this.samples.size() + "," + tmp_date.toString()); + } + + return this.samples.floorEntry(tmp_date).getValue(); + } + + /** + * + * @param point a timestamp , passed as datetime + * @return the state for the input timestamp as is stored in the map + */ + public int get(DateTime point) { + if (this.samples.floorEntry(point) == null) { + return -1; + //throw new NullPointerException("no item found in timeline, size of timeline:" + this.samples.size() + "," + point.toString()); + } + return this.samples.floorEntry(point).getValue(); + } + + /** + * + * @param timestamp a timestamp + * @param status the status for the given timestamp + * + * inserts a pair of timestamp, status in the map. + */ + public void insert(String timestamp, int status) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples.put(tmp_date, status); + } + + /** + * + * @param timestamp, a timestamp in the form of datetime + * @param status , the status of the given timestamp + * + * inserts a pair of timestamp, status in the map + * + */ + public void insert(DateTime timestamp, int status) { + samples.put(timestamp, status); + + } + + /** + * + * @param timestamps a map of timestamp, status to be stored in the timeline + */ + public void insertStringTimeStamps(TreeMap timestamps) { + for (String dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + + } + } + + /** + * + * @param timestamps a map of timestamp, status to be stored in the + * timeline. 
the timestamps are in the form of datetime + */ + public void insertDateTimeStamps(TreeMap timestamps) { + for (DateTime dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + } + this.optimize(); + + } + + /** + * + * @param timestamp, a timestamp + * @param state, the status for the given timestamp + * + * inserts in the map of pairs (timestamp, status) a new entry where the new + * timestamp is the midnight (00:00:00) of the date of the given timestamp + * and the status is the given state + */ + public void setFirst(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples = new TreeMap(); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.samples.put(tmp_date, state); + this.optimize(); + } + + /** + * clears the map of timestamps, status + */ + public void clear() { + this.samples.clear(); + } + + /** + * + * @param samples an entry set of timestamp,status clears the existing map + * and stores the new entry set to the empty map + */ + public void bulkInsert(Set> samples) { + this.samples.clear(); + for (Map.Entry entry : samples) { + this.samples.put(entry.getKey(), entry.getValue()); + } + } + + /** + * + * @return the entry set of the map of timestamp, status + */ + public Set> getSamples() { + return samples.entrySet(); + } + + /** + * + * @return the date of the timeline + */ + public LocalDate getDate() { + return this.date; + } + + /** + * + * @return the number of the timestamps stored in the map + */ + public int getLength() { + return this.samples.size(); + } + + /** + * + * @return checks if the map of timestamp,state is empty + */ + public boolean isEmpty() { + return this.samples.isEmpty(); + } + + /** + * optimizes the map of timestamp, status if two or continuous timestamps + * have the same status then in the map the first timestamp , status is + * stored when the status of the next timestamp is different from the + * previous timestamp's status then both timestamp, status pairs are stored + */ + + public void optimize() { + TreeMap optimal = new TreeMap(); + int prevstate = -1; + for (DateTime key : this.samples.keySet()) { + int value = this.samples.get(key); + if (prevstate == -1) { + + optimal.put(key, value); + prevstate = value; + + } + if (prevstate != value) { + optimal.put(key, value); + prevstate = value; + } + } + + this.samples = optimal; + } + + /** + * + * @return return the timestamps in the map + */ + public Set getPoints() { + return this.samples.keySet(); + } + + /** + * + * @param second, the second timeline whose timestamps,status will be + * aggregated to the existing timeline timestamp, status + * @param truthTable + * @param op aggregate a set of timestamp,status pairs that are stored in a + * timeline with a set of timestamp,status pairs of a different timeline, + */ + + public void aggregate(Timeline second, int[][][] truthTable, int op) { + if (this.isEmpty()) { + this.bulkInsert(second.getSamples()); + // Optimize even when we have a single timeline for aggregation + this.optimize(); + return; + } + + Timeline result = new Timeline(); + + // Slice for first + for (DateTime point : this.getPoints()) { + result.insert(point, -1); + } + // Slice for second + for (DateTime point : second.getPoints()) { + result.insert(point, -1); + } + + // Iterate over result and ask + for (DateTime point : result.getPoints()) { + int a = this.get(point); + int b = 
second.get(point); + if(a!=-1 && b!=-1){ + int x = -1; + x = truthTable[op][a][b]; + if (x == -1) { + x = truthTable[op][b][a]; + } + + result.insert(point, x); + } + } + + result.optimize(); + + // Engrave the result in this timeline + this.clear(); + this.bulkInsert(result.getSamples()); + } + + /** + * + * @param timestampList, a list of pairs of timestamp, status where status is in the form of string + * @param states, a list of the existing states + * @return a sorted map of timestamp, status pairs in an ascending order + * receives pairs of timestamp , status where status is a string (e.g "OK", "WARNING") and converts the string to an integer + * based on the position of the status in the existing list of the states. Next this pair is stored in the map + * + */ + public TreeMap buildStringTimeStampMap(ArrayList timestampList, ArrayList states) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + String time = timestamp[0]; + + timestampMap.put(time, states.indexOf(timestamp[1])); + } + return timestampMap; + + } + + /** + * + * @param timestampList, a list of pairs of timestamp, status where status is in the form of string and timestamp is in the form of a datetime + * @param states, a list of the existing states + * @return a sorted map of timestamp, status pairs in an ascending order + * receives pairs of timestamp , status where status is a string (e.g "OK", "WARNING") and converts the string to an integer + * based on the position of the status in the existing list of the states. Next this pair is stored in the map + * + */ + + public TreeMap buildDateTimeStampMap(ArrayList timestampList, ArrayList states) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp[0]); + timestampMap.put(tmp_date, states.indexOf(timestamp[1])); + } + return timestampMap; + + } + + /** + * + * @param timestamp, a timestamp + * removes a pair of timestamp , status from the map + */ + + public void removeTimeStamp(DateTime timestamp) { + + if (this.samples.containsKey(timestamp)) { + Iterator iter = this.samples.keySet().iterator(); + while (iter.hasNext()) { + DateTime tmpTimestamp = (DateTime) iter.next(); + if (tmpTimestamp.equals(timestamp)) { + iter.remove(); + break; + } + } + } + + } + /** + * + * @return the number of the times a status changes between the timestamps of the timeline , after the map is optimized + */ + + public int calcStatusChanges() { + this.optimize(); + return this.samples.keySet().size() - 1; + } + + /** + * + * @param date, a timestamp + * @param availStates , the list of the available states + * + * checks if in the map the midnight exists and if not it is added with status "MISSING" + */ + + public void replacePreviousDateStatus(DateTime timestamp, ArrayList availStates) { + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime firsTime = timestamp; + firsTime = firsTime.withTime(0, 0, 0, 0); + + DateTime firstEntry = this.samples.floorKey(firsTime); + if (firstEntry != null && !firstEntry.equals(firsTime)) { + int previousStatus = this.samples.get(firstEntry); + this.samples.put(firsTime, previousStatus); + this.samples.remove(firstEntry); + } else if (firstEntry == null) { + this.samples.put(firsTime, availStates.indexOf("MISSING")); + } + + this.optimize(); + + } + + @Override + public int 
hashCode() { + int hash = 7; + hash = 83 * hash + Objects.hashCode(this.date); + hash = 83 * hash + Objects.hashCode(this.samples); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Timeline other = (Timeline) obj; + if (!Objects.equals(this.date, other.date)) { + return false; + } + if (!Objects.equals(this.samples, other.samples)) { + return false; + } + return true; + } + /** + * + * @param truthTable the truthtable of the combination of various statuses with each other + * @param op , the operation + * @param a, the status a + * @param b, the status b + * @return , the result of the combination as defined from the truth table of the defined operation + */ + + public int opInt(int[][][] truthTable, int op, int a, int b) { + int result = -1; + try { + result = truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + // LOG.info(ex); + result = -1; + } + + return result; + } + +} diff --git a/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java b/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java new file mode 100644 index 00000000..32eb8998 --- /dev/null +++ b/flink_jobs/Timelines/src/main/java/timelines/TimelineAggregator.java @@ -0,0 +1,186 @@ +package timelines; + +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +/** + TImelineAggregator class implements an aggregator which is able to receive a list of different timelines + and concude into one timeline by aggregating all the timestamps and the statuses + + */ +public class TimelineAggregator { + + private Timeline output; + private Map inputs; + + /** + * + * @param timestamp a timestamp + * @throws ParseException Constructs the TimelineAggregator object + */ + public TimelineAggregator(String timestamp) throws ParseException { + this.output = new Timeline(timestamp); + this.inputs = new HashMap(); + } + + /** + * Constructs the TimelineAggregator object + */ + public TimelineAggregator() { + this.output = new Timeline(); + this.inputs = new HashMap(); + + } + + /** + * + * @param inputs, a map of timelines Constructs a TimelineAggregator object, + * containing the timelines + */ + public TimelineAggregator(Map inputs) { + this.inputs = inputs; + this.output = new Timeline(); + } + + /** + * Clears the input timelines and the output timeline + */ + public void clear() { + this.output.clear(); + this.inputs.clear(); + } + + /** + * + * @param date + * @return the date given as input with midnight time (00:00:00) in + * yyyy-MM-dd'T'HH:mm:ss'Z format + * + */ + public String tsFromDate(String date) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd"); + tmp_date = fmt.parseDateTime(date); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + return tmp_date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + } + + /** + * + * @param name the owner of the created timeline + * @param timestamp , a timestamp + * @param prevState , a status + * + * Creates a timeline with the given status set at midnight of the date + * defined from timestamp and add this timeline to the input timelines + */ + public void createTimeline(String 
name, String timestamp, int prevState) { + Timeline temp = new Timeline(timestamp, prevState); + this.inputs.put(name, temp); + } + + /** + * + * @param name , the owner of the created timeline + * @param timestamp, a timestamp + * @param status , a status for the given timestamp if the owner does not + * have an existing timeline add a new timeline to the inputs + * + */ + public void insert(String name, String timestamp, int status) { + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) { + Timeline temp = new Timeline(timestamp, status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).insert(timestamp, status); + } + + /** + * + * @param name the owner of the created timeline + * @param timestamp, a timestamp + * @param status , the status of the given timestamp if the owner does not + * have an existing timeline add a new timeline to the inputs the created + * timeline contains the given status for the midnight (00:00:00) of the + * timestamp + */ + + public void setFirst(String name, String timestamp, int status) { + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) { + Timeline temp = new Timeline(timestamp, status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).setFirst(timestamp, status); + } + + /** + * + * @return the date of the output timeline + */ + public LocalDate getDate() { + return output.getDate(); + } + + public Set> getSamples() { + return this.output.getSamples(); + } + + public void clearAndSetDate(String timestamp) { + this.output = new Timeline(timestamp); + this.inputs.clear(); + + } + + /** + * + * @param truthTable a truth table containing all possible status + * combinations for the existing operations + * @param op , the operation to be applied in order to aggregate the + * timeline statuses + * + * aggregates the input timelines into one combined output including all the + * timestamp status combinations as produced from the input timelines + */ + public void aggregate(int[][][] truthTable, int op) { + if (this.output != null) { + this.output.clear(); + } + + //Iterate through all available input timelines and aggregate + for (Timeline item : this.inputs.values()) { + this.output.aggregate(item, truthTable, op); + } + + } + + public Timeline getOutput() { + return output; + } + + public void setOutput(Timeline output) { + this.output = output; + } + + public Map getInputs() { + return inputs; + } + + public void setInputs(Map inputs) { + this.inputs = inputs; + } + +} diff --git a/flink_jobs/Timelines/src/main/java/timelines/Utils.java b/flink_jobs/Timelines/src/main/java/timelines/Utils.java new file mode 100644 index 00000000..d6ee8f6a --- /dev/null +++ b/flink_jobs/Timelines/src/main/java/timelines/Utils.java @@ -0,0 +1,97 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + +import java.io.IOException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.util.TimeZone; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +/** + * A utils class to provide functions processing dates + */ +public class Utils { + + static Logger LOG = LoggerFactory.getLogger(Utils.class); + + public static String convertDateToString(String format, DateTime date) throws ParseException { + + //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + DateTimeFormatter dtf = DateTimeFormat.forPattern(format); + String dateString = date.toString(dtf); + return dateString; + + } + + public static DateTime convertStringtoDate(String format, String dateStr) throws ParseException { + DateTimeFormatter formatter = DateTimeFormat.forPattern(format); + DateTime dt = formatter.parseDateTime(dateStr); + + return dt; + } + + public static DateTime createDate(String format, Date dateStr, int hour, int min, int sec) throws ParseException { + + //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + Calendar newCalendar = Calendar.getInstance(); + newCalendar.setTime(dateStr); + + newCalendar.set(Calendar.HOUR_OF_DAY, hour); + newCalendar.set(Calendar.MINUTE, min); + newCalendar.set(Calendar.SECOND, sec); + newCalendar.set(Calendar.MILLISECOND, 0); + return new DateTime(newCalendar.getTime()); + } + + public static boolean isPreviousDate(String format, Date nowDate, Date firstDate) throws ParseException { + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + + Calendar cal = Calendar.getInstance(); + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + cal.setTime(nowDate); + + Calendar calFirst = Calendar.getInstance(); + calFirst.setTime(firstDate); + + if (firstDate.before(nowDate)) { + return true; + } else { + return false; + } + } + + public static DateTime createDate(String format, int year, int month, int day, int hour, int min, int sec) throws ParseException { + + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + Calendar newCalendar = Calendar.getInstance(); + newCalendar.set(Calendar.YEAR, year); + newCalendar.set(Calendar.MONTH, month); + newCalendar.set(Calendar.DAY_OF_MONTH, day); + + newCalendar.set(Calendar.HOUR_OF_DAY, hour); + newCalendar.set(Calendar.MINUTE, min); + newCalendar.set(Calendar.SECOND, sec); + newCalendar.set(Calendar.MILLISECOND, 0); + + return new DateTime(newCalendar.getTime()); + } + +} diff --git a/flink_jobs/Timelines/src/main/resources/log4j.properties b/flink_jobs/Timelines/src/main/resources/log4j.properties new file mode 100644 index 00000000..da32ea0f --- /dev/null +++ b/flink_jobs/Timelines/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/flink_jobs/Timelines/src/main/resources/timelines/timeline.json b/flink_jobs/Timelines/src/main/resources/timelines/timeline.json new file mode 100644 index 00000000..30b368fd --- /dev/null +++ b/flink_jobs/Timelines/src/main/resources/timelines/timeline.json @@ -0,0 +1,362 @@ +{ "data":{ + "operations":["AND","OR"], + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "operation":"AND", + "inputs": [ + { + "name":"timeline1", + "timestamps":[ + + {"timestamp": "2021-01-15T00:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T01:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T02:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T03:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T15:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T20:16:50Z", + "status": "WARNING" + } + + ] + }, { + "name":"timeline2", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T00:05:00Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T12:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T14:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T23:05:00Z", + "status": "WARNING" + } + ] + }, { + "name":"timeline3", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T00:05:00Z", + "status": "UNKNOWN" + }, + + {"timestamp": "2021-01-15T02:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T03:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T06:00:00Z", + "status": "OK" + } + ] + }, { + "name":"timeline4", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T20:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T21:00:00Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T22:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T23:00:00Z", + "status": "OK" + } + ] + } + +], + "output":{ + "name":"merged", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "MISSING" + }, + {"timestamp": "2021-01-15T00:15:50Z", + "status": "UNKNOWN" + }, + {"timestamp": "2021-01-15T02:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T03:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T03:15:50Z", + "status": "WARNING" + }, + + {"timestamp": "2021-01-15T15:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T20:00:00Z", + "status": "CRITICAL" + } + , + {"timestamp": "2021-01-15T21:00:00Z", + "status": "WARNING" + }, + + {"timestamp": 
"2021-01-15T22:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T23:00:00Z", + "status": "WARNING" + } + ] + }, "operation_truth_table": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] +} + + +} diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java new file mode 100644 index 00000000..1f9d15e8 --- /dev/null +++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineAggregatorTest.java @@ -0,0 +1,389 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * + * A unit test to test TImelineAggregator class + */ +public class TimelineAggregatorTest { + + public TimelineAggregatorTest() { + } + + @BeforeClass + public static void setUpClass() { + } + + @AfterClass + public static void tearDownClass() { + } + + @Before + public void setUp() { + } + + @After + public void tearDown() { + } + + /** + * Test of clear method, of class TimelineAggregator. + */ + @Test + public void testClear() { + System.out.println("clear"); + TimelineAggregator instance = new TimelineAggregator(); + instance.clear(); + // TODO review the generated test code and remove the default call to fail. + + } + + /** + * Test of tsFromDate method, of class TimelineAggregator. + */ +// @Test +// public void testTsFromDate() { +// System.out.println("tsFromDate"); +// String date = ""; +// TimelineAggregator instance = new TimelineAggregator(); +// String expResult = ""; +// String result = instance.tsFromDate(date); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// +// } + /** + * Test of createTimeline method, of class TimelineAggregator. + */ + @Test + public void testCreateTimeline() throws ParseException { + System.out.println("createTimeline"); + String name = "test"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + int prevState = 0; + TimelineAggregator instance = new TimelineAggregator(); + instance.createTimeline(name, timestamp, prevState); + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + expRes.put(name, exptimeline); + + assertEquals(expRes.toString(), instance.getInputs().toString()); + // TODO review the generated test code and remove the default call to fail. + + } + + /** + * Test of insert method, of class TimelineAggregator. + */ + @Test + public void testInsert() throws ParseException { + System.out.println("insert"); + String name = "test"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + + int status = 0; + TimelineAggregator instance = new TimelineAggregator(); + instance.insert(name, timestamp, status); + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + expRes.put(name, exptimeline); + + assertEquals(expRes.toString(), instance.getInputs().toString()); + + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setFirst method, of class TimelineAggregator. 
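+     * setFirst is expected to reset the named timeline so that it holds the given
+     * status at the midnight (00:00:00) of the supplied timestamp, creating the
+     * timeline first if it does not already exist.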
+ */ + @Test + public void testSetFirst() throws ParseException { + System.out.println("setFirst"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + String name2 = "test2"; + String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0)); + HashMap map = new HashMap(); + map.put(name, new Timeline(timestamp)); + map.put(name2, new Timeline(timestamp2)); + + int status = 0; + TimelineAggregator instance = new TimelineAggregator(map); + instance.insert(name, timestamp, status); + instance.setFirst(name2, timestamp2, status); + // TODO review the generated test code and remove the default call to fail. + + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + Timeline exptimeline2 = new Timeline(timestamp); + + exptimeline2.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0); + expRes.put(name2, exptimeline); + expRes.put(name, exptimeline2); + + assertEquals(expRes, instance.getInputs()); + } + + /** + * Test of getDate method, of class TimelineAggregator. + */ + @Test + public void testGetDate() throws ParseException { + System.out.println("getDate"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + int status = 0; + TimelineAggregator instance = new TimelineAggregator(timestamp); + instance.insert(name, timestamp, status); + + LocalDate expResult = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0).toLocalDate(); + LocalDate result = instance.getDate(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getSamples method, of class TimelineAggregator. + */ + @Test + public void testGetSamples() throws ParseException { + System.out.println("getSamples"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + String name2 = "test2"; + String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0)); + HashMap map = new HashMap(); + map.put(name, new Timeline(timestamp)); + map.put(name2, new Timeline(timestamp2)); + + TimelineAggregator instance = new TimelineAggregator(map); + instance.aggregate(createTruthTable(), 0); + TreeMap expRes = new TreeMap<>(); + Timeline exptimeline = new Timeline(); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0); + Set> expResult = expRes.entrySet(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of aggregate method, of class TimelineAggregator. 
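+     * The test is expected to read the input timelines, the operation and the
+     * truth table via the TimelineUtils helper (presumably from the bundled
+     * timelines/timeline.json resource), aggregate the inputs with that
+     * operation and compare the result against the "merged" output timeline.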
+ */ + @Test + public void testAggregate() throws IOException, FileNotFoundException, org.json.simple.parser.ParseException, ParseException { + System.out.println("aggregate"); + TimelineUtils timelineUtils = new TimelineUtils(); + TimelineUtils.TimelineJson timelinejson = timelineUtils.readTimelines(); + + ArrayList inputTimelines = timelinejson.getInputTimelines(); + int op = timelinejson.getOperation(); + int[][][] truthTable = timelinejson.getTruthTable(); + ArrayList states = timelinejson.getStates(); + + TimelineAggregator instance = new TimelineAggregator(); + + HashMap inputs = new HashMap(); + int counter = 1; + for (TreeMap map : inputTimelines) { + Timeline timeline = new Timeline(); + checkForMissingMidnightStatus(map, states.indexOf("MISSING")); + + timeline.insertDateTimeStamps(map); + inputs.put(timeline + "_" + counter, timeline); + counter++; + } + instance.setInputs(inputs); + + instance.aggregate(truthTable, op); + + Set> expRes = timelinejson.getOutputTimeline().entrySet(); + Set> res = instance.getOutput().getSamples(); + assertEquals(expRes, res); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getOutput method, of class TimelineAggregator. + */ + @Test + public void testGetOutput() { + System.out.println("getOutput"); + TimelineAggregator instance = new TimelineAggregator(); + Timeline expResult = null; + Timeline result = instance.getOutput(); + //assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setOutput method, of class TimelineAggregator. + */ + @Test + public void testSetOutput() { + System.out.println("setOutput"); + Timeline output = null; + TimelineAggregator instance = new TimelineAggregator(); + instance.setOutput(output); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getInputs method, of class TimelineAggregator. + */ + @Test + public void testGetInputs() { + System.out.println("getInputs"); + TimelineAggregator instance = new TimelineAggregator(); + Map expResult = null; + Map result = instance.getInputs(); +// assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setInputs method, of class TimelineAggregator. + */ + @Test + public void testSetInputs() { + System.out.println("setInputs"); + Map inputs = null; + TimelineAggregator instance = new TimelineAggregator(); + instance.setInputs(inputs); + // TODO review the generated test code and remove the default call to fail. 
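+ // A stricter check could round-trip the setter; hypothetical example, kept commented out:
+ // instance.setInputs(new HashMap<String, Timeline>());
+ // assertTrue(instance.getInputs().isEmpty());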
+ //fail("The test case is a prototype."); + } + + private int[][][] createTruthTable() { + + int[][][] truthtable = new int[2][6][6]; + + truthtable[0][0][0] = 0; + truthtable[0][0][1] = 0; + truthtable[0][0][2] = 0; + truthtable[0][0][3] = 0; + truthtable[0][0][4] = 0; + truthtable[0][0][5] = 0; + + truthtable[0][1][0] = -1; + truthtable[0][1][1] = 1; + truthtable[0][1][2] = 1; + truthtable[0][1][3] = 1; + truthtable[0][1][4] = 1; + truthtable[0][1][5] = 1; + + truthtable[0][2][0] = -1; + truthtable[0][2][1] = -1; + truthtable[0][2][2] = 2; + truthtable[0][2][3] = 2; + truthtable[0][2][4] = 4; + truthtable[0][2][5] = 2; + + truthtable[0][3][0] = -1; + truthtable[0][3][1] = -1; + truthtable[0][3][2] = -1; + truthtable[0][3][3] = 3; + truthtable[0][3][4] = 4; + truthtable[0][3][5] = 5; + + truthtable[0][4][0] = -1; + truthtable[0][4][1] = -1; + truthtable[0][4][2] = -1; + truthtable[0][4][3] = -1; + truthtable[0][4][4] = 4; + truthtable[0][4][5] = 5; + + truthtable[0][5][0] = -1; + truthtable[0][5][1] = -1; + truthtable[0][5][2] = -1; + truthtable[0][5][3] = -1; + truthtable[0][5][4] = -1; + truthtable[0][5][5] = 5; + + truthtable[1][0][0] = 0; + truthtable[1][0][1] = 1; + truthtable[1][0][2] = 2; + truthtable[1][0][3] = 3; + truthtable[1][0][4] = 4; + truthtable[1][0][5] = 5; + + truthtable[1][1][0] = -1; + truthtable[1][1][1] = 1; + truthtable[1][1][2] = 2; + truthtable[1][1][3] = 3; + truthtable[1][1][4] = 4; + truthtable[1][1][5] = 5; + + truthtable[1][2][0] = -1; + truthtable[1][2][1] = -1; + truthtable[1][2][2] = 2; + truthtable[1][2][3] = 3; + truthtable[1][2][4] = 4; + truthtable[1][2][5] = 5; + + truthtable[1][3][0] = -1; + truthtable[1][3][1] = -1; + truthtable[1][3][2] = -1; + truthtable[1][3][3] = 3; + truthtable[1][3][4] = 4; + truthtable[1][3][5] = 5; + + truthtable[1][4][0] = -1; + truthtable[1][4][1] = -1; + truthtable[1][4][2] = -1; + truthtable[1][4][3] = -1; + truthtable[1][4][4] = 4; + truthtable[1][4][5] = 4; + + truthtable[1][5][0] = -1; + truthtable[1][5][1] = -1; + truthtable[1][5][2] = -1; + truthtable[1][5][3] = -1; + truthtable[1][5][4] = -1; + truthtable[1][5][5] = 5; + + return truthtable; + + } + + private void checkForMissingMidnightStatus(TreeMap map, int missingStatus) throws ParseException { + DateTime midnight = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00); + if (!map.containsKey(midnight)) { + map.put(midnight, missingStatus); + } + } + +} diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java new file mode 100644 index 00000000..747b9aa9 --- /dev/null +++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineTest.java @@ -0,0 +1,499 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * + * A unit test to test TImeline class + */ +public class TimelineTest { + + public TimelineTest() { + } + + @BeforeClass + public static void setUpClass() { + } + + @AfterClass + public static void tearDownClass() { + } + + @Before + public void setUp() { + } + + @After + public void tearDown() { + } + + /** + * Test of get method, of class Timeline. + */ + @Test + public void testGet_String() throws ParseException { + System.out.println("get"); + + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + String timestampStr = timestamp.toString(dtf); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = -1; + int result = instance.get(timestampStr); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of get method, of class Timeline. + */ + @Test + public void testGet_DateTime() throws ParseException { + System.out.println("get"); + DateTime point = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 1; + int result = instance.get(point); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of insert method, of class Timeline. + */ + @Test + public void testInsert_String_int() throws ParseException { + System.out.println("insert"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + String timestampStr = timestamp.toString(dtf); + + int status = 1; + Timeline instance = new Timeline(); + instance.insert(timestampStr, status); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of insert method, of class Timeline. + */ + @Test + public void testInsert_DateTime_int() throws ParseException { + System.out.println("insert"); + DateTime timestamp =Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + + int status = 0; + Timeline instance = new Timeline(); + instance.insert(timestamp, status); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of insertStringTimeStamps method, of class Timeline. 
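+ * The timestamps argument is a TreeMap of "yyyy-MM-dd'T'HH:mm:ss'Z'"-formatted strings
+ * mapped to integer status codes, as produced by the createStringTimestampList() helper below.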
+ */ + @Test + public void testInsertStringTimeStamps() throws ParseException { + System.out.println("insertStringTimeStamps"); + TreeMap timestamps = createStringTimestampList(); + Timeline instance = new Timeline(); + instance.insertStringTimeStamps(timestamps); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of insertDateTimeStamps method, of class Timeline. + */ + @Test + public void testInsertDateTimeStamps() throws ParseException { + System.out.println("insertDateTimeStamps"); + TreeMap timestamps = createTimestampList(); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(timestamps); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setFirst method, of class Timeline. + */ + @Test + public void testSetFirst() throws ParseException { + System.out.println("setFirst"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 0, 0, 0, 1); + String timestampStr = timestamp.toString(dtf); + + int state = 0; + Timeline instance = new Timeline(); + instance.setFirst(timestampStr, state); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of clear method, of class Timeline. + */ + @Test + public void testClear() throws ParseException { + System.out.println("clear"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + instance.clear(); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of bulkInsert method, of class Timeline. + */ + @Test + public void testBulkInsert() throws ParseException { + System.out.println("bulkInsert"); + Set> samples = createTimestampList().entrySet(); + Timeline instance = new Timeline(); + instance.bulkInsert(samples); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getSamples method, of class Timeline. + */ + @Test + public void testGetSamples() throws ParseException { + System.out.println("getSamples"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + Set> expResult = instance.getSamples(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDate method, of class Timeline. + */ + @Test + public void testGetDate() throws ParseException { + System.out.println("getDate"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + Timeline instance = new Timeline(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0).toString(dtf)); + + LocalDate expResult = new LocalDate(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0)); + LocalDate result = instance.getDate(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getLength method, of class Timeline. 
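+ * Six samples are inserted but the expected length is 2, which assumes the timeline keeps
+ * only the points at which the status actually changes.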
+ */ + @Test + public void testGetLength() throws ParseException { + System.out.println("getLength"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 2; + int result = instance.getLength(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of isEmpty method, of class Timeline. + */ + @Test + public void testIsEmpty() throws ParseException { + System.out.println("isEmpty"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + + boolean expResult = false; + boolean result = instance.isEmpty(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of optimize method, of class Timeline. + */ + @Test + public void testOptimize() throws ParseException { + System.out.println("optimize"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + instance.optimize(); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getPoints method, of class Timeline. + */ + @Test + public void testGetPoints() throws ParseException { + System.out.println("getPoints"); + Timeline instance = new Timeline(); + TreeMap map = createTimestampList(); + instance.insertDateTimeStamps(map); + Set expResult = new TreeSet<>(); + expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15)); + expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23)); + + Set result = instance.getPoints(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of aggregate method, of class Timeline. + */ + @Test + public void testAggregate() throws ParseException { + System.out.println("aggregate"); + Timeline second = new Timeline(); + second.insertDateTimeStamps(createSecondTimeline()); + int[][][] truthTable = createTruthTable(); + int op = 0; + Timeline instance = new Timeline(); + instance.aggregate(second, truthTable, op); + Set> expResult = createMerged().entrySet(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + /** + * Test of calcStatusChanges method, of class Timeline. + */ + @Test + public void testCalcStatusChanges() throws ParseException { + System.out.println("calcStatusChanges"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 1; + int result = instance.calcStatusChanges(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of replacePreviousDateStatus method, of class Timeline. 
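+ * The availStates list below is assumed to mirror the integer status encoding used by the
+ * timelines, i.e. index 0 = OK through index 5 = DOWNTIME.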
+ */ + @Test + public void testReplacePreviousDateStatus() throws ParseException { + System.out.println("replacePreviousDateStatus"); + DateTime date = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 0); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + ArrayList availStates = new ArrayList<>(); + availStates.add("OK"); + availStates.add("WARNING"); + availStates.add("UKNOWN"); + availStates.add("MISSING"); + availStates.add("CRITICAL"); + availStates.add("DOWNTIME"); + + instance.replacePreviousDateStatus(date, availStates); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + /** + * Test of opInt method, of class Timeline. + */ + @Test + public void testOpInt() throws ParseException { + System.out.println("opInt"); + int[][][] truthTable = createTruthTable(); + int op = 0; + int a = 0; + int b = 0; + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 0; + int result = instance.opInt(truthTable, op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + private TreeMap createTimestampList() throws ParseException { + TreeMap map = new TreeMap<>(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0); + return map; +// + } + + private TreeMap createStringTimestampList() throws ParseException { + TreeMap map = new TreeMap<>(); + + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15).toString(dtf), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5).toString(dtf), 0); + return map; +// + } + + private TreeMap createSecondTimeline() throws ParseException { + TreeMap map = new TreeMap<>(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0); + return map; +// + } + + private int[][][] createTruthTable() { + + int[][][] truthtable = new int[2][6][6]; + + truthtable[0][0][0] = 0; + truthtable[0][0][1] = 0; + truthtable[0][0][2] = 0; + truthtable[0][0][3] = 0; + truthtable[0][0][4] = 0; + 
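+ // Layout (inferred): truthtable[op][a][b] is the status that results from combining status a
+ // with status b under operation op; only the a <= b half is populated, the rest stays at -1.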
truthtable[0][0][5] = 0; + + truthtable[0][1][0] = -1; + truthtable[0][1][1] = 1; + truthtable[0][1][2] = 1; + truthtable[0][1][3] = 1; + truthtable[0][1][4] = 1; + truthtable[0][1][5] = 1; + + truthtable[0][2][0] = -1; + truthtable[0][2][1] = -1; + truthtable[0][2][2] = 2; + truthtable[0][2][3] = 2; + truthtable[0][2][4] = 4; + truthtable[0][2][5] = 2; + + truthtable[0][3][0] = -1; + truthtable[0][3][1] = -1; + truthtable[0][3][2] = -1; + truthtable[0][3][3] = 3; + truthtable[0][3][4] = 4; + truthtable[0][3][5] = 5; + + truthtable[0][4][0] = -1; + truthtable[0][4][1] = -1; + truthtable[0][4][2] = -1; + truthtable[0][4][3] = -1; + truthtable[0][4][4] = 4; + truthtable[0][4][5] = 5; + + truthtable[0][5][0] = -1; + truthtable[0][5][1] = -1; + truthtable[0][5][2] = -1; + truthtable[0][5][3] = -1; + truthtable[0][5][4] = -1; + truthtable[0][5][5] = 5; + + truthtable[1][0][0] = 0; + truthtable[1][0][1] = 1; + truthtable[1][0][2] = 2; + truthtable[1][0][3] = 3; + truthtable[1][0][4] = 4; + truthtable[1][0][5] = 5; + + truthtable[1][1][0] = -1; + truthtable[1][1][1] = 1; + truthtable[1][1][2] = 2; + truthtable[1][1][3] = 3; + truthtable[1][1][4] = 4; + truthtable[1][1][5] = 5; + + truthtable[1][2][0] = -1; + truthtable[1][2][1] = -1; + truthtable[1][2][2] = 2; + truthtable[1][2][3] = 3; + truthtable[1][2][4] = 4; + truthtable[1][2][5] = 5; + + truthtable[1][3][0] = -1; + truthtable[1][3][1] = -1; + truthtable[1][3][2] = -1; + truthtable[1][3][3] = 3; + truthtable[1][3][4] = 4; + truthtable[1][3][5] = 5; + + truthtable[1][4][0] = -1; + truthtable[1][4][1] = -1; + truthtable[1][4][2] = -1; + truthtable[1][4][3] = -1; + truthtable[1][4][4] = 4; + truthtable[1][4][5] = 4; + + truthtable[1][5][0] = -1; + truthtable[1][5][1] = -1; + truthtable[1][5][2] = -1; + truthtable[1][5][3] = -1; + truthtable[1][5][4] = -1; + truthtable[1][5][5] = 5; + + return truthtable; + + } + + private TreeMap createMerged() throws ParseException { + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + return map; + } + +} diff --git a/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java b/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java new file mode 100644 index 00000000..7df7ab4e --- /dev/null +++ b/flink_jobs/Timelines/src/test/java/timelines/TimelineUtils.java @@ -0,0 +1,188 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + +import timelines.TimelineUtils; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.TreeMap; +import org.joda.time.DateTime; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; + +/** + * + * A utils class to process resource files for tests and provide the information + */ +public class TimelineUtils { + + public TimelineJson readTimelines() throws IOException, FileNotFoundException, ParseException, java.text.ParseException { + + JSONObject timelineJSONObj = readJsonFromFile(TimelineUtils.class.getResource("/timelines/timeline.json").getFile()); + TimelineJson timelinejson = buildTimelines(timelineJSONObj); + return timelinejson; + } + + private JSONObject readJsonFromFile(String path) throws FileNotFoundException, IOException, org.json.simple.parser.ParseException { + JSONParser parser = new JSONParser(); + URL url = TimelineUtils.class.getResource(path); + Object obj = parser.parse(new FileReader(path)); + + JSONObject jsonObject = (JSONObject) obj; + + return jsonObject; + } + + public TimelineJson buildTimelines(JSONObject jsonObject) throws java.text.ParseException { + + ArrayList states = new ArrayList<>(); + ArrayList operations = new ArrayList<>(); + ArrayList inputTimelines = new ArrayList<>(); + TreeMap outputTimeline = new TreeMap(); + JSONObject dataObject = (JSONObject) jsonObject.get("data"); + + JSONArray stateList = (JSONArray) dataObject.get("available_states"); + JSONArray operationList = (JSONArray) dataObject.get("operations"); + String operation = (String) dataObject.get("operation"); + Iterator operationsIter = operationList.iterator(); + + while (operationsIter.hasNext()) { + String op = operationsIter.next(); + operations.add(op); + } + JSONArray inputs = (JSONArray) dataObject.get("inputs"); + JSONObject output = (JSONObject) dataObject.get("output"); + Iterator stateIter = stateList.iterator(); + while (stateIter.hasNext()) { + String state = stateIter.next(); + states.add(state); + } + + Iterator inputIter = inputs.iterator(); + while (inputIter.hasNext()) { + JSONObject timelineJSONObj = inputIter.next(); + JSONArray timestampList = (JSONArray) timelineJSONObj.get("timestamps"); + Iterator timeIter = timestampList.iterator(); + TreeMap map = new TreeMap(); + while (timeIter.hasNext()) { + JSONObject timestatus = (JSONObject) timeIter.next(); + String time = (String) timestatus.get("timestamp"); + String status = (String) timestatus.get("status"); + map.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status)); + inputTimelines.add(map); + } + + } + + JSONArray timestampList = (JSONArray) output.get("timestamps"); + Iterator timeIter = timestampList.iterator(); + + while (timeIter.hasNext()) { + JSONObject timestatus = (JSONObject) timeIter.next(); + String time = (String) timestatus.get("timestamp"); + String status = (String) timestatus.get("status"); + outputTimeline.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status)); + + } + + JSONArray opTruthTable = (JSONArray) dataObject.get("operation_truth_table"); + + Iterator opTruthTableIter = opTruthTable.iterator(); + int[][][] table = new int[operations.size()][states.size()][states.size()]; + for (int[][] surface : table) { + for (int[] line : 
surface) { + Arrays.fill(line, -1); + } + } + while (opTruthTableIter.hasNext()) { + JSONObject truthOperationObj = (JSONObject) opTruthTableIter.next(); + String truthOp = (String) truthOperationObj.get("name"); + int truthOpInt = operations.indexOf(truthOp); + JSONArray truthTable = (JSONArray) truthOperationObj.get("truth_table"); + Iterator truthTableIter = truthTable.iterator(); + while (truthTableIter.hasNext()) { + + JSONObject truthTableObj = (JSONObject) truthTableIter.next(); + String a = (String) truthTableObj.get("a"); + int aInt = states.indexOf(a); + String b = (String) truthTableObj.get("b"); + int bInt = states.indexOf(b); + String x = (String) truthTableObj.get("b"); + int xInt = states.indexOf(x); + table[truthOpInt][aInt][bInt] = xInt; + + } + } + TimelineJson timelineJsonObject = new TimelineJson(inputTimelines, outputTimeline, operations.indexOf(operation),table,states); + return timelineJsonObject; + } + + public class TimelineJson { + + private ArrayList inputTimelines; + private TreeMap outputTimeline; + private Integer operation; + private int[][][] truthTable; + private ArrayList states; + + public TimelineJson(ArrayList inputTimelines, TreeMap outputTimeline, Integer operation, int[][][] truthTable, ArrayList states) { + this.inputTimelines = inputTimelines; + this.outputTimeline = outputTimeline; + this.operation = operation; + this.truthTable = truthTable; + this.states = states; + } + + public ArrayList getInputTimelines() { + return inputTimelines; + } + + public void setInputTimelines(ArrayList inputTimelines) { + this.inputTimelines = inputTimelines; + } + + public TreeMap getOutputTimeline() { + return outputTimeline; + } + + public void setOutputTimeline(TreeMap outputTimeline) { + this.outputTimeline = outputTimeline; + } + + public Integer getOperation() { + return operation; + } + + public void setOperation(Integer operation) { + this.operation = operation; + } + + public int[][][] getTruthTable() { + return truthTable; + } + + public void setTruthTable(int[][][] truthTable) { + this.truthTable = truthTable; + } + + public ArrayList getStates() { + return states; + } + + public void setStates(ArrayList states) { + this.states = states; + } + } + + +} diff --git a/flink_jobs/ams_ingest_metric/.gitignore b/flink_jobs/ams_ingest_metric/.gitignore index b83d2226..6c4e323f 100644 --- a/flink_jobs/ams_ingest_metric/.gitignore +++ b/flink_jobs/ams_ingest_metric/.gitignore @@ -1 +1,8 @@ /target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/ams_ingest_metric/pom.xml b/flink_jobs/ams_ingest_metric/pom.xml index ef75498c..fea028e6 100644 --- a/flink_jobs/ams_ingest_metric/pom.xml +++ b/flink_jobs/ams_ingest_metric/pom.xml @@ -109,12 +109,12 @@ org.apache.httpcomponents httpclient - 4.5.2 + 4.5.13 org.apache.httpcomponents fluent-hc - 4.5.2 + 4.5.13 @@ -182,17 +182,17 @@ hbase-client 1.2.0-cdh5.7.4 - org.apache.httpcomponents httpclient - 4.5.2 + 4.5.13 org.apache.httpcomponents fluent-hc - 4.5.2 + 4.5.13 + diff --git a/flink_jobs/ams_ingest_sync/.gitignore b/flink_jobs/ams_ingest_sync/.gitignore index b83d2226..6c4e323f 100644 --- a/flink_jobs/ams_ingest_sync/.gitignore +++ b/flink_jobs/ams_ingest_sync/.gitignore @@ -1 +1,8 @@ /target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/ams_ingest_sync/pom.xml b/flink_jobs/ams_ingest_sync/pom.xml index e11b872b..c455c1a5 100644 --- a/flink_jobs/ams_ingest_sync/pom.xml +++ b/flink_jobs/ams_ingest_sync/pom.xml @@ 
-90,7 +90,7 @@ org.apache.httpcomponents httpclient - 4.5.2 + 4.5.13 com.google.code.gson @@ -100,7 +100,7 @@ junit junit - 4.11 + 4.13.1 test @@ -159,25 +159,13 @@ org.apache.httpcomponents httpclient - 4.5.2 + 4.5.13 com.google.code.gson gson 2.7 - - junit - junit - 4.11 - test - - - junit-addons - junit-addons - 1.4 - test - diff --git a/flink_jobs/batch_ar/.gitignore b/flink_jobs/batch_ar/.gitignore index ce1b0b79..6c4e323f 100644 --- a/flink_jobs/batch_ar/.gitignore +++ b/flink_jobs/batch_ar/.gitignore @@ -1,7 +1,8 @@ /target/ - -# Eclipse related -.classpath .project .settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml diff --git a/flink_jobs/batch_ar/pom.xml b/flink_jobs/batch_ar/pom.xml index b93a7ff7..4326c2d4 100644 --- a/flink_jobs/batch_ar/pom.xml +++ b/flink_jobs/batch_ar/pom.xml @@ -95,6 +95,18 @@ + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + joda-time joda-time @@ -135,9 +147,17 @@ junit junit - 4.11 + 4.13.1 test + + + com.github.tomakehurst + wiremock + 1.58 + test + + @@ -204,6 +224,17 @@ 3.2.2 + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + @@ -386,4 +417,4 @@ --> - + \ No newline at end of file diff --git a/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java new file mode 100644 index 00000000..d8cb13b5 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResource.java @@ -0,0 +1,5 @@ +package argo.amr; + +public enum ApiResource { + CONFIG, OPS, METRIC, AGGREGATION, THRESHOLDS, TOPOENDPOINTS, TOPOGROUPS, WEIGHTS, DOWNTIMES, RECOMPUTATIONS +} \ No newline at end of file diff --git a/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java new file mode 100644 index 00000000..c4375701 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/java/argo/amr/ApiResourceManager.java @@ -0,0 +1,644 @@ +package argo.amr; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.net.ssl.SSLContext; + +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.fluent.Executor; +import org.apache.http.client.fluent.Request; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + + +/** + * APIResourceManager class fetches remote argo-web-api resources such as + * report configuration, profiles, topology, weights in JSON format + */ + + +public class ApiResourceManager { + + private EnumMap data = new EnumMap<>(ApiResource.class); + + private String endpoint; + private String token; + private 
String reportID; + private String date; + private String proxy; + + private String metricID; + private String aggregationID; + private String opsID; + private String threshID; + private String reportName; + private String weightsID; + private boolean verify; + + + public ApiResourceManager(String endpoint, String token) { + this.endpoint = endpoint; + this.token = token; + this.metricID = ""; + this.aggregationID = ""; + this.opsID = ""; + this.threshID = ""; + this.reportName = ""; + this.reportID = ""; + this.date = ""; + this.proxy = ""; + this.weightsID = ""; + this.verify = true; + + } + + public boolean getVerify() { + return verify; + } + + public void setVerify(boolean verify) { + this.verify = verify; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + + public String getReportID() { + return reportID; + } + + public void setReportID(String reportID) { + this.reportID = reportID; + } + + public String getReportName() { + return this.reportName; + } + + public String getOpsID() { + return this.opsID; + } + + + public String getAggregationID() { + return this.aggregationID; + } + + public String getMetricID() { + return this.metricID; + } + + public String getThresholdsID() { + return this.threshID; + } + + + public String getDate() { + return date; + } + + public void setDate(String date) { + this.date = date; + } + + public String getProxy() { + return proxy; + } + + public void setProxy(String proxy) { + this.proxy = proxy; + } + + public String getWeightsID() { + return weightsID; + } + + public void setWeightsID(String weightsID) { + this.weightsID = weightsID; + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Contacts remote argo-web-api based on the full url of a resource its content (expected in json format) + * + * @param fullURL String containing the full url representation of the argo-web-api resource + * @return A string representation of the resource json content + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + private String getResource(String fullURL) { + + + Request r = Request.Get(fullURL).addHeader("Accept", "application/json").addHeader("Content-type", + "application/json").addHeader("x-api-key",this.token); + if (!this.proxy.isEmpty()) { + r = r.viaProxy(proxy); + } + + r = r.connectTimeout(1000).socketTimeout(1000); + + String content = "{}"; + + try { + if (this.verify == false) { + CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + Executor executor = Executor.newInstance(httpClient); + content = executor.execute(r).returnContent().asString(); + } else { + + content = r.execute().returnContent().asString(); + } + } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); 
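+ // NOTE: on any TLS or I/O failure the exception is only printed; the method then falls
+ // back to returning the "{}" placeholder initialised above, so callers receive an empty JSON object.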
+ } + + return content; + } + + /** + * Retrieves the remote report configuration based on reportID main class attribute and + * stores the content in the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteConfig() { + String path = "https://%s/api/v2/reports/%s"; + String fullURL = String.format(path, this.endpoint, this.reportID); + String content = getResource(fullURL); + this.data.put(ApiResource.CONFIG, getJsonData(content, false)); + } + + + /** + * Retrieves the metric profile content based on the metric_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteMetric() { + + String path = "https://%s/api/v2/metric_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.metricID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.METRIC, getJsonData(content, false)); + } + + /** + * Retrieves the aggregation profile content based on the aggreagation_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteAggregation() { + + String path = "https://%s/api/v2/aggregation_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.aggregationID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.AGGREGATION, getJsonData(content, false)); + } + + /** + * Retrieves the ops profile content based on the ops_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteOps() { + + String path = "https://%s/api/v2/operations_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.opsID, this.date); + + String content = getResource(fullURL); + this.data.put(ApiResource.OPS, getJsonData(content, false)); + } + + /** + * Retrieves the thresholds profile content based on the thresh_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteThresholds() { + + String path = "https://%s/api/v2/thresholds_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.threshID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.THRESHOLDS, getJsonData(content, false)); + } + + /** + * Retrieves the topology endpoint content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoEndpoints() { + String path = "https://%s/api/v2/topology/endpoints/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOENDPOINTS, getJsonData(content, true)); + } + + /** + * Retrieves the topology groups 
content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoGroups() { + String path = "https://%s/api/v2/topology/groups/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOGROUPS, getJsonData(content, true)); + } + + /** + * Retrieves the weights content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteWeights() { + String path = "https://%s/api/v2/weights/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.weightsID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.WEIGHTS, getJsonData(content, false)); + } + + /** + * Retrieves the downtimes content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteDowntimes() { + String path = "https://%s/api/v2/downtimes?date=%s"; + String fullURL = String.format(path, this.endpoint, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.DOWNTIMES, getJsonData(content, false)); + } + + public void getRemoteRecomputations() { + String path = "https://%s/api/v2/recomputations?date=%s"; + String fullURL = String.format(path, this.endpoint, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.RECOMPUTATIONS, getJsonData(content, true)); + } + + /** + * Returns local resource (after has been retrieved) content based on resource type + * + * @param res + * @return The extracted items JSON value as string + */ + public String getResourceJSON(ApiResource res) { + return this.data.get(res); + } + + /** + * Exectues all steps to retrieve the complete amount of the available profile, + * topology, weights and downtime information from argo-web-api + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteAll() { + // Start with report and configuration + this.getRemoteConfig(); + // parse remote report config to be able to get the other profiles + this.parseReport(); + // Go on to the profiles + this.getRemoteMetric(); + this.getRemoteOps(); + this.getRemoteAggregation(); + if (!this.threshID.equals("")) this.getRemoteThresholds(); + // Go to topology + this.getRemoteTopoEndpoints(); + this.getRemoteTopoGroups(); + // get weights + if (!this.weightsID.equals("")) this.getRemoteWeights(); + // get downtimes + this.getRemoteDowntimes(); + // get recomptations + this.getRemoteRecomputations(); + + } + + /** + * Parses the report content to extract the report's name and the various profile IDs + */ + public void parseReport() { + // check if report configuration has been retrieved + if (!this.data.containsKey(ApiResource.CONFIG)) + return; + + String content = this.data.get(ApiResource.CONFIG); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + JsonArray jProfiles = 
jRoot.get("profiles").getAsJsonArray(); + + JsonObject jInfo = jRoot.get("info").getAsJsonObject(); + this.reportName = jInfo.get("name").getAsString(); + + // for each profile iterate and store it's id in profile manager for later + // reference + for (int i = 0; i < jProfiles.size(); i++) { + JsonObject jProf = jProfiles.get(i).getAsJsonObject(); + String profType = jProf.get("type").getAsString(); + String profID = jProf.get("id").getAsString(); + if (profType.equalsIgnoreCase("metric")) { + this.metricID = profID; + } else if (profType.equalsIgnoreCase("aggregation")) { + this.aggregationID = profID; + } else if (profType.equalsIgnoreCase("operations")) { + this.opsID = profID; + } else if (profType.equalsIgnoreCase("thresholds")) { + this.threshID = profID; + } + + } + + } + + /** + * Parses the Downtime content retrieved from argo-web-api and provides a list of Downtime avro objects + * to be used in the next steps of the pipeline + */ + public Downtime[] getListDowntimes() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.DOWNTIMES)) { + Downtime[] rArr = new Downtime[results.size()]; + rArr = results.toArray(rArr); + } + + + String content = this.data.get(ApiResource.DOWNTIMES); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + JsonArray jElements = jRoot.get("endpoints").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String hostname = jItem.get("hostname").getAsString(); + String service = jItem.get("service").getAsString(); + String startTime = jItem.get("start_time").getAsString(); + String endTime = jItem.get("end_time").getAsString(); + + Downtime d = new Downtime(hostname,service,startTime,endTime); + results.add(d); + } + + Downtime[] rArr = new Downtime[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Topology endpoint content retrieved from argo-web-api and provides a list of GroupEndpoint avro objects + * to be used in the next steps of the pipeline + */ + public GroupEndpoint[] getListGroupEndpoints() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.TOPOENDPOINTS)) { + GroupEndpoint[] rArr = new GroupEndpoint[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.TOPOENDPOINTS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String service = jItem.get("service").getAsString(); + String hostname = jItem.get("hostname").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags); + results.add(ge); + } + + GroupEndpoint[] rArr = new GroupEndpoint[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Topology Groups content retrieved from argo-web-api and provides a list of GroupGroup avro objects + * to be used in the next steps of the pipeline + */ + public GroupGroup[] getListGroupGroups() { + List results = new 
ArrayList(); + if (!this.data.containsKey(ApiResource.TOPOGROUPS)){ + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + String content = this.data.get(ApiResource.TOPOGROUPS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String subgroup = jItem.get("subgroup").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupGroup gg = new GroupGroup(gType,group,subgroup,tags); + results.add(gg); + } + + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Weights content retrieved from argo-web-api and provides a list of Weights avro objects + * to be used in the next steps of the pipeline + */ + public Weight[] getListWeights() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.WEIGHTS)) { + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.WEIGHTS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String wType = jRoot.get("weight_type").getAsString(); + JsonArray jElements = jRoot.get("groups").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String group = jItem.get("name").getAsString(); + String weight = jItem.get("value").getAsString(); + + Weight w = new Weight(wType,group,weight); + results.add(w); + } + + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Metric profile content retrieved from argo-web-api and provides a list of MetricProfile avro objects + * to be used in the next steps of the pipeline + */ + public MetricProfile[] getListMetrics() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.METRIC)) { + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.METRIC); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String profileName = jRoot.get("name").getAsString(); + JsonArray jElements = jRoot.get("services").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String service = jItem.get("service").getAsString(); + JsonArray jMetrics = jItem.get("metrics").getAsJsonArray(); + for (int j=0; j < jMetrics.size(); j++) { + String metric = jMetrics.get(j).getAsString(); + + Map tags = new HashMap(); + MetricProfile mp = new MetricProfile(profileName,service,metric,tags); + results.add(mp); + } + + } + + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Extract first JSON item from data JSON array in api response + * + * @param content JSON content of the full repsonse (status + data) + * @return First 
available item in data array as JSON string representation + * + */ + private String getJsonData(String content, boolean asArray) { + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + // Get the data array and the first item + if (asArray) { + return jRoot.get("data").toString(); + } + JsonArray jData = jRoot.get("data").getAsJsonArray(); + JsonElement jItem = jData.get(0); + return jItem.toString(); + } + +} diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java b/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java index 053c06d7..e6bf2b85 100644 --- a/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java +++ b/flink_jobs/batch_ar/src/main/java/argo/batch/ArgoArBatch.java @@ -2,6 +2,8 @@ import org.slf4j.LoggerFactory; +import argo.amr.ApiResource; +import argo.amr.ApiResourceManager; import argo.avro.Downtime; import argo.avro.GroupEndpoint; import argo.avro.GroupGroup; @@ -64,21 +66,48 @@ public static void main(String[] args) throws Exception { env.getConfig().setGlobalJobParameters(params); env.setParallelism(1); // sync data for input + + + String apiEndpoint = params.getRequired("api.endpoint"); + String apiToken = params.getRequired("api.token"); + String reportID = params.getRequired("report.id"); + + ApiResourceManager amr = new ApiResourceManager(apiEndpoint,apiToken); + + // fetch + + // set params + if (params.has("api.proxy")) { + amr.setProxy(params.get("api.proxy")); + } + + amr.setReportID(reportID); + amr.getRemoteAll(); + + - Path mps = new Path(params.getRequired("mps")); - Path egp = new Path(params.getRequired("egp")); - Path ggp = new Path(params.getRequired("ggp")); - Path down = new Path(params.getRequired("downtimes")); - Path weight = new Path(params.getRequired("weights")); +// Path mps = new Path(params.getRequired("mps")); +// Path egp = new Path(params.getRequired("egp")); +// Path ggp = new Path(params.getRequired("ggp")); +// Path down = new Path(params.getRequired("downtimes")); +// Path weight = new Path(params.getRequired("weights")); - DataSource confDS = env.readTextFile(params.getRequired("conf")); - DataSource opsDS = env.readTextFile(params.getRequired("ops")); - DataSource aprDS = env.readTextFile(params.getRequired("apr")); - DataSource recDS = env.readTextFile(params.getRequired("rec")); + //DataSource confDS = env.readTextFile(params.getRequired("conf")); +// DataSource opsDS = env.readTextFile(params.getRequired("ops")); +// DataSource aprDS = env.readTextFile(params.getRequired("apr")); +// DataSource recDS = env.readTextFile(params.getRequired("rec")); + + + DataSourceconfDS = env.fromElements(amr.getResourceJSON(ApiResource.CONFIG)); + DataSourceopsDS = env.fromElements(amr.getResourceJSON(ApiResource.OPS)); + DataSourceaprDS = env.fromElements(amr.getResourceJSON(ApiResource.AGGREGATION)); + DataSourcerecDS = env.fromElements(amr.getResourceJSON(ApiResource.RECOMPUTATIONS)); + // begin with empty threshold datasource DataSource thrDS = env.fromElements(""); + // if threshold filepath has been defined in cli parameters if (params.has("thr")){ // read file and update threshold datasource @@ -86,29 +115,27 @@ public static void main(String[] args) throws Exception { } + DataSet downDS = env.fromElements(new Downtime()); + DataSet weightDS = env.fromElements(new Weight()); + DataSet ggpDS = env.fromElements(new GroupGroup()); ConfigManager confMgr = new 
ConfigManager(); confMgr.loadJsonString(confDS.collect()); - // sync data input: metric profile in avro format - AvroInputFormat mpsAvro = new AvroInputFormat(mps, MetricProfile.class); - DataSet mpsDS = env.createInput(mpsAvro); - - // sync data input: endpoint group topology data in avro format - AvroInputFormat egpAvro = new AvroInputFormat(egp, GroupEndpoint.class); - DataSet egpDS = env.createInput(egpAvro); - - // sync data input: group of group topology data in avro format - AvroInputFormat ggpAvro = new AvroInputFormat(ggp, GroupGroup.class); - DataSet ggpDS = env.createInput(ggpAvro); - - // sync data input: downtime data in avro format - AvroInputFormat downAvro = new AvroInputFormat(down, Downtime.class); - DataSet downDS = env.createInput(downAvro); + // Get the sync datasets directly from the web-api data + DataSet mpsDS = env.fromElements(amr.getListMetrics()); + DataSet egpDS = env.fromElements(amr.getListGroupEndpoints()); + + + Downtime[] listDowntimes = amr.getListDowntimes(); + Weight[] listWeights = amr.getListWeights(); + GroupGroup[] listGroups = amr.getListGroupGroups(); + + if (listDowntimes.length > 0) downDS = env.fromElements(amr.getListDowntimes()); + if (listWeights.length > 0) weightDS = env.fromElements(amr.getListWeights()); + if (listGroups.length > 0) ggpDS = env.fromElements(amr.getListGroupGroups()); + - // sync data input: weight data in avro format - AvroInputFormat weightAvro = new AvroInputFormat(weight, Weight.class); - DataSet weightDS = env.createInput(weightAvro); // todays metric data Path in = new Path(params.getRequired("mdata")); diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java b/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java index 26622c0b..56b70527 100644 --- a/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java +++ b/flink_jobs/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java @@ -135,8 +135,9 @@ public void flatMap(MonTimeline mtl, Collector out) throws Exception dAR.calculateAR(mtl.getTimeline(),this.opsMgr); int runDateInt = Integer.parseInt(this.runDate.replace("-", "")); - - EndpointAR result = new EndpointAR(runDateInt,this.report,mtl.getHostname(),mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + String groupType = this.confMgr.egroup; + String info = this.egpMgr.getInfo(mtl.getGroup(),groupType, mtl.getHostname(), mtl.getService()); + EndpointAR result = new EndpointAR(runDateInt,this.report,mtl.getHostname(),mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f,info); out.collect(result); diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java b/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java index 928c1e31..c173ddf4 100644 --- a/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java +++ b/flink_jobs/batch_ar/src/main/java/argo/batch/EndpointAR.java @@ -12,8 +12,9 @@ public class EndpointAR { private double up; private double unknown; private double down; + private String info; - public EndpointAR(int _dateInt, String _report, String _name, String _service, String _group, double _a, double _r, double _up, double _unknown, double _down){ + public EndpointAR(int _dateInt, String _report, String _name, String _service, String _group, double _a, double _r, double _up, double _unknown, double _down, String _info){ this.dateInt = _dateInt; this.report=_report; this.name = _name; @@ -24,6 +25,8 @@ public EndpointAR(int _dateInt, String 
_report, String _name, String _service, S this.up = _up; this.unknown = _unknown; this.down = _down; + this.info = _info; + } @@ -92,6 +95,14 @@ public void setDown(double down) { this.down = down; } + public void setInfo(String info) { + this.info = info; + } + + public String getInfo() { + return this.info; + } + public String toString() { return "(" + this.dateInt+ "," + this.report + "," + this.name + "," + this.service + "," + this.group + "," + this.a + "," + this.r + "," + this.up + "," + this.unknown + "," + this.down + ")"; } diff --git a/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java b/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java index 8c67bb1f..961f3f03 100644 --- a/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java +++ b/flink_jobs/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java @@ -86,12 +86,29 @@ public void open(int taskNumber, int numTasks) throws IOException { */ @Override public void writeRecord(EndpointAR record) throws IOException { - + + + String info = record.getInfo(); + // create document from record Document doc = new Document("report", record.getReport()).append("date", record.getDateInt()) .append("name", record.getName()).append("service", record.getService()).append("supergroup", record.getGroup()) .append("availability", record.getA()).append("reliability", record.getR()).append("up", record.getUp()) .append("unknown", record.getUnknown()).append("down", record.getDown()); + + if (!info.equalsIgnoreCase("")) { + Document infoDoc = new Document(); + String[] kvs = info.split(","); + for (String kv : kvs) { + String[] kvtok = kv.split(":",2); + if (kvtok.length == 2){ + infoDoc.append(kvtok[0], kvtok[1]); + } + } + + doc.append("info", infoDoc); + + } if (this.method == MongoMethod.UPSERT) { Bson f = Filters.and(Filters.eq("report", record.getReport()), Filters.eq("date", record.getDateInt()), diff --git a/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java b/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java index 2034458d..abe70698 100644 --- a/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java +++ b/flink_jobs/batch_ar/src/main/java/sync/DowntimeManager.java @@ -146,8 +146,7 @@ public void loadAvro(File avroFile) throws IOException { String service = avroRow.get("service").toString(); String startTime = avroRow.get("start_time").toString(); String endTime = avroRow.get("end_time").toString(); - - // Insert data to list + // insert data to list this.insert(hostname, service, startTime, endTime); } // end of avro rows @@ -179,7 +178,7 @@ public void loadFromList( List dnt) { String startTime = item.getStartTime(); String endTime = item.getEndTime(); // Insert data to list - this.insert(hostname,service,startTime,endTime); + if (hostname != null) this.insert(hostname,service,startTime,endTime); } diff --git a/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java index 87bf6b0d..789bfc33 100644 --- a/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java +++ b/flink_jobs/batch_ar/src/main/java/sync/EndpointGroupManager.java @@ -5,7 +5,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; - +import java.util.Map; import java.util.TreeMap; import java.util.Map.Entry; @@ -49,6 +49,29 @@ public EndpointItem(String type, String group, String service, String hostname, } } + + public String getInfo(String group, String type, String hostname, 
String service) { + String info = ""; + boolean first = true; + HashMap tags = this.getGroupTags(group, type, hostname, service); + if (tags == null) return info; + for (String tName : tags.keySet()) { + if (tName.startsWith("info.")) { + String infoName = tName.replaceFirst("info.", ""); + + String value = tags.get(tName); + if (!value.equalsIgnoreCase("")) { + if (!first) { + info = info + ","; + } else { + first = false; + } + info = info + infoName+":"+tags.get(tName); + } + } + } + return info; + } public EndpointGroupManager() { this.list = new ArrayList(); @@ -86,10 +109,10 @@ public ArrayList getGroup(String type, String hostname, String service) return results; } - public HashMap getGroupTags(String type, String hostname, String service) { + public HashMap getGroupTags(String group, String type, String hostname, String service) { for (EndpointItem item : fList) { - if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + if (item.group.equals(group) && item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { return item.tags; } } diff --git a/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java b/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java index 74bb2378..37985c80 100644 --- a/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java +++ b/flink_jobs/batch_ar/src/main/java/sync/GroupGroupManager.java @@ -217,7 +217,7 @@ public void loadFromList( List ggp) { } // Insert data to list - this.insert(type, group, subgroup, tagMap); + if (type != null) this.insert(type, group, subgroup, tagMap); } this.unfilter(); diff --git a/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java b/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java index c8cb72c7..158565cc 100644 --- a/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java +++ b/flink_jobs/batch_ar/src/main/java/sync/WeightManager.java @@ -140,7 +140,7 @@ public int loadAvro(File avroFile) throws IOException { String group = avroRow.get("site").toString(); String weight = avroRow.get("weight").toString(); - // Insert data to list + // Insert data to list this.insert(type, group, weight); } // end of avro rows @@ -170,8 +170,8 @@ public void loadFromList( List wg) { String type = item.getType(); String group = item.getSite(); String weight = item.getWeight(); - // Insert data to list - this.insert(type, group, weight); + // Insert data to list -- ignore empty placeholder items + if (type != null) this.insert(type, group, weight); } diff --git a/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json new file mode 100644 index 00000000..66f9474d --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/agg_profile.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "date": "2020-06-24", + "name": "test-agg2", + "namespace": "", + "endpoint_group": "servicegroups", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "test-mon", + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4" + }, + "groups": [ + { + "name": "webportal", + "operation": "AND", + "services": [ + { + "name": "WebPortal", + "operation": "OR" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json b/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json new file mode 100644 index 00000000..f0f40f2c 
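The getInfo helper added above flattens any endpoint tags prefixed with "info." into one comma-separated string of key:value pairs (the tests further below expect values such as "URL:host1.example.foo/path/to/service1,DN:foo DN"), and MongoEndpointArOutput splits that string back into a nested "info" sub-document. A minimal sketch of that round trip, using hypothetical tag values and assuming java.util.HashMap and org.bson.Document are imported:

    // hypothetical endpoint tags as they would arrive from topology data
    HashMap<String, String> tags = new HashMap<String, String>();
    tags.put("info.URL", "host1.example.foo/path/to/service1");
    tags.put("info.DN", "foo DN");
    tags.put("monitored", "1"); // skipped by getInfo: no "info." prefix

    // getInfo would yield something like "URL:host1.example.foo/path/to/service1,DN:foo DN";
    // the Mongo output format then rebuilds the pairs into a sub-document
    String info = "URL:host1.example.foo/path/to/service1,DN:foo DN";
    Document infoDoc = new Document();
    for (String kv : info.split(",")) {
        String[] kvtok = kv.split(":", 2);
        if (kvtok.length == 2) {
            infoDoc.append(kvtok[0], kvtok[1]);
        }
    }
    // infoDoc now holds { "URL": "host1.example.foo/path/to/service1", "DN": "foo DN" }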
--- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_AGGREGATION.json @@ -0,0 +1 @@ +{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","date":"2020-06-24","name":"test-agg2","namespace":"","endpoint_group":"servicegroups","metric_operation":"AND","profile_operation":"AND","metric_profile":{"name":"test-mon","id":"92fa5d74-015c-4122-b8b9-7b344f3154d4"},"groups":[{"name":"webportal","operation":"AND","services":[{"name":"WebPortal","operation":"OR"}]}]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json b/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json new file mode 100644 index 00000000..8220787f --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_CONFIG.json @@ -0,0 +1 @@ +{"id":"f29eeb59-ab38-4aa0-b372-5d3c0709dfb2","tenant":"demo","disabled":false,"info":{"name":"Critical","description":"test report","created":"2020-09-24 12:05:04","updated":"2020-10-08 09:32:46"},"thresholds":{"availability":80,"reliability":85,"uptime":0.8,"unknown":0.1,"downtime":0.1},"topology_schema":{"group":{"type":"PROJECT","group":{"type":"SERVICEGROUPS"}}},"profiles":[{"id":"92fa5d74-015c-4122-b8b9-7b344f3154d4","name":"test-mon","type":"metric"},{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","name":"test-agg2","type":"aggregation"},{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","name":"demo_ops","type":"operations"},{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","name":"endpoint_example","type":"thresholds"}],"filter_tags":[]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json b/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json new file mode 100644 index 00000000..b7d181aa --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_DOWNTIMES.json @@ -0,0 +1 @@ +{"date":"2020-11-10","endpoints":[{"hostname":"hostA.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortald","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"}]} \ No newline at end of file diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json b/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json new file mode 100644 index 00000000..b4681fcb --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_METRIC.json @@ -0,0 +1 @@ +{"id":"392fa5d74-015c-4122-b8b9-7b344f3154d4","date":"2020-09-24","name":"test-mon","description":"Generic monitoring profile","services":[{"service":"WebPortal","metrics":["org.nagios.WebCheck"]}]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json new file mode 100644 index 00000000..ff505f0a --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_OPS.json @@ -0,0 +1 @@ 
+{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","date":"2020-06-24","name":"demo_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json new file mode 100644 index 00000000..052b03aa --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_RECOMPUTATIONS.json @@ -0,0 +1 @@ +[{"id":"56db4f1a-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-01-21T23:01:00Z","end_time":"2018-01-23T12:01:00Z","report":"Critical","exclude":["SITE-1","SITE-2"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]},{"id":"66db4f55-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-05-21T23:01:00Z","end_time":"2018-05-23T12:01:00Z","report":"Critical","exclude":["SITE-3","SITE-4"],"status":"done","timestamp":"2018-06-17 17:03:55","history":[{"status":"pending","timestamp":"2018-06-30T11:41:26Z"}]},{"id":"76db4444-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-09-10T23:01:00Z","end_time":"2018-09-15T12:01:00Z","report":"Critical","exclude":["SITE-6","SITE-7","SITE-8"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]}] diff --git 
a/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json new file mode 100644 index 00000000..453e5bdf --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_THRESHOLDS.json @@ -0,0 +1 @@ +{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","date":"2015-01-01","name":"endpoint_example","rules":[{"host":"host1.foo.bar","metric":"service.freshness","thresholds":"freshness=1s;;0:;"}]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json new file mode 100644 index 00000000..10dd42cf --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOENDPOINTS.json @@ -0,0 +1 @@ +[{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host1.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupB","type":"SERVICEGROUPS","service":"webPortal","hostname":"host3.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host2.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}}] diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json new file mode 100644 index 00000000..1c8e4316 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_TOPOGROUPS.json @@ -0,0 +1 @@ +[{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-101","tags":{"monitored":"0","scope":"Local"}},{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-202","tags":{"monitored":"1","scope":"Local"}}] diff --git a/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json b/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json new file mode 100644 index 00000000..399c31c1 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/data_WEIGHTS.json @@ -0,0 +1 @@ +{"id":"3b9602ed-49ec-42f3-8df7-7c35331ebf69","date":"2020-09-02","name":"demo","weight_type":"computationpower","group_type":"SERVICEGROUPS","groups":[{"name":"GROUP-A","value":366},{"name":"GROUP-B","value":4000},{"name":"GROUP-C","value":19838},{"name":"GROUP-D","value":19838}]} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json b/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json new file mode 100644 index 00000000..7bf3adee --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/downtimes.json @@ -0,0 +1,31 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "endpoints": [ + { + "hostname": "hostA.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + }, + { + "hostname": "hostB.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + }, + { + "hostname": "hostB.foo", + "service": "WebPortald", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json new file mode 100644 index 00000000..7ea5a470 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/metric_profile.json @@ -0,0 +1,22 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": 
"392fa5d74-015c-4122-b8b9-7b344f3154d4", + "date": "2020-09-24", + "name": "test-mon", + "description": "Generic monitoring profile", + "services": [ + { + "service": "WebPortal", + "metrics": [ + "org.nagios.WebCheck" + ] + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json b/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json new file mode 100644 index 00000000..9b00f14b --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/ops_profile.json @@ -0,0 +1,248 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "date": "2020-06-24", + "name": "demo_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] + } + ] 
+} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json b/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json new file mode 100644 index 00000000..b597ad09 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/recomputations.json @@ -0,0 +1,72 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "56db4f1a-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-01-21T23:01:00Z", + "end_time": "2018-01-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-1", + "SITE-2" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + }, + { + "id": "66db4f55-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-05-21T23:01:00Z", + "end_time": "2018-05-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-3", + "SITE-4" + ], + "status": "done", + "timestamp": "2018-06-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-06-30T11:41:26Z" + } + ] + }, + { + "id": "76db4444-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-09-10T23:01:00Z", + "end_time": "2018-09-15T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-6", + "SITE-7", + "SITE-8" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/report.json b/flink_jobs/batch_ar/src/main/resources/amr/report.json new file mode 100644 index 00000000..fa5a5f65 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/report.json @@ -0,0 +1,57 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "f29eeb59-ab38-4aa0-b372-5d3c0709dfb2", + "tenant": "demo", + "disabled": false, + "info": { + "name": "Critical", + "description": "test report", + "created": "2020-09-24 12:05:04", + "updated": "2020-10-08 09:32:46" + }, + "thresholds": { + "availability": 80, + "reliability": 85, + "uptime": 0.8, + "unknown": 0.1, + "downtime": 0.1 + }, + "topology_schema": { + "group": { + "type": "PROJECT", + "group": { + "type": "SERVICEGROUPS" + } + } + }, + "profiles": [ + { + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4", + "name": "test-mon", + "type": "metric" + }, + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "name": "test-agg2", + "type": "aggregation" + }, + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "name": "demo_ops", + "type": "operations" + }, + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "name": "endpoint_example", + "type": "thresholds" + } + ], + "filter_tags": [] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json b/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json new file mode 100644 index 00000000..1c1ac3fb --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/thresholds.json @@ -0,0 +1,20 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "date": "2015-01-01", + "name": "endpoint_example", + "rules": [ + { + "host": "host1.foo.bar", + "metric": "service.freshness", + "thresholds": 
"freshness=1s;;0:;" + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json b/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json new file mode 100644 index 00000000..2b1cfed5 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/topoendpoints.json @@ -0,0 +1,44 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host1.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupB", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host3.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host2.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json b/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json new file mode 100644 index 00000000..6286cc55 --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/topogroups.json @@ -0,0 +1,28 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-101", + "tags": { + "monitored": "0", + "scope": "Local" + } + }, + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-202", + "tags": { + "monitored": "1", + "scope": "Local" + } + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/amr/weights.json b/flink_jobs/batch_ar/src/main/resources/amr/weights.json new file mode 100644 index 00000000..fc1dea3f --- /dev/null +++ b/flink_jobs/batch_ar/src/main/resources/amr/weights.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3b9602ed-49ec-42f3-8df7-7c35331ebf69", + "date": "2020-09-02", + "name": "demo", + "weight_type": "computationpower", + "group_type": "SERVICEGROUPS", + "groups": [ + { + "name": "GROUP-A", + "value": 366 + }, + { + "name": "GROUP-B", + "value": 4000 + }, + { + "name": "GROUP-C", + "value": 19838 + }, + { + "name": "GROUP-D", + "value": 19838 + } + ] + } + ] +} diff --git a/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro b/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro new file mode 100644 index 00000000..0f388be0 Binary files /dev/null and b/flink_jobs/batch_ar/src/main/resources/avro/group_endpoints_info.avro differ diff --git a/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java b/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java new file mode 100644 index 00000000..4a384ada --- /dev/null +++ b/flink_jobs/batch_ar/src/test/java/argo/amr/ApiResourceManagerTest.java @@ -0,0 +1,288 @@ +package argo.amr; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.text.ParseException; +import java.util.List; +import java.util.stream.Collectors; + +import 
org.apache.http.client.ClientProtocolException; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.junit.WireMockRule; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.configureFor; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; + + +public class ApiResourceManagerTest { + + public static String loadResJSON(String resURL) { + + InputStream jsonInputStream + = ApiResourceManagerTest.class.getResourceAsStream(resURL); + String content = new BufferedReader( + new InputStreamReader(jsonInputStream, StandardCharsets.UTF_8)) + .lines() + .collect(Collectors.joining("\n")); + return content; + + } + + @Rule + public WireMockRule wireMockRule = new WireMockRule(wireMockConfig().httpsPort(8443)); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/metric_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/agg_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/ops_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/thresholds.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topoendpoints.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topogroups.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/downtimes.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/weights.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/recomputations.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_CONFIG.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_METRIC.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_AGGREGATION.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_OPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_THRESHOLDS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOENDPOINTS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOGROUPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_DOWNTIMES.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_WEIGHTS.json")); + 
assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_RECOMPUTATIONS.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // load mock api response content + String jsonReport = loadResJSON("/amr/report.json"); + String jsonMetric = loadResJSON("/amr/metric_profile.json"); + String jsonAgg = loadResJSON("/amr/agg_profile.json"); + String jsonOps = loadResJSON("/amr/ops_profile.json"); + String jsonThresholds = loadResJSON("/amr/thresholds.json"); + String jsonTopoEnd = loadResJSON("/amr/topoendpoints.json"); + String jsonTopoGroups = loadResJSON("/amr/topogroups.json"); + String jsonDowntimes = loadResJSON("/amr/downtimes.json"); + String jsonWeights = loadResJSON("/amr/weights.json"); + String jsonRecomp = loadResJSON("/amr/recomputations.json"); + + // get json data items + + String dataConfig = loadResJSON("/amr/data_CONFIG.json"); + String dataMetric = loadResJSON("/amr/data_METRIC.json"); + String dataAggr = loadResJSON("/amr/data_AGGREGATION.json"); + String dataOps = loadResJSON("/amr/data_OPS.json"); + String dataThresh = loadResJSON("/amr/data_THRESHOLDS.json"); + String dataTopoEnd = loadResJSON("/amr/data_TOPOENDPOINTS.json"); + String dataTopoGroup = loadResJSON("/amr/data_TOPOGROUPS.json"); + String dataDown = loadResJSON("/amr/data_DOWNTIMES.json"); + String dataWeights = loadResJSON("/amr/data_WEIGHTS.json"); + String dataRecomp = loadResJSON("/amr/data_RECOMPUTATIONS.json"); + + + + + stubFor(get(urlEqualTo("/api/v2/reports/f29eeb59-ab38-4aa0-b372-5d3c0709dfb2")) + .willReturn(aResponse().withBody(jsonReport))); + stubFor(get(urlEqualTo("/api/v2/metric_profiles/92fa5d74-015c-4122-b8b9-7b344f3154d4?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonMetric))); + stubFor(get(urlEqualTo("/api/v2/aggregation_profiles/2744247f-40f8-4dd6-b22c-76a3b38334d8?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonAgg))); + stubFor(get(urlEqualTo("/api/v2/operations_profiles/ea62ff1e-c6e1-438b-83c7-9262b3a4f179?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonOps))); + stubFor(get(urlEqualTo("/api/v2/thresholds_profiles/3345c3c1-322a-47f1-982c-1d9df1fc065e?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonThresholds))); + stubFor(get(urlEqualTo("/api/v2/topology/endpoints/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoEnd))); + stubFor(get(urlEqualTo("/api/v2/topology/groups/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoGroups))); + stubFor(get(urlEqualTo("/api/v2/downtimes?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonDowntimes))); + stubFor(get(urlEqualTo("/api/v2/weights/3b9602ed-49ec-42f3-8df7-7c35331ebf69?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonWeights))); + stubFor(get(urlEqualTo("/api/v2/recomputations?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonRecomp))); + + ApiResourceManager amr = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr.setDate("2020-11-01"); + amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr.setToken("s3cr3t"); + amr.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr.setVerify(false); + + // Get the report configuration first and parse it + amr.getRemoteConfig(); + amr.parseReport(); + + assertEquals("report name retrieved","Critical",amr.getReportName()); + assertEquals("metric id retrieved","92fa5d74-015c-4122-b8b9-7b344f3154d4",amr.getMetricID()); + assertEquals("ops id 
retrieved","ea62ff1e-c6e1-438b-83c7-9262b3a4f179",amr.getOpsID()); + assertEquals("aggregations id retrieved","2744247f-40f8-4dd6-b22c-76a3b38334d8",amr.getAggregationID()); + assertEquals("thresholds id retrieved","3345c3c1-322a-47f1-982c-1d9df1fc065e",amr.getThresholdsID()); + + assertEquals("retrieved config data",dataConfig,amr.getResourceJSON(ApiResource.CONFIG)); + + + // get the profiles metric, aggregation, ops and thresholds + amr.getRemoteMetric(); + amr.getRemoteAggregation(); + amr.getRemoteOps(); + amr.getRemoteThresholds(); + + assertEquals("retrieved metric profile data",dataMetric,amr.getResourceJSON(ApiResource.METRIC)); + assertEquals("retrieved aggregation profile data",dataAggr,amr.getResourceJSON(ApiResource.AGGREGATION)); + assertEquals("retrieved ops profile data",dataOps,amr.getResourceJSON(ApiResource.OPS)); + assertEquals("retrieved thresholds profile data",dataThresh,amr.getResourceJSON(ApiResource.THRESHOLDS)); + + // get remote topology + + amr.getRemoteTopoEndpoints(); + amr.getRemoteTopoGroups(); + + assertEquals("retrieved topology endpoints",dataTopoEnd,amr.getResourceJSON(ApiResource.TOPOENDPOINTS)); + assertEquals("retrieved topology groups",dataTopoGroup,amr.getResourceJSON(ApiResource.TOPOGROUPS)); + + + // get remote downtimes + amr.getRemoteDowntimes(); + assertEquals("retrieved downtimes",dataDown,amr.getResourceJSON(ApiResource.DOWNTIMES)); + + // get weights + amr.getRemoteWeights(); + assertEquals("retrieved downtimes",dataWeights,amr.getResourceJSON(ApiResource.WEIGHTS)); + + // get recomputations + amr.getRemoteRecomputations(); + assertEquals("retrieved recomputations",dataRecomp,amr.getResourceJSON(ApiResource.RECOMPUTATIONS)); + + // initate a second amr and check getRemoteAll routine + + + ApiResourceManager amr2 = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr2.setDate("2020-11-01"); + amr2.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr2.setToken("s3cr3t"); + amr2.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr2.setVerify(false); + + amr2.getRemoteAll(); + + // test amr2 downtime list + Downtime[] dtl = amr2.getListDowntimes(); + assertEquals("downtime list size", 3, dtl.length); + assertEquals("downtime data", "WebPortal", dtl[0].getService()); + assertEquals("downtime data", "hostA.foo", dtl[0].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[0].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[0].getEndTime()); + assertEquals("downtime data", "WebPortal", dtl[1].getService()); + assertEquals("downtime data", "hostB.foo", dtl[1].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[1].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[1].getEndTime()); + assertEquals("downtime data", "WebPortald", dtl[2].getService()); + assertEquals("downtime data", "hostB.foo", dtl[2].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[2].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[2].getEndTime()); + + // test amr2 group endpoint list + GroupEndpoint[] gel = amr2.getListGroupEndpoints(); + assertEquals("group endpoint list size", 3, gel.length); + assertEquals("group endpoint data", "SERVICEGROUPS", gel[0].getType()); + assertEquals("group endpoint data", "groupA", gel[0].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[0].getService()); + assertEquals("group endpoint data", "host1.foo.bar", gel[0].getHostname()); + assertEquals("group 
endpoint data", "1", gel[0].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[0].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[0].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[1].getType()); + assertEquals("group endpoint data", "groupB", gel[1].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[1].getService()); + assertEquals("group endpoint data", "host3.foo.bar", gel[1].getHostname()); + assertEquals("group endpoint data", "1", gel[1].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[1].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[1].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[2].getType()); + assertEquals("group endpoint data", "groupA", gel[2].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[2].getService()); + assertEquals("group endpoint data", "host2.foo.bar", gel[2].getHostname()); + assertEquals("group endpoint data", "1", gel[2].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[2].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[2].getTags().get("scope")); + + // test amr2 group groups list + GroupGroup[] ggl = amr2.getListGroupGroups(); + assertEquals("group endpoint list size", 2, ggl.length); + assertEquals("group endpoint data", "PROJECT", ggl[0].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[0].getGroup()); + assertEquals("group endpoint data", "GROUP-101", ggl[0].getSubgroup()); + assertEquals("group endpoint data", "0", ggl[0].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[0].getTags().get("scope")); + + assertEquals("group endpoint data", "PROJECT", ggl[1].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[1].getGroup()); + assertEquals("group endpoint data", "GROUP-202", ggl[1].getSubgroup()); + assertEquals("group endpoint data", "1", ggl[1].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[1].getTags().get("scope")); + + // test amr2 weights list + Weight[] wl = amr2.getListWeights(); + assertEquals("group endpoint list size", 4, wl.length); + assertEquals("group endpoint data", "computationpower", wl[0].getType()); + assertEquals("group endpoint data", "GROUP-A", wl[0].getSite()); + assertEquals("group endpoint data", "366", wl[0].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[1].getType()); + assertEquals("group endpoint data", "GROUP-B", wl[1].getSite()); + assertEquals("group endpoint data", "4000", wl[1].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[2].getType()); + assertEquals("group endpoint data", "GROUP-C", wl[2].getSite()); + assertEquals("group endpoint data", "19838", wl[2].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[3].getType()); + assertEquals("group endpoint data", "GROUP-D", wl[3].getSite()); + assertEquals("group endpoint data", "19838", wl[3].getWeight()); + + // test amr2 metric profile list + MetricProfile[] mpl = amr2.getListMetrics(); + assertEquals("group endpoint list size", 1, mpl.length); + assertEquals("group endpoint data", "test-mon", mpl[0].getProfile()); + assertEquals("group endpoint data", "WebPortal", mpl[0].getService()); + assertEquals("group endpoint data", "org.nagios.WebCheck", mpl[0].getMetric()); + assertEquals("group endpoint data", 0, 
mpl[0].getTags().size()); + + + + + } + +} diff --git a/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java b/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java index 25d00ecf..96bcfd11 100644 --- a/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java +++ b/flink_jobs/batch_ar/src/test/java/argo/batch/EndpointArTest.java @@ -88,7 +88,7 @@ public void test() throws URISyntaxException, IOException, ParseException { int runDateInt = Integer.parseInt(runDate.replace("-", "")); - EndpointAR result = new EndpointAR(runDateInt,report,item.getHostname(),item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + EndpointAR result = new EndpointAR(runDateInt,report,item.getHostname(),item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f,"URL:https://example.foo"); resultDS.add(result); } diff --git a/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java b/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java index 6374c17f..be1e5f9c 100644 --- a/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java +++ b/flink_jobs/batch_ar/src/test/java/ops/DAggregatorTest.java @@ -126,7 +126,6 @@ public void test2() throws URISyntaxException, ParseException, IOException { dAgg.settleAll(opsMgr.getIntStatus("MISSING")); dAgg.aggregate("AND", opsMgr); - System.out.println(Arrays.toString(dAgg.aggregation.samples)); assertArrayEquals("Aggregation test 3", expected, dAgg.aggregation.samples); } diff --git a/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java b/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java index 776a794b..2926b9fb 100644 --- a/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java +++ b/flink_jobs/batch_ar/src/test/java/ops/OpsManagerTest.java @@ -59,7 +59,7 @@ public void test() throws URISyntaxException, IOException { assertEquals("DOWNTIME (AND) UNKNOWN = DOWNTIME", opsMgr.op("AND", "DOWNTIME", "UNKNOWN"), "DOWNTIME"); assertEquals("Default Downtime Status = DOWNTIME", opsMgr.getDefaultDown(), "DOWNTIME"); - System.out.println(opsMgr.getDefaultMissingInt()); + } } diff --git a/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java b/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java index 99093f48..dac10812 100644 --- a/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java +++ b/flink_jobs/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java @@ -19,6 +19,7 @@ public class EndpointGroupManagerTest { public static void setUpBeforeClass() throws Exception { // Assert that files are present assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro")); + assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro")); } @Test @@ -55,7 +56,29 @@ public void test() throws URISyntaxException, IOException { // Check non-existent groups assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false); assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false); - + + // Prepare Resource File with extra information in tags + URL resAvroFile2 = EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro"); + File avroFile2 = new File(resAvroFile2.toURI()); + // Instantiate class + EndpointGroupManager ge2 = new EndpointGroupManager(); + // Test loading file + ge2.loadAvro(avroFile2); + assertNotNull("File Loaded", ge); + + String exp1 = 
"URL:host1.example.foo/path/to/service1,DN:foo DN"; + String exp2 = "URL:host1.example.foo/path/to/service2"; + String exp3 = "URL:host2.example.foo/path/to/service1"; + String exp4 = "ext.Value:extension1,URL:host2.example.foo/path/to/service2"; + String exp5 = ""; + String exp6 = "URL:host4.example.foo/path/to/service1"; + + assertEquals("wrong tags", exp1,ge2.getInfo("groupA", "SERVICEGROUPS", "host1.example.foo_11", "services.url")); + assertEquals("wrong tags", exp2,ge2.getInfo("groupB", "SERVICEGROUPS", "host1.example.foo_22", "services.url")); + assertEquals("wrong tags", exp3,ge2.getInfo("groupC", "SERVICEGROUPS", "host2.example.foo_33", "services.url")); + assertEquals("wrong tags", exp4,ge2.getInfo("groupD", "SERVICEGROUPS", "host2.example.foo_44", "services.url")); + assertEquals("wrong tags", exp5,ge2.getInfo("groupE", "SERVICEGROUPS", "host3.example.foo_55", "services.url")); + assertEquals("wrong tags", exp6,ge2.getInfo("groupF", "SERVICEGROUPS", "host4.example.foo_66", "services.url")); } } diff --git a/flink_jobs/batch_status/.gitignore b/flink_jobs/batch_status/.gitignore index ce1b0b79..6c4e323f 100644 --- a/flink_jobs/batch_status/.gitignore +++ b/flink_jobs/batch_status/.gitignore @@ -1,7 +1,8 @@ /target/ - -# Eclipse related -.classpath .project .settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml diff --git a/flink_jobs/batch_status/pom.xml b/flink_jobs/batch_status/pom.xml index 6562ed49..64be44b6 100644 --- a/flink_jobs/batch_status/pom.xml +++ b/flink_jobs/batch_status/pom.xml @@ -8,7 +8,8 @@ License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - 4.0.0 @@ -105,6 +106,18 @@ gson 2.2.4 + + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + @@ -126,7 +139,7 @@ log4j ${log4j.version} - + junit-addons junit-addons @@ -136,9 +149,22 @@ junit junit - 4.11 + 4.13.1 + test + + + org.apache.flink + flink-test-utils_2.11 + 1.8.0 test + + + com.github.tomakehurst + wiremock + 1.58 + test + @@ -205,6 +231,17 @@ 3.2.2 + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + diff --git a/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java new file mode 100644 index 00000000..d8cb13b5 --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResource.java @@ -0,0 +1,5 @@ +package argo.amr; + +public enum ApiResource { + CONFIG, OPS, METRIC, AGGREGATION, THRESHOLDS, TOPOENDPOINTS, TOPOGROUPS, WEIGHTS, DOWNTIMES, RECOMPUTATIONS +} \ No newline at end of file diff --git a/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java new file mode 100644 index 00000000..68d14277 --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/argo/amr/ApiResourceManager.java @@ -0,0 +1,644 @@ +package argo.amr; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + + + +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.fluent.Executor; 
+import org.apache.http.client.fluent.Request; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + + +/** + * APIResourceManager class fetches remote argo-web-api resources such as + * report configuration, profiles, topology, weights in JSON format + */ + + +public class ApiResourceManager { + + private EnumMap data = new EnumMap<>(ApiResource.class); + + private String endpoint; + private String token; + private String reportID; + private String date; + private String proxy; + + private String metricID; + private String aggregationID; + private String opsID; + private String threshID; + private String reportName; + private String weightsID; + private boolean verify; + + + public ApiResourceManager(String endpoint, String token) { + this.endpoint = endpoint; + this.token = token; + this.metricID = ""; + this.aggregationID = ""; + this.opsID = ""; + this.threshID = ""; + this.reportName = ""; + this.reportID = ""; + this.date = ""; + this.proxy = ""; + this.weightsID = ""; + this.verify = true; + + } + + public boolean getVerify() { + return verify; + } + + public void setVerify(boolean verify) { + this.verify = verify; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + + public String getReportID() { + return reportID; + } + + public void setReportID(String reportID) { + this.reportID = reportID; + } + + public String getReportName() { + return this.reportName; + } + + public String getOpsID() { + return this.opsID; + } + + + public String getAggregationID() { + return this.aggregationID; + } + + public String getMetricID() { + return this.metricID; + } + + public String getThresholdsID() { + return this.threshID; + } + + + public String getDate() { + return date; + } + + public void setDate(String date) { + this.date = date; + } + + public String getProxy() { + return proxy; + } + + public void setProxy(String proxy) { + this.proxy = proxy; + } + + public String getWeightsID() { + return weightsID; + } + + public void setWeightsID(String weightsID) { + this.weightsID = weightsID; + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Contacts remote argo-web-api based on the full url of a resource its content (expected in json format) + * + * @param fullURL String containing the full url representation of the argo-web-api resource + * @return A string representation of the resource json content + * 
@throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + private String getResource(String fullURL) { + + + Request r = Request.Get(fullURL).addHeader("Accept", "application/json").addHeader("Content-type", + "application/json").addHeader("x-api-key",this.token); + if (!this.proxy.isEmpty()) { + r = r.viaProxy(proxy); + } + + r = r.connectTimeout(1000).socketTimeout(1000); + + String content = "{}"; + + try { + if (this.verify == false) { + CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + Executor executor = Executor.newInstance(httpClient); + content = executor.execute(r).returnContent().asString(); + } else { + + content = r.execute().returnContent().asString(); + } + } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + return content; + } + + /** + * Retrieves the remote report configuration based on reportID main class attribute and + * stores the content in the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteConfig() { + String path = "https://%s/api/v2/reports/%s"; + String fullURL = String.format(path, this.endpoint, this.reportID); + String content = getResource(fullURL); + this.data.put(ApiResource.CONFIG, getJsonData(content, false)); + } + + + /** + * Retrieves the metric profile content based on the metric_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteMetric() { + + String path = "https://%s/api/v2/metric_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.metricID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.METRIC, getJsonData(content, false)); + } + + /** + * Retrieves the aggregation profile content based on the aggreagation_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteAggregation() { + + String path = "https://%s/api/v2/aggregation_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.aggregationID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.AGGREGATION, getJsonData(content, false)); + } + + /** + * Retrieves the ops profile content based on the ops_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteOps() { + + String path = "https://%s/api/v2/operations_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.opsID, this.date); + + String content = getResource(fullURL); + this.data.put(ApiResource.OPS, getJsonData(content, false)); + } + + /** + * Retrieves the thresholds profile content based on the thresh_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws 
KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteThresholds() { + + String path = "https://%s/api/v2/thresholds_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.threshID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.THRESHOLDS, getJsonData(content, false)); + } + + /** + * Retrieves the topology endpoint content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoEndpoints() { + String path = "https://%s/api/v2/topology/endpoints/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOENDPOINTS, getJsonData(content, true)); + } + + /** + * Retrieves the topology groups content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoGroups() { + String path = "https://%s/api/v2/topology/groups/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOGROUPS, getJsonData(content, true)); + } + + /** + * Retrieves the weights content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteWeights() { + String path = "https://%s/api/v2/weights/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.weightsID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.WEIGHTS, getJsonData(content, false)); + } + + /** + * Retrieves the downtimes content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteDowntimes() { + String path = "https://%s/api/v2/downtimes?date=%s"; + String fullURL = String.format(path, this.endpoint, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.DOWNTIMES, getJsonData(content, false)); + } + + public void getRemoteRecomputations() { + String path = "https://%s/api/v2/recomputations?date=%s"; + String fullURL = String.format(path, this.endpoint, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.RECOMPUTATIONS, getJsonData(content, true)); + } + + /** + * Returns local resource (after has been retrieved) content based on resource type + * + * @param res + * @return The extracted items JSON value as string + */ + public String getResourceJSON(ApiResource res) { + return this.data.get(res); + } + + /** + * Exectues all steps to retrieve the complete amount of the available profile, + * topology, weights and downtime information from argo-web-api + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteAll() { + // Start with report and configuration + this.getRemoteConfig(); + // parse 
+ this.parseReport();
+ // Go on to the profiles
+ this.getRemoteMetric();
+ this.getRemoteOps();
+ this.getRemoteAggregation();
+ if (!this.threshID.equals("")) this.getRemoteThresholds();
+ // Go to topology
+ this.getRemoteTopoEndpoints();
+ this.getRemoteTopoGroups();
+ // get weights
+ if (!this.weightsID.equals("")) this.getRemoteWeights();
+ // get downtimes
+ this.getRemoteDowntimes();
+ // get recomputations
+ this.getRemoteRecomputations();
+
+ }
+
+ /**
+ * Parses the report content to extract the report's name and the various profile IDs
+ */
+ public void parseReport() {
+ // check if report configuration has been retrieved
+ if (!this.data.containsKey(ApiResource.CONFIG))
+ return;
+
+ String content = this.data.get(ApiResource.CONFIG);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jProfiles = jRoot.get("profiles").getAsJsonArray();
+
+ JsonObject jInfo = jRoot.get("info").getAsJsonObject();
+ this.reportName = jInfo.get("name").getAsString();
+
+ // for each profile iterate and store its id in the profile manager for later
+ // reference
+ for (int i = 0; i < jProfiles.size(); i++) {
+ JsonObject jProf = jProfiles.get(i).getAsJsonObject();
+ String profType = jProf.get("type").getAsString();
+ String profID = jProf.get("id").getAsString();
+ if (profType.equalsIgnoreCase("metric")) {
+ this.metricID = profID;
+ } else if (profType.equalsIgnoreCase("aggregation")) {
+ this.aggregationID = profID;
+ } else if (profType.equalsIgnoreCase("operations")) {
+ this.opsID = profID;
+ } else if (profType.equalsIgnoreCase("thresholds")) {
+ this.threshID = profID;
+ }
+
+ }
+
+ }
+
+ /**
+ * Parses the Downtime content retrieved from argo-web-api and provides a list of Downtime avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public Downtime[] getListDowntimes() {
+ List<Downtime> results = new ArrayList<Downtime>();
+ if (!this.data.containsKey(ApiResource.DOWNTIMES)) {
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.DOWNTIMES);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
+ JsonObject jRoot = jElement.getAsJsonObject();
+ JsonArray jElements = jRoot.get("endpoints").getAsJsonArray();
+ for (int i = 0; i < jElements.size(); i++) {
+ JsonObject jItem= jElements.get(i).getAsJsonObject();
+ String hostname = jItem.get("hostname").getAsString();
+ String service = jItem.get("service").getAsString();
+ String startTime = jItem.get("start_time").getAsString();
+ String endTime = jItem.get("end_time").getAsString();
+
+ Downtime d = new Downtime(hostname,service,startTime,endTime);
+ results.add(d);
+ }
+
+ Downtime[] rArr = new Downtime[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+ /**
+ * Parses the Topology endpoint content retrieved from argo-web-api and provides a list of GroupEndpoint avro objects
+ * to be used in the next steps of the pipeline
+ */
+ public GroupEndpoint[] getListGroupEndpoints() {
+ List<GroupEndpoint> results = new ArrayList<GroupEndpoint>();
+ if (!this.data.containsKey(ApiResource.TOPOENDPOINTS)) {
+ GroupEndpoint[] rArr = new GroupEndpoint[results.size()];
+ rArr = results.toArray(rArr);
+ return rArr;
+ }
+
+
+ String content = this.data.get(ApiResource.TOPOENDPOINTS);
+ JsonParser jsonParser = new JsonParser();
+ JsonElement jElement = jsonParser.parse(content);
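+ // The topology endpoints resource is kept as a whole JSON array (getJsonData was called with
+ // asArray=true); each element is expected to carry "group", "type", "service", "hostname" and
+ // a "tags" object, e.g. (illustrative values only):
+ // {"group":"SITE-A","type":"SITES","service":"CREAM-CE","hostname":"ce01.example.org",
+ // "tags":{"production":"1","info.URL":"https://ce01.example.org"}}
+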
JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String service = jItem.get("service").getAsString(); + String hostname = jItem.get("hostname").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags); + results.add(ge); + } + + GroupEndpoint[] rArr = new GroupEndpoint[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Topology Groups content retrieved from argo-web-api and provides a list of GroupGroup avro objects + * to be used in the next steps of the pipeline + */ + public GroupGroup[] getListGroupGroups() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.TOPOGROUPS)){ + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + String content = this.data.get(ApiResource.TOPOGROUPS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String subgroup = jItem.get("subgroup").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupGroup gg = new GroupGroup(gType,group,subgroup,tags); + results.add(gg); + } + + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Weights content retrieved from argo-web-api and provides a list of Weights avro objects + * to be used in the next steps of the pipeline + */ + public Weight[] getListWeights() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.WEIGHTS)) { + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.WEIGHTS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String wType = jRoot.get("weight_type").getAsString(); + JsonArray jElements = jRoot.get("groups").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String group = jItem.get("name").getAsString(); + String weight = jItem.get("value").getAsString(); + + Weight w = new Weight(wType,group,weight); + results.add(w); + } + + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Metric profile content retrieved from argo-web-api and provides a list of MetricProfile avro objects + * to be used in the next steps of the pipeline + */ + public MetricProfile[] getListMetrics() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.METRIC)) { + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.METRIC); + 
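// The metric profile is kept as a single JSON object holding the profile "name" and a
+ // "services" array; each service entry carries a "service" name and a "metrics" list of
+ // metric names, e.g. (illustrative values only):
+ // {"name":"ARGO_MON","services":[{"service":"CREAM-CE","metrics":["emi.cream.CREAMCE-JobSubmit"]}]}
+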
JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String profileName = jRoot.get("name").getAsString(); + JsonArray jElements = jRoot.get("services").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String service = jItem.get("service").getAsString(); + JsonArray jMetrics = jItem.get("metrics").getAsJsonArray(); + for (int j=0; j < jMetrics.size(); j++) { + String metric = jMetrics.get(j).getAsString(); + + Map tags = new HashMap(); + MetricProfile mp = new MetricProfile(profileName,service,metric,tags); + results.add(mp); + } + + } + + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Extract first JSON item from data JSON array in api response + * + * @param content JSON content of the full repsonse (status + data) + * @return First available item in data array as JSON string representation + * + */ + private String getJsonData(String content, boolean asArray) { + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + // Get the data array and the first item + if (asArray) { + return jRoot.get("data").toString(); + } + JsonArray jData = jRoot.get("data").getAsJsonArray(); + JsonElement jItem = jData.get(0); + return jItem.toString(); + } + +} diff --git a/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java b/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java new file mode 100644 index 00000000..b73e100d --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/argo/avro/Downtime.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Downtime extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Downtime\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"start_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"end_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String start_time; + @Deprecated public java.lang.String end_time; + + /** + * Default constructor. + */ + public Downtime() {} + + /** + * All-args constructor. + */ + public Downtime(java.lang.String hostname, java.lang.String service, java.lang.String start_time, java.lang.String end_time) { + this.hostname = hostname; + this.service = service; + this.start_time = start_time; + this.end_time = end_time; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. 
+ public java.lang.Object get(int field$) { + switch (field$) { + case 0: return hostname; + case 1: return service; + case 2: return start_time; + case 3: return end_time; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: hostname = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: start_time = (java.lang.String)value$; break; + case 3: end_time = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'start_time' field. + */ + public java.lang.String getStartTime() { + return start_time; + } + + /** + * Sets the value of the 'start_time' field. + * @param value the value to set. + */ + public void setStartTime(java.lang.String value) { + this.start_time = value; + } + + /** + * Gets the value of the 'end_time' field. + */ + public java.lang.String getEndTime() { + return end_time; + } + + /** + * Sets the value of the 'end_time' field. + * @param value the value to set. + */ + public void setEndTime(java.lang.String value) { + this.end_time = value; + } + + /** Creates a new Downtime RecordBuilder */ + public static argo.avro.Downtime.Builder newBuilder() { + return new argo.avro.Downtime.Builder(); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Builder */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime.Builder other) { + return new argo.avro.Downtime.Builder(other); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Downtime instance */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime other) { + return new argo.avro.Downtime.Builder(other); + } + + /** + * RecordBuilder for Downtime instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String hostname; + private java.lang.String service; + private java.lang.String start_time; + private java.lang.String end_time; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Downtime.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Downtime.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Downtime instance */ + private Builder(argo.avro.Downtime other) { + super(argo.avro.Downtime.SCHEMA$); + if (isValidValue(fields()[0], other.hostname)) { + this.hostname = data().deepCopy(fields()[0].schema(), other.hostname); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.start_time)) { + this.start_time = data().deepCopy(fields()[2].schema(), other.start_time); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.end_time)) { + this.end_time = data().deepCopy(fields()[3].schema(), other.end_time); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.Downtime.Builder setHostname(java.lang.String value) { + validate(fields()[0], value); + this.hostname = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.Downtime.Builder clearHostname() { + hostname = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.Downtime.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.Downtime.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'start_time' field */ + public java.lang.String getStartTime() { + return start_time; + } + + /** Sets the value of the 'start_time' field */ + public argo.avro.Downtime.Builder setStartTime(java.lang.String value) { + validate(fields()[2], value); + this.start_time = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'start_time' field has been set */ + public boolean hasStartTime() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'start_time' field */ + public argo.avro.Downtime.Builder clearStartTime() { + start_time = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'end_time' field */ + public java.lang.String getEndTime() { + return end_time; + } + + /** Sets the value of the 'end_time' field */ + public argo.avro.Downtime.Builder setEndTime(java.lang.String value) { + validate(fields()[3], value); + this.end_time = value; + 
fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'end_time' field has been set */ + public boolean hasEndTime() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'end_time' field */ + public argo.avro.Downtime.Builder clearEndTime() { + end_time = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public Downtime build() { + try { + Downtime record = new Downtime(); + record.hostname = fieldSetFlags()[0] ? this.hostname : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.start_time = fieldSetFlags()[2] ? this.start_time : (java.lang.String) defaultValue(fields()[2]); + record.end_time = fieldSetFlags()[3] ? this.end_time : (java.lang.String) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java b/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java new file mode 100644 index 00000000..0238d7cf --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/argo/avro/Weight.java @@ -0,0 +1,236 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Weight extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Weight\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"site\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"weight\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String site; + @Deprecated public java.lang.String weight; + + /** + * Default constructor. + */ + public Weight() {} + + /** + * All-args constructor. + */ + public Weight(java.lang.String type, java.lang.String site, java.lang.String weight) { + this.type = type; + this.site = site; + this.weight = weight; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return site; + case 2: return weight; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: site = (java.lang.String)value$; break; + case 2: weight = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'site' field. 
+ */ + public java.lang.String getSite() { + return site; + } + + /** + * Sets the value of the 'site' field. + * @param value the value to set. + */ + public void setSite(java.lang.String value) { + this.site = value; + } + + /** + * Gets the value of the 'weight' field. + */ + public java.lang.String getWeight() { + return weight; + } + + /** + * Sets the value of the 'weight' field. + * @param value the value to set. + */ + public void setWeight(java.lang.String value) { + this.weight = value; + } + + /** Creates a new Weight RecordBuilder */ + public static argo.avro.Weight.Builder newBuilder() { + return new argo.avro.Weight.Builder(); + } + + /** Creates a new Weight RecordBuilder by copying an existing Builder */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight.Builder other) { + return new argo.avro.Weight.Builder(other); + } + + /** Creates a new Weight RecordBuilder by copying an existing Weight instance */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight other) { + return new argo.avro.Weight.Builder(other); + } + + /** + * RecordBuilder for Weight instances. + */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String site; + private java.lang.String weight; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Weight.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Weight.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Weight instance */ + private Builder(argo.avro.Weight other) { + super(argo.avro.Weight.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.site)) { + this.site = data().deepCopy(fields()[1].schema(), other.site); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.weight)) { + this.weight = data().deepCopy(fields()[2].schema(), other.weight); + fieldSetFlags()[2] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.Weight.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.Weight.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'site' field */ + public java.lang.String getSite() { + return site; + } + + /** Sets the value of the 'site' field */ + public argo.avro.Weight.Builder setSite(java.lang.String value) { + validate(fields()[1], value); + this.site = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'site' field has been set */ + public boolean hasSite() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'site' field */ + public argo.avro.Weight.Builder clearSite() { + site = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'weight' field */ + public java.lang.String getWeight() { + return weight; + } + + /** Sets the value of the 'weight' field */ + public 
argo.avro.Weight.Builder setWeight(java.lang.String value) { + validate(fields()[2], value); + this.weight = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'weight' field has been set */ + public boolean hasWeight() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'weight' field */ + public argo.avro.Weight.Builder clearWeight() { + weight = null; + fieldSetFlags()[2] = false; + return this; + } + + @Override + public Weight build() { + try { + Weight record = new Weight(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.site = fieldSetFlags()[1] ? this.site : (java.lang.String) defaultValue(fields()[1]); + record.weight = fieldSetFlags()[2] ? this.weight : (java.lang.String) defaultValue(fields()[2]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java b/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java index 1e7b94b1..f96123a1 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java @@ -2,6 +2,8 @@ import org.slf4j.LoggerFactory; +import argo.amr.ApiResource; +import argo.amr.ApiResourceManager; import argo.avro.GroupEndpoint; import argo.avro.GroupGroup; import argo.avro.MetricData; @@ -11,11 +13,8 @@ import org.slf4j.Logger; import java.util.List; -import java.util.concurrent.TimeUnit; import org.apache.flink.api.common.operators.Order; -import org.apache.flink.api.common.restartstrategy.RestartStrategies; -import org.apache.flink.api.common.time.Time; import org.apache.flink.api.java.DataSet; import org.apache.flink.api.java.ExecutionEnvironment; import org.apache.flink.api.java.io.AvroInputFormat; @@ -58,16 +57,27 @@ public static void main(String[] args) throws Exception { env.getConfig().setGlobalJobParameters(params); env.setParallelism(1); - // sync data for input - Path mps = new Path(params.getRequired("mps")); - Path egp = new Path(params.getRequired("egp")); - Path ggp = new Path(params.getRequired("ggp")); + String apiEndpoint = params.getRequired("api.endpoint"); + String apiToken = params.getRequired("api.token"); + String reportID = params.getRequired("report.id"); + + ApiResourceManager amr = new ApiResourceManager(apiEndpoint,apiToken); + + // fetch + + // set params + if (params.has("api.proxy")) { + amr.setProxy(params.get("api.proxy")); + } + + amr.setReportID(reportID); + amr.getRemoteAll(); - DataSource cfgDS = env.readTextFile(params.getRequired("conf")); - DataSource opsDS = env.readTextFile(params.getRequired("ops")); - DataSource apsDS = env.readTextFile(params.getRequired("apr")); - DataSource recDS = env.readTextFile(params.getRequired("rec")); + DataSourcecfgDS = env.fromElements(amr.getResourceJSON(ApiResource.CONFIG)); + DataSourceopsDS = env.fromElements(amr.getResourceJSON(ApiResource.OPS)); + DataSourceapsDS = env.fromElements(amr.getResourceJSON(ApiResource.AGGREGATION)); + DataSourcerecDS = env.fromElements(amr.getResourceJSON(ApiResource.RECOMPUTATIONS)); // begin with empty threshold datasource DataSource thrDS = env.fromElements(""); @@ -84,18 +94,15 @@ public static void main(String[] args) throws Exception { List confData = cfgDS.collect(); ConfigManager cfgMgr = new ConfigManager(); cfgMgr.loadJsonString(confData); - // sync data input: metric profile in avro format - 
AvroInputFormat mpsAvro = new AvroInputFormat(mps, MetricProfile.class); - DataSet mpsDS = env.createInput(mpsAvro); - - // sync data input: endpoint group topology data in avro format - AvroInputFormat egpAvro = new AvroInputFormat(egp, GroupEndpoint.class); - DataSet egpDS = env.createInput(egpAvro); - - // sync data input: group of group topology data in avro format - AvroInputFormat ggpAvro = new AvroInputFormat(ggp, GroupGroup.class); - DataSet ggpDS = env.createInput(ggpAvro); - + + DataSet mpsDS = env.fromElements(amr.getListMetrics()); + DataSet egpDS = env.fromElements(amr.getListGroupEndpoints()); + DataSet ggpDS = env.fromElements(new GroupGroup()); + GroupGroup[] listGroups = amr.getListGroupGroups(); + if (listGroups.length > 0) ggpDS = env.fromElements(amr.getListGroupGroups()); + + + // todays metric data Path in = new Path(params.getRequired("mdata")); AvroInputFormat mdataAvro = new AvroInputFormat(in, MetricData.class); @@ -105,9 +112,11 @@ public static void main(String[] args) throws Exception { Path pin = new Path(params.getRequired("pdata")); AvroInputFormat pdataAvro = new AvroInputFormat(pin, MetricData.class); DataSet pdataDS = env.createInput(pdataAvro); + + DataSet pdataCleanDS = pdataDS.flatMap(new ExcludeMetricData(params)).withBroadcastSet(recDS, "rec"); // Find the latest day - DataSet pdataMin = pdataDS.groupBy("service", "hostname", "metric") + DataSet pdataMin = pdataCleanDS.groupBy("service", "hostname", "metric") .sortGroup("timestamp", Order.DESCENDING).first(1); // Union todays data with the latest statuses from previous day @@ -161,7 +170,7 @@ public static void main(String[] args) throws Exception { String dbURI = params.getRequired("mongo.uri"); String dbMethod = params.getRequired("mongo.method"); - String reportID = cfgMgr.getReportID(); + // Initialize four mongo outputs (metric,endpoint,service,endpoint_group) MongoStatusOutput metricMongoOut = new MongoStatusOutput(dbURI,"status_metrics",dbMethod, MongoStatusOutput.StatusType.STATUS_METRIC, reportID); MongoStatusOutput endpointMongoOut = new MongoStatusOutput(dbURI,"status_endpoints",dbMethod, MongoStatusOutput.StatusType.STATUS_ENDPOINT, reportID); diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java index b36d3ec2..d77b760d 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java @@ -14,9 +14,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import ops.CAggregator; import ops.OpsManager; + +//import ops.OpsManager; import sync.AggregationProfileManager; +import timelines.TimelineAggregator; + /** * Accepts a list o status metrics grouped by the fields: endpoint group @@ -45,7 +48,7 @@ public CalcStatusEndGroup(ParameterTool params) { private String runDate; - public HashMap groupEndpointAggr; + public HashMap groupEndpointAggr; private boolean getGroup; @@ -67,7 +70,7 @@ public void open(Configuration parameters) throws IOException { // Initialize endpoint group type this.runDate = params.getRequired("run.date"); // set the Structures - this.groupEndpointAggr = new HashMap(); + this.groupEndpointAggr = new HashMap(); this.getGroup = true; } @@ -105,7 +108,7 @@ public void reduce(Iterable in, Collector out) throw // if group doesn't exist yet create it if (this.groupEndpointAggr.containsKey(group) == false) { - this.groupEndpointAggr.put(group, new CAggregator()); + 
this.groupEndpointAggr.put(group, new TimelineAggregator()); } this.groupEndpointAggr.get(group).insert(service, ts, this.opsMgr.getIntStatus(status)); @@ -119,14 +122,26 @@ public void reduce(Iterable in, Collector out) throw // Get group Operation String gop = this.apsMgr.getProfileGroupOp(aProfile, group); - - this.groupEndpointAggr.get(group).aggregate(this.opsMgr, gop); + + this.groupEndpointAggr.get(group).aggregate(this.opsMgr.getTruthTable(), this.opsMgr.getIntOperation(gop)); } // Aggregate all sites - CAggregator totalSite = new CAggregator(); + TimelineAggregator totalSite = new TimelineAggregator(); + + // Aggregate each group + for (String group : this.groupEndpointAggr.keySet()) { + for (Entry item : this.groupEndpointAggr.get(group).getSamples()) { + String ts = item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + totalSite.insert(group,ts, item.getValue()); + } + + } + + totalSite.aggregate( this.opsMgr.getTruthTable(),this.opsMgr.getIntOperation(apsMgr.getTotalOp(aProfile))); + // Aggregate each group for (String group : this.groupEndpointAggr.keySet()) { for (Entry item : this.groupEndpointAggr.get(group).getSamples()) { @@ -136,7 +151,7 @@ public void reduce(Iterable in, Collector out) throw } - totalSite.aggregate( this.opsMgr,apsMgr.getTotalOp(aProfile)); + totalSite.aggregate( this.opsMgr.getTruthTable(),this.opsMgr.getIntOperation(apsMgr.getTotalOp(aProfile))); // Append the timeline for (Entry item : totalSite.getSamples()) { diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java index c65f9e57..9d4dc938 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java @@ -14,16 +14,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.esotericsoftware.minlog.Log; - -import argo.avro.GroupGroup; import argo.avro.MetricProfile; import ops.CAggregator; import ops.OpsManager; import sync.AggregationProfileManager; -import sync.GroupGroupManager; import sync.MetricProfileManager; +import timelines.TimelineAggregator; /** @@ -50,7 +47,7 @@ public CalcStatusEndpoint(ParameterTool params) { private AggregationProfileManager apsMgr; private OpsManager opsMgr; private String runDate; - private CAggregator endpointAggr; + private TimelineAggregator endpointAggr; private boolean fillMissing; @@ -74,7 +71,7 @@ public void open(Configuration parameters) throws IOException { this.opsMgr.loadJsonString(ops); this.runDate = params.getRequired("run.date"); - this.endpointAggr = new CAggregator(); // Create aggregator + this.endpointAggr = new TimelineAggregator(); // Create aggregator this.fillMissing = true; } @@ -98,6 +95,7 @@ public void reduce(Iterable in, Collector out) throw String service =""; String endpointGroup =""; String hostname =""; + String info = ""; int dateInt = Integer.parseInt(this.runDate.replace("-", "")); @@ -129,6 +127,7 @@ public void reduce(Iterable in, Collector out) throw String ts = item.getTimestamp(); String status = item.getStatus(); String prevStatus = item.getPrevState(); + info = item.getInfo(); // Check if we are in the switch of a new metric name @@ -143,7 +142,7 @@ public void reduce(Iterable in, Collector out) throw } - this.endpointAggr.aggregate(this.opsMgr, this.apsMgr.getMetricOp(aprofile)); + this.endpointAggr.aggregate(this.opsMgr.getTruthTable(), 
this.opsMgr.getIntOperation(this.apsMgr.getMetricOp(aprofile))); // Append the timeline @@ -156,6 +155,8 @@ public void reduce(Iterable in, Collector out) throw cur.setGroup(endpointGroup); cur.setHostname(hostname); cur.setService(service); + cur.setInfo(info); + cur.setTimestamp(item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"))); diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java index 8105888a..ec9e9b92 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/CalcStatusService.java @@ -1,7 +1,6 @@ package argo.batch; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; @@ -12,19 +11,13 @@ import org.apache.flink.util.Collector; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormat; -import org.mortbay.log.Log; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import argo.avro.GroupEndpoint; -import argo.avro.GroupGroup; - -import argo.avro.MetricProfile; import ops.CAggregator; import ops.OpsManager; import sync.AggregationProfileManager; -import sync.GroupGroupManager; -import sync.MetricProfileManager; +import timelines.TimelineAggregator; /** @@ -54,7 +47,7 @@ public CalcStatusService(ParameterTool params) { private String runDate; - private CAggregator serviceAggr; + private TimelineAggregator serviceAggr; private boolean getService; @@ -75,7 +68,7 @@ public void open(Configuration parameters) throws IOException { // Initialize endpoint group type this.runDate = params.getRequired("run.date"); - this.serviceAggr = new CAggregator(); // Create aggregator + this.serviceAggr = new TimelineAggregator(); // Create aggregator this.getService = true; } @@ -121,7 +114,7 @@ public void reduce(Iterable in, Collector out) throw avGroup = this.apsMgr.getGroupByService(aProfile, service); String avOp = this.apsMgr.getProfileGroupServiceOp(aProfile, avGroup, service); - this.serviceAggr.aggregate(this.opsMgr, avOp); + this.serviceAggr.aggregate(this.opsMgr.getTruthTable(), this.opsMgr.getIntOperation(avOp)); // Append the timeline for (Entry item : this.serviceAggr.getSamples()) { diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java b/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java new file mode 100644 index 00000000..f18a4743 --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/argo/batch/ExcludeMetricData.java @@ -0,0 +1,70 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; + +import java.util.List; + + + +import org.apache.flink.api.common.functions.RichFlatMapFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import argo.avro.MetricData; + +import sync.RecomputationsManager; + +/** + * Receives MetricData and filters them by excluding monitoring engine based on recomputation information + * retrieved by broadcast variable "rec" and handled by an internal recomputation manager + */ +public class ExcludeMetricData extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public ExcludeMetricData(ParameterTool params){ + this.params = params; + } + + static Logger LOG = 
LoggerFactory.getLogger(ArgoStatusBatch.class); + + private List rec; + private RecomputationsManager recMgr; + + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get recomputation data from broadcast variable + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + + // Initialize Recomputation manager + this.recMgr = new RecomputationsManager(); + this.recMgr.loadJsonString(rec); + + } + + @Override + public void flatMap(MetricData md, Collector out) throws Exception { + + // Get monitoring host from input metric data + String monHost = md.getMonitoringHost(); + // Get timestamp from input metric data + String ts = md.getTimestamp(); + + // Check if monitoring host and metric data coincide with exclusions by monitoring + // engine in the current available recomputations + if (recMgr.isMonExcluded(monHost, ts) == true) return; + + // if not excluded collect the result in the output + out.collect(md); + + + } +} diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java b/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java index 65b52e98..aa13e71d 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/MongoStatusOutput.java @@ -109,8 +109,23 @@ private Document prepDoc(StatusMetric record) { } else if (this.sType == StatusType.STATUS_ENDPOINT) { + doc.append("service", record.getService()) .append("host", record.getHostname()); + + String info = record.getInfo(); + if (!info.equalsIgnoreCase("")) { + Document infoDoc = new Document(); + String[] kvs = info.split(","); + for (String kv : kvs) { + String[] kvtok = kv.split(":",2); + if (kvtok.length == 2){ + infoDoc.append(kvtok[0], kvtok[1]); + } + } + + doc.append("info", infoDoc); + } } else if (this.sType == StatusType.STATUS_METRIC) { @@ -153,6 +168,7 @@ private Bson prepFilter(StatusMetric record) { } else if (this.sType == StatusType.STATUS_ENDPOINT) { + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), Filters.eq("host", record.getHostname()), Filters.eq("timestamp", record.getTimestamp())); @@ -181,7 +197,8 @@ public void writeRecord(StatusMetric record) throws IOException { // Mongo Document to be prepared according to StatusType of input Document doc = prepDoc(record); - + + if (this.method == MongoMethod.UPSERT) { // Filter for upsert to be prepared according to StatusType of input diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java b/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java index 717af10a..be2803f8 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/PickEndpoints.java @@ -129,22 +129,20 @@ public void flatMap(MetricData md, Collector out) throws Exception String metric = md.getMetric(); String monHost = md.getMonitoringHost(); String ts = md.getTimestamp(); - + // Filter By monitoring engine if (recMgr.isMonExcluded(monHost, ts) == true) return; - + // Filter By aggregation profile if (apsMgr.checkService(aprof, service) == false) return; - + // Filter By metric profile if (mpsMgr.checkProfileServiceMetric(prof, service, metric) == false) return; - - - // Filter By endpoint group if belongs to supergroup ArrayList groupnames = egpMgr.getGroup(egroupType, hostname, 
service); + for (String groupname : groupnames) { if (ggpMgr.checkSubGroup(groupname) == true){ // Create a StatusMetric output @@ -178,7 +176,12 @@ public void flatMap(MetricData md, Collector out) throws Exception } - StatusMetric sm = new StatusMetric(groupname,md.getService(),md.getHostname(),md.getMetric(), status,md.getTimestamp(),dateInt,timeInt,md.getSummary(),md.getMessage(),"","",actualData, ogStatus, ruleApplied); + + + + String info = this.egpMgr.getInfo(groupname, egroupType, md.getHostname(), md.getService()); + + StatusMetric sm = new StatusMetric(groupname,md.getService(),md.getHostname(),md.getMetric(), status,md.getTimestamp(),dateInt,timeInt,md.getSummary(),md.getMessage(),"","",actualData, ogStatus, ruleApplied,info); out.collect(sm); } diff --git a/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java b/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java index 9bf147c1..df5bb583 100644 --- a/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java +++ b/flink_jobs/batch_status/src/main/java/argo/batch/StatusMetric.java @@ -19,6 +19,7 @@ public class StatusMetric { private String actualData; private String ogStatus; // original status from moniting host private String ruleApplied; // threshold rule applied - empty if not + private String info; // extra endpoint information provided by the topology public StatusMetric(){ this.group = ""; @@ -36,10 +37,11 @@ public StatusMetric(){ this.actualData = ""; this.ogStatus = ""; this.ruleApplied = ""; + this.info = ""; } public StatusMetric(String group, String service, String hostname, String metric, String status, String timestamp, - int dateInt, int timeInt, String summary, String message, String prevState, String prevTs, String actualData, String ogStatus, String ruleApplied) { + int dateInt, int timeInt, String summary, String message, String prevState, String prevTs, String actualData, String ogStatus, String ruleApplied, String info) { this.group = group; this.service = service; @@ -56,6 +58,7 @@ public StatusMetric(String group, String service, String hostname, String metric this.actualData = actualData; this.ogStatus = ogStatus; this.ruleApplied = ruleApplied; + this.info = info; } @@ -158,10 +161,18 @@ public void setRuleApplied(String ruleApplied) { this.ruleApplied = ruleApplied; } + public String getInfo() { + return this.info; + } + + public void setInfo(String info) { + this.info = info; + } + @Override public String toString() { return "(" + this.group + "," + this.service + "," + this.hostname + "," + this.metric + "," + this.status + "," + this.timestamp + "," + - this.dateInt + "," + this.timeInt + "," + this.prevState + "," + this.prevTs + "," + this.actualData + "," + this.ogStatus + "," + this.ruleApplied + ")"; + this.dateInt + "," + this.timeInt + "," + this.prevState + "," + this.prevTs + "," + this.actualData + "," + this.ogStatus + "," + this.ruleApplied + "," + this.info + ")"; } } diff --git a/flink_jobs/batch_status/src/main/java/ops/OpsManager.java b/flink_jobs/batch_status/src/main/java/ops/OpsManager.java index 341c9260..f0a262bc 100644 --- a/flink_jobs/batch_status/src/main/java/ops/OpsManager.java +++ b/flink_jobs/batch_status/src/main/java/ops/OpsManager.java @@ -22,290 +22,236 @@ public class OpsManager { - private static final Logger LOG = Logger.getLogger(OpsManager.class.getName()); + private static final Logger LOG = Logger.getLogger(OpsManager.class.getName()); - private HashMap states; - private HashMap ops; - private ArrayList revStates; - private 
ArrayList revOps; + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; - private int[][][] truthTable; + private int[][][] truthTable; - private String defaultDownState; - private String defaultMissingState; - private String defaultUnknownState; + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; - private boolean order; + private boolean order; - public OpsManager() { - this.states = new HashMap(); - this.ops = new HashMap(); - this.revStates = new ArrayList(); - this.revOps = new ArrayList(); + public OpsManager() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); - this.truthTable = null; + this.truthTable = null; - this.order = false; + this.order = false; - } + } - public OpsManager(boolean _order) { - this.states = new HashMap(); - this.ops = new HashMap(); - this.revStates = new ArrayList(); - this.revOps = new ArrayList(); - this.order = _order; + public OpsManager(boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; - this.truthTable = null; - } + this.truthTable = null; + } - public String getDefaultDown() { - return this.defaultDownState; - } + public String getDefaultDown() { + return this.defaultDownState; + } - public String getDefaultUnknown() { - return this.defaultUnknownState; - } + public String getDefaultUnknown() { + return this.defaultUnknownState; + } - public int getDefaultUnknownInt() { - return this.getIntStatus(this.defaultUnknownState); - } + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } - public int getDefaultDownInt() { - return this.getIntStatus(this.defaultDownState); - } + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } - public String getDefaultMissing() { - return this.defaultMissingState; - } - - public int getDefaultMissingInt() { - return this.getIntStatus(this.defaultMissingState); - } - - public void clear() { - this.states = new HashMap(); - this.ops = new HashMap(); - this.revStates = new ArrayList(); - this.revOps = new ArrayList(); - - this.truthTable = null; - } - - public int opInt(int op, int a, int b) { - int result = -1; - try { - result = this.truthTable[op][a][b]; - } catch (IndexOutOfBoundsException ex) { - LOG.info(ex); - result = -1; - } - - return result; - } - - public int opInt(String op, String a, String b) { - - int opInt = this.ops.get(op); - int aInt = this.states.get(a); - int bInt = this.states.get(b); - - return this.truthTable[opInt][aInt][bInt]; - } - - public String op(int op, int a, int b) { - return this.revStates.get(this.truthTable[op][a][b]); - } - - public String op(String op, String a, String b) { - int opInt = this.ops.get(op); - int aInt = this.states.get(a); - int bInt = this.states.get(b); - - return this.revStates.get(this.truthTable[opInt][aInt][bInt]); - } - - public String getStrStatus(int status) { - return this.revStates.get(status); - } - - public int getIntStatus(String status) { - return this.states.get(status); - } - - public String getStrOperation(int op) { - return this.revOps.get(op); - } - - public int getIntOperation(String op) { - return this.ops.get(op); - } - - public ArrayList availableStates() { - - return this.revStates; - } - - public ArrayList availableOps() { - return this.revOps; - } - - public void 
loadJson(File jsonFile) throws IOException { - // Clear data - this.clear(); - - BufferedReader br = null; - try { - br = new BufferedReader(new FileReader(jsonFile)); - - JsonParser json_parser = new JsonParser(); - JsonElement j_element = json_parser.parse(br); - JsonObject j_obj = j_element.getAsJsonObject(); - JsonArray j_states = j_obj.getAsJsonArray("available_states"); - JsonArray j_ops = j_obj.getAsJsonArray("operations"); - this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); - this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); - this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); - // Collect the available states - for (int i = 0; i < j_states.size(); i++) { - this.states.put(j_states.get(i).getAsString(), i); - this.revStates.add(j_states.get(i).getAsString()); - - } - - // Collect the available operations - int i = 0; - for (JsonElement item : j_ops) { - JsonObject jObjItem = item.getAsJsonObject(); - this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); - this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); - i++; - } - // Initialize the truthtable - int num_ops = this.revOps.size(); - int num_states = this.revStates.size(); - this.truthTable = new int[num_ops][num_states][num_states]; - - for (int[][] surface : this.truthTable) { - for (int[] line : surface) { - Arrays.fill(line, -1); - } - } - - // Fill the truth table - for (JsonElement item : j_ops) { - JsonObject jObjItem = item.getAsJsonObject(); - String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); - JsonArray tops = jObjItem.getAsJsonArray("truth_table"); - // System.out.println(tops); - - for (int j = 0; j < tops.size(); j++) { - // System.out.println(opname); - JsonObject row = tops.get(j).getAsJsonObject(); - - int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); - int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); - int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); - int op_val = this.ops.get(opname); - - // Fill in truth table - // Check if order sensitivity is off so to insert two truth - // values - // ...[a][b] and [b][a] - this.truthTable[op_val][a_val][b_val] = x_val; - if (!this.order) { - this.truthTable[op_val][b_val][a_val] = x_val; - } - } - } - } catch (FileNotFoundException ex) { - LOG.error("Could not open file:" + jsonFile.getName()); - throw ex; - - } catch (JsonParseException ex) { - LOG.error("File is not valid json:" + jsonFile.getName()); - throw ex; - } finally { - // Close quietly without exceptions the buffered reader - IOUtils.closeQuietly(br); - } - - } - - public void loadJsonString(List opsJson) throws JsonParseException { - // Clear data - this.clear(); - - try { - - - JsonParser json_parser = new JsonParser(); - // Grab the first - and only line of json from ops data - JsonElement j_element = json_parser.parse(opsJson.get(0)); - JsonObject j_obj = j_element.getAsJsonObject(); - JsonArray j_states = j_obj.getAsJsonArray("available_states"); - JsonArray j_ops = j_obj.getAsJsonArray("operations"); - this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); - this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); - this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); - // Collect the 
available states - for (int i = 0; i < j_states.size(); i++) { - this.states.put(j_states.get(i).getAsString(), i); - this.revStates.add(j_states.get(i).getAsString()); - - } - - // Collect the available operations - int i = 0; - for (JsonElement item : j_ops) { - JsonObject jObjItem = item.getAsJsonObject(); - this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); - this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); - i++; - } - // Initialize the truthtable - int num_ops = this.revOps.size(); - int num_states = this.revStates.size(); - this.truthTable = new int[num_ops][num_states][num_states]; - - for (int[][] surface : this.truthTable) { - for (int[] line : surface) { - Arrays.fill(line, -1); - } - } - - // Fill the truth table - for (JsonElement item : j_ops) { - JsonObject jObjItem = item.getAsJsonObject(); - String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); - JsonArray tops = jObjItem.getAsJsonArray("truth_table"); - // System.out.println(tops); - - for (int j = 0; j < tops.size(); j++) { - // System.out.println(opname); - JsonObject row = tops.get(j).getAsJsonObject(); - - int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); - int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); - int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); - int op_val = this.ops.get(opname); - - // Fill in truth table - // Check if order sensitivity is off so to insert two truth - // values - // ...[a][b] and [b][a] - this.truthTable[op_val][a_val][b_val] = x_val; - if (!this.order) { - this.truthTable[op_val][b_val][a_val] = x_val; - } - } - } - - } catch (JsonParseException ex) { - LOG.error("Not valid json contents"); - throw ex; - } - - } + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } + + public int opInt(String op, String a, String b) { + + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } + + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } + + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } + + public String getStrStatus(int status) { + return this.revStates.get(status); + } + + public int getIntStatus(String status) { + return this.states.get(status); + } + + public String getStrOperation(int op) { + return this.revOps.get(op); + } + + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + 
JsonElement j_element = json_parser.parse(br); + readJson(j_element); + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + private void readJson(JsonElement j_element) { + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } + + public void loadJsonString(List opsJson) throws JsonParseException { + // Clear data + this.clear(); + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson.get(0)); + readJson(j_element); + } + public int[][][] getTruthTable() { + return truthTable; + } + + public void setTruthTable(int[][][] truthTable) { + this.truthTable = truthTable; + } } diff --git a/flink_jobs/batch_status/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/batch_status/src/main/java/sync/EndpointGroupManager.java index ab56a7dd..cc5dbbdd 100644 --- a/flink_jobs/batch_status/src/main/java/sync/EndpointGroupManager.java +++ b/flink_jobs/batch_status/src/main/java/sync/EndpointGroupManager.java @@ -5,12 +5,10 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.TreeMap; 
import java.util.Map.Entry; import org.apache.avro.Schema; -import org.apache.avro.Schema.Field; import org.apache.avro.file.DataFileReader; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericDatumReader; @@ -21,7 +19,6 @@ import org.apache.log4j.Logger; import argo.avro.GroupEndpoint; -import argo.avro.MetricProfile; public class EndpointGroupManager { @@ -92,11 +89,41 @@ public ArrayList getGroup(String type, String hostname, String service) return results; } + + public String getInfo(String group, String type, String hostname, String service) { + String info = ""; + boolean first = true; + HashMap tags = this.getGroupTags(group, type, hostname, service); + + if (tags == null) return info; + + for (String tName : tags.keySet()) { + + if (tName.startsWith("info.")) { + + String infoName = tName.replaceFirst("info.", ""); + + String value = tags.get(tName); + + if (!value.equalsIgnoreCase("")) { + + if (!first) { + info = info + ","; + } else { + first = false; + } + info = info + infoName+ ":" + value; + + } + } + } + return info; + } - public HashMap getGroupTags(String type, String hostname, String service) { + public HashMap getGroupTags(String group, String type, String hostname, String service) { for (EndpointItem item : fList) { - if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + if (item.group.equals(group) && item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { return item.tags; } } diff --git a/flink_jobs/batch_status/src/main/java/timelines/Timeline.java b/flink_jobs/batch_status/src/main/java/timelines/Timeline.java new file mode 100644 index 00000000..c3cd4284 --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/timelines/Timeline.java @@ -0,0 +1,283 @@ +package timelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; +import ops.OpsManager; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Timeline { + + private LocalDate date; + + static Logger LOG = LoggerFactory.getLogger(Timeline.class); + + private TreeMap samples; + + public Timeline() { + this.date = null; + this.samples = new TreeMap(); + + } + + public Timeline(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + } + + Timeline(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + this.samples.put(tmp_date, state); + + } + + public int get(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + return this.samples.floorEntry(tmp_date).getValue(); + } + + public int get(DateTime point) { + if (this.samples.floorEntry(point) == null) { + + throw new RuntimeException("no item found in timeline, size of timeline:" + this.samples.size() + "," + point.toString()); + } + return this.samples.floorEntry(point).getValue(); + } + + public void insert(String timestamp, int status) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples.put(tmp_date, status); + } + + public void insert(DateTime date, int status) { + samples.put(date, status); + + } + + public void insertStringTimeStamps(TreeMap timestamps) { + for (String dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + + } + } + + public void insertDateTimeStamps(TreeMap timestamps) { + for (DateTime dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + } + } + + public void setFirst(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples = new TreeMap(); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.samples.put(tmp_date, state); + this.optimize(); + } + + public void clear() { + this.samples.clear(); + } + + public void bulkInsert(Set> samples) { + this.samples.clear(); + for (Map.Entry entry : samples) { + this.samples.put(entry.getKey(), entry.getValue()); + } + } + + public Set> getSamples() { + return samples.entrySet(); + } + + public LocalDate getDate() { + return this.date; + } + + public int getLength() { + return this.samples.size(); + } + + public boolean isEmpty() { + return this.samples.isEmpty(); + } + + public void optimize() { + TreeMap optimal = new TreeMap(); + int prevstate = -1; + for (DateTime key : this.samples.keySet()) { + int value = this.samples.get(key); + if (prevstate == -1) 
{ + + optimal.put(key, value); + prevstate = value; + + } + if (prevstate != value) { + optimal.put(key, value); + prevstate = value; + } + } + + this.samples = optimal; + } +// + + public Set getPoints() { + return this.samples.keySet(); + } + + public void aggregate(Timeline second, int[][][] truthTable, int op) { + if (this.isEmpty()) { + this.bulkInsert(second.getSamples()); + // Optimize even when we have a single timeline for aggregation + this.optimize(); + return; + } + + Timeline result = new Timeline(); + + // Slice for first + for (DateTime point : this.getPoints()) { + result.insert(point, -1); + } + // Slice for second + for (DateTime point : second.getPoints()) { + result.insert(point, -1); + } + + // Iterate over result and ask + for (DateTime point : result.getPoints()) { + int a = this.get(point); + int b = second.get(point); + if (a != -1 && b != -1) { + int x = -1; + try { + x = truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + // LOG.info(ex); + x = -1; + } + result.insert(point, x); + } + } + + result.optimize(); + + // Engrave the result in this timeline + this.clear(); + this.bulkInsert(result.getSamples()); + } + + public TreeMap buildStringTimeStampMap(ArrayList timestampList, OpsManager op) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + String time = timestamp[0]; + int status = op.getIntStatus(timestamp[1]); + timestampMap.put(time, status); + } + return timestampMap; + + } + + public TreeMap buildDateTimeStampMap(ArrayList timestampList, OpsManager op) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp[0]); + int status = op.getIntStatus(timestamp[1]); + timestampMap.put(tmp_date, status); + } + return timestampMap; + + } + + public void removeTimeStamp(DateTime timestamp) { + + if (this.samples.containsKey(timestamp)) { + Iterator iter = this.samples.keySet().iterator(); + while (iter.hasNext()) { + DateTime tmpTimestamp = (DateTime) iter.next(); + if (tmpTimestamp.equals(timestamp)) { + iter.remove(); + break; + } + } + } + + } + + public int calcStatusChanges() { + + return this.samples.size() - 1; + } + + @Override + public int hashCode() { + int hash = 7; + hash = 83 * hash + Objects.hashCode(this.date); + hash = 83 * hash + Objects.hashCode(this.samples); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Timeline other = (Timeline) obj; + if (!Objects.equals(this.date, other.date)) { + return false; + } + if (!Objects.equals(this.samples, other.samples)) { + return false; + } + return true; + } + +} diff --git a/flink_jobs/batch_status/src/main/java/timelines/TimelineAggregator.java b/flink_jobs/batch_status/src/main/java/timelines/TimelineAggregator.java new file mode 100644 index 00000000..fdf72faa --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/timelines/TimelineAggregator.java @@ -0,0 +1,99 @@ +package timelines; + + + +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; 
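Note: before the aggregator classes that follow, a short standalone sketch (not part of the patch) of how the Timeline class above is meant to be driven. The two-state truth table is a toy stand-in for the array that OpsManager.getTruthTable() would normally supply; the state indices 0/1 are arbitrary labels for OK/CRITICAL in this sketch only.

import timelines.Timeline;

public class TimelineSketch {
    public static void main(String[] args) {
        // Toy truth table: one operation ("AND" at index 0) over two states.
        int[][][] andTable = new int[1][2][2];
        andTable[0][0][0] = 0;  // AND(OK, OK)             -> OK
        andTable[0][0][1] = 1;  // AND(OK, CRITICAL)       -> CRITICAL
        andTable[0][1][0] = 1;  // AND(CRITICAL, OK)       -> CRITICAL
        andTable[0][1][1] = 1;  // AND(CRITICAL, CRITICAL) -> CRITICAL
        int andOp = 0;

        Timeline metricA = new Timeline("2020-11-10T00:00:00Z");
        metricA.insert("2020-11-10T00:00:00Z", 0);  // OK from midnight
        metricA.insert("2020-11-10T09:00:00Z", 1);  // CRITICAL from 09:00

        Timeline metricB = new Timeline("2020-11-10T00:00:00Z");
        metricB.insert("2020-11-10T00:00:00Z", 0);  // OK all day

        // aggregate() slices both timelines on the union of their change points,
        // combines states through the truth table and collapses duplicates via optimize().
        metricA.aggregate(metricB, andTable, andOp);
        System.out.println(metricA.getSamples());   // OK until 09:00, CRITICAL afterwards
    }
}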
+public class TimelineAggregator { + + private Timeline output; + private Map inputs; + + public TimelineAggregator(String timestamp) throws ParseException + { + this.output = new Timeline(timestamp); + this.inputs = new HashMap(); + } + + public TimelineAggregator(){ + this.output = new Timeline(); + this.inputs = new HashMap(); + + } + + public void clear(){ + this.output.clear(); + this.inputs.clear(); + } + + public String tsFromDate(String date){ + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd"); + tmp_date = fmt.parseDateTime(date); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + return tmp_date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + } + + public void createTimeline(String name, String timestamp, int prevState){ + Timeline temp = new Timeline(timestamp,prevState); + this.inputs.put(name, temp); + } + + public void insert(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) + { + Timeline temp = new Timeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).insert(timestamp, status); + } + + public void setFirst(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) + { + Timeline temp = new Timeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).setFirst(timestamp, status); + } + + public LocalDate getDate(){ + return output.getDate(); + } + + public Set> getSamples(){ + return this.output.getSamples(); + } + + + public void clearAndSetDate(String timestamp) + { + this.output = new Timeline(timestamp); + this.inputs.clear(); + + } + + public void aggregate(int[][][] truthTable, int op ){ + this.output.clear(); + + //Iterate through all available input timelines and aggregate + for (Timeline item : this.inputs.values()) { + this.output.aggregate(item, truthTable, op ); + } + + } +} diff --git a/flink_jobs/batch_status/src/main/java/timelines/TimelineMerger.java b/flink_jobs/batch_status/src/main/java/timelines/TimelineMerger.java new file mode 100644 index 00000000..5d2049d1 --- /dev/null +++ b/flink_jobs/batch_status/src/main/java/timelines/TimelineMerger.java @@ -0,0 +1,53 @@ +package timelines; + +import java.util.ArrayList; +import ops.OpsManager; + +public class TimelineMerger { + + static private Timeline output=new Timeline(); + static private ArrayList inputs=new ArrayList<>(); + + static public void clear() { + output.clear(); + inputs.clear(); + } + + static public void aggregate(int[][][] truthTable, int op) { + output.clear(); + inputs.clear(); + + //Iterate through all available input timelines and aggregate + for (Timeline item : inputs) { + output.aggregate(item, truthTable, op); + } + } + + static public void aggregate(ArrayList inputsT, int[][][] truthTable, int op) { + output.clear(); + inputs.clear(); + + inputs = inputsT; + //Iterate through all available input timelines and aggregate + for (Timeline item : inputs) { + output.aggregate(item, truthTable, op); + } + } + + static public Timeline getOutput() { + return output; + } + + static public void setOutput(Timeline outputT) { + output = outputT; + } + + static public ArrayList getInputs() { + return inputs; + } + + static public void setInputs(ArrayList inputsT) { + inputs = inputsT; + } + +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/agg_profile.json 
b/flink_jobs/batch_status/src/main/resources/amr/agg_profile.json new file mode 100644 index 00000000..66f9474d --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/agg_profile.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "date": "2020-06-24", + "name": "test-agg2", + "namespace": "", + "endpoint_group": "servicegroups", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "test-mon", + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4" + }, + "groups": [ + { + "name": "webportal", + "operation": "AND", + "services": [ + { + "name": "WebPortal", + "operation": "OR" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_AGGREGATION.json b/flink_jobs/batch_status/src/main/resources/amr/data_AGGREGATION.json new file mode 100644 index 00000000..f0f40f2c --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_AGGREGATION.json @@ -0,0 +1 @@ +{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","date":"2020-06-24","name":"test-agg2","namespace":"","endpoint_group":"servicegroups","metric_operation":"AND","profile_operation":"AND","metric_profile":{"name":"test-mon","id":"92fa5d74-015c-4122-b8b9-7b344f3154d4"},"groups":[{"name":"webportal","operation":"AND","services":[{"name":"WebPortal","operation":"OR"}]}]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_CONFIG.json b/flink_jobs/batch_status/src/main/resources/amr/data_CONFIG.json new file mode 100644 index 00000000..8220787f --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_CONFIG.json @@ -0,0 +1 @@ +{"id":"f29eeb59-ab38-4aa0-b372-5d3c0709dfb2","tenant":"demo","disabled":false,"info":{"name":"Critical","description":"test report","created":"2020-09-24 12:05:04","updated":"2020-10-08 09:32:46"},"thresholds":{"availability":80,"reliability":85,"uptime":0.8,"unknown":0.1,"downtime":0.1},"topology_schema":{"group":{"type":"PROJECT","group":{"type":"SERVICEGROUPS"}}},"profiles":[{"id":"92fa5d74-015c-4122-b8b9-7b344f3154d4","name":"test-mon","type":"metric"},{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","name":"test-agg2","type":"aggregation"},{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","name":"demo_ops","type":"operations"},{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","name":"endpoint_example","type":"thresholds"}],"filter_tags":[]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_DOWNTIMES.json b/flink_jobs/batch_status/src/main/resources/amr/data_DOWNTIMES.json new file mode 100644 index 00000000..b7d181aa --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_DOWNTIMES.json @@ -0,0 +1 @@ +{"date":"2020-11-10","endpoints":[{"hostname":"hostA.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortald","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"}]} \ No newline at end of file diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_METRIC.json b/flink_jobs/batch_status/src/main/resources/amr/data_METRIC.json new file mode 100644 index 00000000..b4681fcb --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_METRIC.json @@ -0,0 +1 @@ 
+{"id":"392fa5d74-015c-4122-b8b9-7b344f3154d4","date":"2020-09-24","name":"test-mon","description":"Generic monitoring profile","services":[{"service":"WebPortal","metrics":["org.nagios.WebCheck"]}]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_OPS.json b/flink_jobs/batch_status/src/main/resources/amr/data_OPS.json new file mode 100644 index 00000000..ff505f0a --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_OPS.json @@ -0,0 +1 @@ +{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","date":"2020-06-24","name":"demo_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_RECOMPUTATIONS.json b/flink_jobs/batch_status/src/main/resources/amr/data_RECOMPUTATIONS.json new file mode 100644 index 00000000..052b03aa --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_RECOMPUTATIONS.json @@ -0,0 +1 @@ +[{"id":"56db4f1a-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-01-21T23:01:00Z","end_time":"2018-01-23T12:01:00Z","report":"Critical","exclude":["SITE-1","SITE-2"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]},{"id":"66db4f55-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-05-21T23:01:00Z","end_time":"2018-05-23T12:01:00Z","report":"Critical","exclude":["SITE-3","SITE-4"],"status":"done","timestamp":"2018-06-17 
17:03:55","history":[{"status":"pending","timestamp":"2018-06-30T11:41:26Z"}]},{"id":"76db4444-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-09-10T23:01:00Z","end_time":"2018-09-15T12:01:00Z","report":"Critical","exclude":["SITE-6","SITE-7","SITE-8"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]}] diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_THRESHOLDS.json b/flink_jobs/batch_status/src/main/resources/amr/data_THRESHOLDS.json new file mode 100644 index 00000000..453e5bdf --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_THRESHOLDS.json @@ -0,0 +1 @@ +{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","date":"2015-01-01","name":"endpoint_example","rules":[{"host":"host1.foo.bar","metric":"service.freshness","thresholds":"freshness=1s;;0:;"}]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_TOPOENDPOINTS.json b/flink_jobs/batch_status/src/main/resources/amr/data_TOPOENDPOINTS.json new file mode 100644 index 00000000..10dd42cf --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_TOPOENDPOINTS.json @@ -0,0 +1 @@ +[{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host1.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupB","type":"SERVICEGROUPS","service":"webPortal","hostname":"host3.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host2.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}}] diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_TOPOGROUPS.json b/flink_jobs/batch_status/src/main/resources/amr/data_TOPOGROUPS.json new file mode 100644 index 00000000..1c8e4316 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_TOPOGROUPS.json @@ -0,0 +1 @@ +[{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-101","tags":{"monitored":"0","scope":"Local"}},{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-202","tags":{"monitored":"1","scope":"Local"}}] diff --git a/flink_jobs/batch_status/src/main/resources/amr/data_WEIGHTS.json b/flink_jobs/batch_status/src/main/resources/amr/data_WEIGHTS.json new file mode 100644 index 00000000..399c31c1 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/data_WEIGHTS.json @@ -0,0 +1 @@ +{"id":"3b9602ed-49ec-42f3-8df7-7c35331ebf69","date":"2020-09-02","name":"demo","weight_type":"computationpower","group_type":"SERVICEGROUPS","groups":[{"name":"GROUP-A","value":366},{"name":"GROUP-B","value":4000},{"name":"GROUP-C","value":19838},{"name":"GROUP-D","value":19838}]} diff --git a/flink_jobs/batch_status/src/main/resources/amr/downtimes.json b/flink_jobs/batch_status/src/main/resources/amr/downtimes.json new file mode 100644 index 00000000..7bf3adee --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/downtimes.json @@ -0,0 +1,31 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "endpoints": [ + { + "hostname": "hostA.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + }, + { + "hostname": "hostB.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + 
}, + { + "hostname": "hostB.foo", + "service": "WebPortald", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/metric_profile.json b/flink_jobs/batch_status/src/main/resources/amr/metric_profile.json new file mode 100644 index 00000000..7ea5a470 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/metric_profile.json @@ -0,0 +1,22 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "392fa5d74-015c-4122-b8b9-7b344f3154d4", + "date": "2020-09-24", + "name": "test-mon", + "description": "Generic monitoring profile", + "services": [ + { + "service": "WebPortal", + "metrics": [ + "org.nagios.WebCheck" + ] + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/ops_profile.json b/flink_jobs/batch_status/src/main/resources/amr/ops_profile.json new file mode 100644 index 00000000..9b00f14b --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/ops_profile.json @@ -0,0 +1,248 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "date": "2020-06-24", + "name": "demo_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", 
+ "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/recomputations.json b/flink_jobs/batch_status/src/main/resources/amr/recomputations.json new file mode 100644 index 00000000..b597ad09 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/recomputations.json @@ -0,0 +1,72 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "56db4f1a-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-01-21T23:01:00Z", + "end_time": "2018-01-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-1", + "SITE-2" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + }, + { + "id": "66db4f55-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-05-21T23:01:00Z", + "end_time": "2018-05-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-3", + "SITE-4" + ], + "status": "done", + "timestamp": "2018-06-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-06-30T11:41:26Z" + } + ] + }, + { + "id": "76db4444-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-09-10T23:01:00Z", + "end_time": "2018-09-15T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-6", + "SITE-7", + "SITE-8" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/report.json b/flink_jobs/batch_status/src/main/resources/amr/report.json new file mode 100644 index 00000000..fa5a5f65 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/report.json @@ -0,0 +1,57 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "f29eeb59-ab38-4aa0-b372-5d3c0709dfb2", + "tenant": "demo", + "disabled": false, + "info": { + "name": "Critical", + "description": "test report", + "created": "2020-09-24 12:05:04", + "updated": "2020-10-08 09:32:46" + }, + "thresholds": { + "availability": 80, + "reliability": 85, + "uptime": 0.8, + "unknown": 0.1, + "downtime": 0.1 + }, + "topology_schema": { + "group": { + "type": "PROJECT", + "group": { + "type": "SERVICEGROUPS" + } + } + }, + "profiles": [ + { + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4", + "name": "test-mon", + "type": "metric" + }, + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "name": "test-agg2", + "type": "aggregation" + }, + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "name": "demo_ops", + "type": "operations" + }, + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "name": "endpoint_example", + "type": "thresholds" + } + ], + "filter_tags": 
[] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/thresholds.json b/flink_jobs/batch_status/src/main/resources/amr/thresholds.json new file mode 100644 index 00000000..1c1ac3fb --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/thresholds.json @@ -0,0 +1,20 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "date": "2015-01-01", + "name": "endpoint_example", + "rules": [ + { + "host": "host1.foo.bar", + "metric": "service.freshness", + "thresholds": "freshness=1s;;0:;" + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/topoendpoints.json b/flink_jobs/batch_status/src/main/resources/amr/topoendpoints.json new file mode 100644 index 00000000..2b1cfed5 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/topoendpoints.json @@ -0,0 +1,44 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host1.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupB", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host3.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host2.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/topogroups.json b/flink_jobs/batch_status/src/main/resources/amr/topogroups.json new file mode 100644 index 00000000..6286cc55 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/topogroups.json @@ -0,0 +1,28 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-101", + "tags": { + "monitored": "0", + "scope": "Local" + } + }, + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-202", + "tags": { + "monitored": "1", + "scope": "Local" + } + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/amr/weights.json b/flink_jobs/batch_status/src/main/resources/amr/weights.json new file mode 100644 index 00000000..fc1dea3f --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/amr/weights.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3b9602ed-49ec-42f3-8df7-7c35331ebf69", + "date": "2020-09-02", + "name": "demo", + "weight_type": "computationpower", + "group_type": "SERVICEGROUPS", + "groups": [ + { + "name": "GROUP-A", + "value": 366 + }, + { + "name": "GROUP-B", + "value": 4000 + }, + { + "name": "GROUP-C", + "value": 19838 + }, + { + "name": "GROUP-D", + "value": 19838 + } + ] + } + ] +} diff --git a/flink_jobs/batch_status/src/main/resources/avro/downtimes_v2.avro b/flink_jobs/batch_status/src/main/resources/avro/downtimes_v2.avro new file mode 100644 index 00000000..b31d809c Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/downtimes_v2.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_info.avro b/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_info.avro new file mode 100644 index 00000000..0f388be0 Binary files 
/dev/null and b/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_info.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_v2.avro b/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_v2.avro new file mode 100644 index 00000000..68b4dbf9 Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/group_endpoints_v2.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/group_example.avro b/flink_jobs/batch_status/src/main/resources/avro/group_example.avro new file mode 100644 index 00000000..e71785ad Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/group_example.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/group_groups_v2.avro b/flink_jobs/batch_status/src/main/resources/avro/group_groups_v2.avro new file mode 100644 index 00000000..d4e82bfe Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/group_groups_v2.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/groups_info.avro b/flink_jobs/batch_status/src/main/resources/avro/groups_info.avro new file mode 100644 index 00000000..dab57b89 Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/groups_info.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/poem_sync_v2.avro b/flink_jobs/batch_status/src/main/resources/avro/poem_sync_v2.avro new file mode 100644 index 00000000..fac926ca Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/poem_sync_v2.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/poem_url_services.avro b/flink_jobs/batch_status/src/main/resources/avro/poem_url_services.avro new file mode 100644 index 00000000..a3212204 Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/poem_url_services.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/avro/weights_v2.avro b/flink_jobs/batch_status/src/main/resources/avro/weights_v2.avro new file mode 100644 index 00000000..7b45958b Binary files /dev/null and b/flink_jobs/batch_status/src/main/resources/avro/weights_v2.avro differ diff --git a/flink_jobs/batch_status/src/main/resources/ops/EGI-algorithm.json b/flink_jobs/batch_status/src/main/resources/ops/EGI-algorithm.json index b88d8c99..45a8e9b5 100644 --- a/flink_jobs/batch_status/src/main/resources/ops/EGI-algorithm.json +++ b/flink_jobs/batch_status/src/main/resources/ops/EGI-algorithm.json @@ -1,239 +1 @@ -{ - "id": "1b0318f0-429d-44fc-8bba-07184354c73b", - "name": "egi_ops", - "available_states": [ - "OK", - "WARNING", - "UNKNOWN", - "MISSING", - "CRITICAL", - "DOWNTIME" - ], - "defaults": { - "down": "DOWNTIME", - "missing": "MISSING", - "unknown": "UNKNOWN" - }, - "operations": [ - { - "name": "AND", - "truth_table": [ - { - "a": "OK", - "b": "OK", - "x": "OK" - }, - { - "a": "OK", - "b": "WARNING", - "x": "WARNING" - }, - { - "a": "OK", - "b": "UNKNOWN", - "x": "UNKNOWN" - }, - { - "a": "OK", - "b": "MISSING", - "x": "MISSING" - }, - { - "a": "OK", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "OK", - "b": "DOWNTIME", - "x": "DOWNTIME" - }, - { - "a": "WARNING", - "b": "WARNING", - "x": "WARNING" - }, - { - "a": "WARNING", - "b": "UNKNOWN", - "x": "UNKNOWN" - }, - { - "a": "WARNING", - "b": "MISSING", - "x": "MISSING" - }, - { - "a": "WARNING", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "WARNING", - "b": "DOWNTIME", - "x": "DOWNTIME" - }, - { - "a": "UNKNOWN", - "b": "UNKNOWN", - 
"x": "UNKNOWN" - }, - { - "a": "UNKNOWN", - "b": "MISSING", - "x": "MISSING" - }, - { - "a": "UNKNOWN", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "UNKNOWN", - "b": "DOWNTIME", - "x": "DOWNTIME" - }, - { - "a": "MISSING", - "b": "MISSING", - "x": "MISSING" - }, - { - "a": "MISSING", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "MISSING", - "b": "DOWNTIME", - "x": "DOWNTIME" - }, - { - "a": "CRITICAL", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "CRITICAL", - "b": "DOWNTIME", - "x": "CRITICAL" - }, - { - "a": "DOWNTIME", - "b": "DOWNTIME", - "x": "DOWNTIME" - } - ] - }, - { - "name": "OR", - "truth_table": [ - { - "a": "OK", - "b": "OK", - "x": "OK" - }, - { - "a": "OK", - "b": "WARNING", - "x": "OK" - }, - { - "a": "OK", - "b": "UNKNOWN", - "x": "OK" - }, - { - "a": "OK", - "b": "MISSING", - "x": "OK" - }, - { - "a": "OK", - "b": "CRITICAL", - "x": "OK" - }, - { - "a": "OK", - "b": "DOWNTIME", - "x": "OK" - }, - { - "a": "WARNING", - "b": "WARNING", - "x": "WARNING" - }, - { - "a": "WARNING", - "b": "UNKNOWN", - "x": "WARNING" - }, - { - "a": "WARNING", - "b": "MISSING", - "x": "WARNING" - }, - { - "a": "WARNING", - "b": "CRITICAL", - "x": "WARNING" - }, - { - "a": "WARNING", - "b": "DOWNTIME", - "x": "WARNING" - }, - { - "a": "UNKNOWN", - "b": "UNKNOWN", - "x": "UNKNOWN" - }, - { - "a": "UNKNOWN", - "b": "MISSING", - "x": "UNKNOWN" - }, - { - "a": "UNKNOWN", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "UNKNOWN", - "b": "DOWNTIME", - "x": "UNKNOWN" - }, - { - "a": "MISSING", - "b": "MISSING", - "x": "MISSING" - }, - { - "a": "MISSING", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "MISSING", - "b": "DOWNTIME", - "x": "DOWNTIME" - }, - { - "a": "CRITICAL", - "b": "CRITICAL", - "x": "CRITICAL" - }, - { - "a": "CRITICAL", - "b": "DOWNTIME", - "x": "CRITICAL" - }, - { - "a": "DOWNTIME", - "b": "DOWNTIME", - "x": "DOWNTIME" - } - ] - } - ] -} 
+{"id":"1b0318f0-429d-44fc-8bba-07184354c73b","name":"egi_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]} diff --git a/flink_jobs/batch_status/src/main/resources/ops/EGI-rules.json b/flink_jobs/batch_status/src/main/resources/ops/EGI-rules.json index 2a52c99a..76fc5f5c 100644 --- a/flink_jobs/batch_status/src/main/resources/ops/EGI-rules.json +++ b/flink_jobs/batch_status/src/main/resources/ops/EGI-rules.json @@ -1,33 +1 @@ -{ - "rules": [ - { - "metric": "org.bdii.Freshness", - "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30" - }, - { - "metric": "org.bdii.Entries", - "thresholds": "time=-35s;~:10;15:;-100;300 entries=55;20;50:60;50;30" - }, - { - "metric": "org.bdii.Freshness", - "thresholds": "freshness=10s; entries=29;;30:50", - "host" : "bdii.host3.example.foo" - }, - { - "metric": "org.bdii.Freshness", - "thresholds": "freshness=10s;30;50:60;0;100 entries=29;0:10;20:30;0;30", - "host" : "bdii.host1.example.foo" - }, - { - "metric": "org.bdii.Freshness", - "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", - "host" : "bdii.host1.example.foo", - "endpoint_group": "SITE-101" - }, - { - "metric": "org.bdii.Freshness", - "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", - "endpoint_group": "SITE-101" - } - ] -} +{"rules":[{"metric":"org.bdii.Freshness","thresholds":"freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30"},{"metric":"org.bdii.Entries","thresholds":"time=-35s;~:10;15:;-100;300 entries=55;20;50:60;50;30"},{"metric":"org.bdii.Freshness","thresholds":"freshness=10s; 
entries=29;;30:50","host":"bdii.host3.example.foo"},{"metric":"org.bdii.Freshness","thresholds":"freshness=10s;30;50:60;0;100 entries=29;0:10;20:30;0;30","host":"bdii.host1.example.foo"},{"metric":"org.bdii.Freshness","thresholds":"freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30","host":"bdii.host1.example.foo","endpoint_group":"SITE-101"},{"metric":"org.bdii.Freshness","thresholds":"freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30","endpoint_group":"SITE-101"}]} diff --git a/flink_jobs/batch_status/src/main/resources/ops/ap1.json b/flink_jobs/batch_status/src/main/resources/ops/ap1.json new file mode 100644 index 00000000..ab21412c --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/ops/ap1.json @@ -0,0 +1 @@ +{"id":"297c368a-524f-4144-9eb6-924fae5f08fa","name":"ap1","namespace":"test","endpoint_group":"sites","metric_operation":"AND","profile_operation":"AND","metric_profile":{"name":"CH.CERN.SAM.ARGO_MON_CRITICAL","id":"c81fdb7b-d8f8-4ff9-96c5-6a0c336e2b25"},"groups":[{"name":"compute","operation":"OR","services":[{"name":"CREAM-CE","operation":"OR"},{"name":"ARC-CE","operation":"OR"},{"name":"GRAM5","operation":"OR"},{"name":"unicore6.TargetSystemFactory","operation":"OR"},{"name":"QCG.Computing","operation":"OR"}]},{"name":"storage","operation":"OR","services":[{"name":"SRMv2","operation":"OR"},{"name":"SRM","operation":"OR"}]},{"name":"information","operation":"OR","services":[{"name":"Site-BDII","operation":"OR"}]},{"name":"url","operation":"OR","services":[{"name":"services.url","operation":"OR"}]}]} diff --git a/flink_jobs/batch_status/src/main/resources/ops/config.json b/flink_jobs/batch_status/src/main/resources/ops/config.json new file mode 100644 index 00000000..46b39972 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/ops/config.json @@ -0,0 +1 @@ +{"id":"c800846f-8478-4af8-85d1-a3f12fe4c18f","info":{"name":"Critical","description":"EGI report for Roc critical","created":"2015-10-19 10:35:49","updated":"2015-10-19 10:35:49"},"tenant":"EGI","topology_schema":{"group":{"type":"NGI","group":{"type":"SERVICEGROUPS"}}},"weight":"hepspec","profiles":[{"id":"433beb2c-45cc-49d4-a8e0-b132bb30327e","name":"ch.cern.sam.ROC_CRITICAL","type":"metric"},{"id":"17d1462f-8f91-4728-a253-1a6e8e2e848d","name":"ops1","type":"operations"},{"id":"1ef8c0c9-f9ef-4ca1-9ee7-bb8b36332036","name":"critical","type":"aggregation"}],"filter_tags":[{"name":"production","value":"1","context":"endpoint_groups"},{"name":"monitored","value":"1","context":"endpoint_groups"},{"name":"scope","value":"EGI","context":"endpoint_groups"},{"name":"scope","value":"EGI","context":"group_of_groups"},{"name":"infrastructure","value":"Production","context":"group_of_groups"},{"name":"certification","value":"Certified","context":"group_of_groups"},{"name":"vo","value":"ops","context":"metric_data"},{"name":"vo_fqan","value":"ops","context":"metric_data"},{"name":"roc","value":"any","context":"metric_data"}]} diff --git a/flink_jobs/batch_status/src/main/resources/ops/recomp.json b/flink_jobs/batch_status/src/main/resources/ops/recomp.json new file mode 100644 index 00000000..1079cb55 --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/ops/recomp.json @@ -0,0 +1 @@ +[{"reason":"testing_compute_engine","start_time":"2013-12-08T12:03:44Z","end_time":"2013-12-10T12:03:44Z","exclude":["GR-01-AUTH","HG-03-AUTH"],"status":"running","timestamp":"2015-02-01 
14:58:40"},{"reason":"testing_compute_engine","start_time":"2013-12-08T12:03:44Z","end_time":"2013-12-08T13:03:44Z","exclude":["SITE-A","SITE-B"],"status":"running","timestamp":"2015-02-01 14:58:40","exclude_monitoring_source":[{"host":"monA","start_time":"2013-12-08T12:03:44Z","end_time":"2013-12-08T15:03:44Z"},{"host":"monA","start_time":"2013-12-08T18:03:44Z","end_time":"2013-12-08T19:03:44Z"}]},{"reason":"testing_compute_engine","start_time":"2013-12-08T16:03:44Z","end_time":"2013-12-08T18:03:44Z","exclude":["SITE-A","SITE-c"],"status":"running","timestamp":"2015-02-01 14:58:40"}] diff --git a/flink_jobs/batch_status/src/main/resources/ops/recomp.json.flink b/flink_jobs/batch_status/src/main/resources/ops/recomp.json.flink new file mode 100644 index 00000000..e51abe2b --- /dev/null +++ b/flink_jobs/batch_status/src/main/resources/ops/recomp.json.flink @@ -0,0 +1 @@ +[{"requester_email":"example email","status":"done","timestamp":"2019-05-07 02:00:00","start_time":"2019-05-07T06:00:00Z","exclude_monitoring_source":[{"host":"bad-mon01.example.org","start_time":"2019-05-07T02:00:00Z","end_time":"2019-05-07T06:00:00Z"}],"id":"ba64962c-90eb-44e9-b035-c697aa812876","reason":"Exclusion of bad-mon01.example.org from TestCase","end_time":"2019-05-08T23:59:59Z","report":"TestCase","exclude":[],"requester_name":"example requester"}] \ No newline at end of file diff --git a/flink_jobs/batch_status/src/test/java/argo/amr/ApiResourceManagerTest.java b/flink_jobs/batch_status/src/test/java/argo/amr/ApiResourceManagerTest.java new file mode 100644 index 00000000..4a384ada --- /dev/null +++ b/flink_jobs/batch_status/src/test/java/argo/amr/ApiResourceManagerTest.java @@ -0,0 +1,288 @@ +package argo.amr; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.text.ParseException; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.http.client.ClientProtocolException; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.junit.WireMockRule; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.configureFor; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; + + +public class ApiResourceManagerTest { + + public static String loadResJSON(String resURL) { + + InputStream jsonInputStream + = ApiResourceManagerTest.class.getResourceAsStream(resURL); + String content = new BufferedReader( + new InputStreamReader(jsonInputStream, StandardCharsets.UTF_8)) + .lines() + .collect(Collectors.joining("\n")); + return content; + + } + + @Rule + public WireMockRule wireMockRule = new WireMockRule(wireMockConfig().httpsPort(8443)); + + @BeforeClass + public 
static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/metric_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/agg_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/ops_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/thresholds.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topoendpoints.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topogroups.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/downtimes.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/weights.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/recomputations.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_CONFIG.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_METRIC.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_AGGREGATION.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_OPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_THRESHOLDS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOENDPOINTS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOGROUPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_DOWNTIMES.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_WEIGHTS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_RECOMPUTATIONS.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // load mock api response content + String jsonReport = loadResJSON("/amr/report.json"); + String jsonMetric = loadResJSON("/amr/metric_profile.json"); + String jsonAgg = loadResJSON("/amr/agg_profile.json"); + String jsonOps = loadResJSON("/amr/ops_profile.json"); + String jsonThresholds = loadResJSON("/amr/thresholds.json"); + String jsonTopoEnd = loadResJSON("/amr/topoendpoints.json"); + String jsonTopoGroups = loadResJSON("/amr/topogroups.json"); + String jsonDowntimes = loadResJSON("/amr/downtimes.json"); + String jsonWeights = loadResJSON("/amr/weights.json"); + String jsonRecomp = loadResJSON("/amr/recomputations.json"); + + // get json data items + + String dataConfig = loadResJSON("/amr/data_CONFIG.json"); + String dataMetric = loadResJSON("/amr/data_METRIC.json"); + String dataAggr = loadResJSON("/amr/data_AGGREGATION.json"); + String dataOps = loadResJSON("/amr/data_OPS.json"); + String dataThresh = loadResJSON("/amr/data_THRESHOLDS.json"); + String dataTopoEnd = loadResJSON("/amr/data_TOPOENDPOINTS.json"); + String dataTopoGroup = loadResJSON("/amr/data_TOPOGROUPS.json"); + String dataDown = 
loadResJSON("/amr/data_DOWNTIMES.json"); + String dataWeights = loadResJSON("/amr/data_WEIGHTS.json"); + String dataRecomp = loadResJSON("/amr/data_RECOMPUTATIONS.json"); + + + + + stubFor(get(urlEqualTo("/api/v2/reports/f29eeb59-ab38-4aa0-b372-5d3c0709dfb2")) + .willReturn(aResponse().withBody(jsonReport))); + stubFor(get(urlEqualTo("/api/v2/metric_profiles/92fa5d74-015c-4122-b8b9-7b344f3154d4?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonMetric))); + stubFor(get(urlEqualTo("/api/v2/aggregation_profiles/2744247f-40f8-4dd6-b22c-76a3b38334d8?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonAgg))); + stubFor(get(urlEqualTo("/api/v2/operations_profiles/ea62ff1e-c6e1-438b-83c7-9262b3a4f179?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonOps))); + stubFor(get(urlEqualTo("/api/v2/thresholds_profiles/3345c3c1-322a-47f1-982c-1d9df1fc065e?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonThresholds))); + stubFor(get(urlEqualTo("/api/v2/topology/endpoints/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoEnd))); + stubFor(get(urlEqualTo("/api/v2/topology/groups/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoGroups))); + stubFor(get(urlEqualTo("/api/v2/downtimes?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonDowntimes))); + stubFor(get(urlEqualTo("/api/v2/weights/3b9602ed-49ec-42f3-8df7-7c35331ebf69?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonWeights))); + stubFor(get(urlEqualTo("/api/v2/recomputations?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonRecomp))); + + ApiResourceManager amr = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr.setDate("2020-11-01"); + amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr.setToken("s3cr3t"); + amr.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr.setVerify(false); + + // Get the report configuration first and parse it + amr.getRemoteConfig(); + amr.parseReport(); + + assertEquals("report name retrieved","Critical",amr.getReportName()); + assertEquals("metric id retrieved","92fa5d74-015c-4122-b8b9-7b344f3154d4",amr.getMetricID()); + assertEquals("ops id retrieved","ea62ff1e-c6e1-438b-83c7-9262b3a4f179",amr.getOpsID()); + assertEquals("aggregations id retrieved","2744247f-40f8-4dd6-b22c-76a3b38334d8",amr.getAggregationID()); + assertEquals("thresholds id retrieved","3345c3c1-322a-47f1-982c-1d9df1fc065e",amr.getThresholdsID()); + + assertEquals("retrieved config data",dataConfig,amr.getResourceJSON(ApiResource.CONFIG)); + + + // get the profiles metric, aggregation, ops and thresholds + amr.getRemoteMetric(); + amr.getRemoteAggregation(); + amr.getRemoteOps(); + amr.getRemoteThresholds(); + + assertEquals("retrieved metric profile data",dataMetric,amr.getResourceJSON(ApiResource.METRIC)); + assertEquals("retrieved aggregation profile data",dataAggr,amr.getResourceJSON(ApiResource.AGGREGATION)); + assertEquals("retrieved ops profile data",dataOps,amr.getResourceJSON(ApiResource.OPS)); + assertEquals("retrieved thresholds profile data",dataThresh,amr.getResourceJSON(ApiResource.THRESHOLDS)); + + // get remote topology + + amr.getRemoteTopoEndpoints(); + amr.getRemoteTopoGroups(); + + assertEquals("retrieved topology endpoints",dataTopoEnd,amr.getResourceJSON(ApiResource.TOPOENDPOINTS)); + assertEquals("retrieved topology groups",dataTopoGroup,amr.getResourceJSON(ApiResource.TOPOGROUPS)); + + + // get remote downtimes + amr.getRemoteDowntimes(); + assertEquals("retrieved 
downtimes",dataDown,amr.getResourceJSON(ApiResource.DOWNTIMES)); + + // get weights + amr.getRemoteWeights(); + assertEquals("retrieved downtimes",dataWeights,amr.getResourceJSON(ApiResource.WEIGHTS)); + + // get recomputations + amr.getRemoteRecomputations(); + assertEquals("retrieved recomputations",dataRecomp,amr.getResourceJSON(ApiResource.RECOMPUTATIONS)); + + // initate a second amr and check getRemoteAll routine + + + ApiResourceManager amr2 = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr2.setDate("2020-11-01"); + amr2.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr2.setToken("s3cr3t"); + amr2.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr2.setVerify(false); + + amr2.getRemoteAll(); + + // test amr2 downtime list + Downtime[] dtl = amr2.getListDowntimes(); + assertEquals("downtime list size", 3, dtl.length); + assertEquals("downtime data", "WebPortal", dtl[0].getService()); + assertEquals("downtime data", "hostA.foo", dtl[0].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[0].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[0].getEndTime()); + assertEquals("downtime data", "WebPortal", dtl[1].getService()); + assertEquals("downtime data", "hostB.foo", dtl[1].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[1].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[1].getEndTime()); + assertEquals("downtime data", "WebPortald", dtl[2].getService()); + assertEquals("downtime data", "hostB.foo", dtl[2].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[2].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[2].getEndTime()); + + // test amr2 group endpoint list + GroupEndpoint[] gel = amr2.getListGroupEndpoints(); + assertEquals("group endpoint list size", 3, gel.length); + assertEquals("group endpoint data", "SERVICEGROUPS", gel[0].getType()); + assertEquals("group endpoint data", "groupA", gel[0].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[0].getService()); + assertEquals("group endpoint data", "host1.foo.bar", gel[0].getHostname()); + assertEquals("group endpoint data", "1", gel[0].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[0].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[0].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[1].getType()); + assertEquals("group endpoint data", "groupB", gel[1].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[1].getService()); + assertEquals("group endpoint data", "host3.foo.bar", gel[1].getHostname()); + assertEquals("group endpoint data", "1", gel[1].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[1].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[1].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[2].getType()); + assertEquals("group endpoint data", "groupA", gel[2].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[2].getService()); + assertEquals("group endpoint data", "host2.foo.bar", gel[2].getHostname()); + assertEquals("group endpoint data", "1", gel[2].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[2].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[2].getTags().get("scope")); + + // test amr2 group groups list + GroupGroup[] ggl = 
amr2.getListGroupGroups(); + assertEquals("group endpoint list size", 2, ggl.length); + assertEquals("group endpoint data", "PROJECT", ggl[0].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[0].getGroup()); + assertEquals("group endpoint data", "GROUP-101", ggl[0].getSubgroup()); + assertEquals("group endpoint data", "0", ggl[0].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[0].getTags().get("scope")); + + assertEquals("group endpoint data", "PROJECT", ggl[1].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[1].getGroup()); + assertEquals("group endpoint data", "GROUP-202", ggl[1].getSubgroup()); + assertEquals("group endpoint data", "1", ggl[1].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[1].getTags().get("scope")); + + // test amr2 weights list + Weight[] wl = amr2.getListWeights(); + assertEquals("group endpoint list size", 4, wl.length); + assertEquals("group endpoint data", "computationpower", wl[0].getType()); + assertEquals("group endpoint data", "GROUP-A", wl[0].getSite()); + assertEquals("group endpoint data", "366", wl[0].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[1].getType()); + assertEquals("group endpoint data", "GROUP-B", wl[1].getSite()); + assertEquals("group endpoint data", "4000", wl[1].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[2].getType()); + assertEquals("group endpoint data", "GROUP-C", wl[2].getSite()); + assertEquals("group endpoint data", "19838", wl[2].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[3].getType()); + assertEquals("group endpoint data", "GROUP-D", wl[3].getSite()); + assertEquals("group endpoint data", "19838", wl[3].getWeight()); + + // test amr2 metric profile list + MetricProfile[] mpl = amr2.getListMetrics(); + assertEquals("group endpoint list size", 1, mpl.length); + assertEquals("group endpoint data", "test-mon", mpl[0].getProfile()); + assertEquals("group endpoint data", "WebPortal", mpl[0].getService()); + assertEquals("group endpoint data", "org.nagios.WebCheck", mpl[0].getMetric()); + assertEquals("group endpoint data", 0, mpl[0].getTags().size()); + + + + + } + +} diff --git a/flink_jobs/batch_status/src/test/java/argo/batch/ExcludeMetricDataTest.java b/flink_jobs/batch_status/src/test/java/argo/batch/ExcludeMetricDataTest.java new file mode 100644 index 00000000..1aac89a5 --- /dev/null +++ b/flink_jobs/batch_status/src/test/java/argo/batch/ExcludeMetricDataTest.java @@ -0,0 +1,93 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; + +import java.net.URL; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; + +import org.junit.BeforeClass; +import org.junit.Test; + + +import argo.avro.MetricData; +import sync.RecomputationManagerTest; + +public class ExcludeMetricDataTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ExcludeMetricDataTest.class.getResource("/ops/recomp.json")); + + } + + @Test + public void test() throws Exception { + + // Prepare Resource File which contains recomputations + URL resJsonFile = RecomputationManagerTest.class.getResource("/ops/recomp.json.flink"); + File jsonFile = new File(resJsonFile.toURI()); + + // Prepare a local 
flink execution environment for testing + ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + env.setParallelism(1); + + // Create a data set with metric data - some of them are coming from monitoring host bad-mon.example.org + // which should be excluded during 02:00 and 06:00 period of 2019-05-07 + MetricData md01 = new MetricData("2019-05-07T00:00:00Z","CREAM-CE","host01.example.org","metric01","CRITICAL","bad-mon01.example.org","","summary","msg",null); + MetricData md02 = new MetricData("2019-05-07T01:00:00Z","CREAM-CE","host01.example.org","metric01","OK","good-mon01.example.org","","summary","msg",null); + MetricData md03 = new MetricData("2019-05-07T03:00:00Z","CREAM-CE","host01.example.org","metric01","OK","good-mon02.example.org","","summary","msg",null); + MetricData md04 = new MetricData("2019-05-07T03:32:00Z","CREAM-CE","host01.example.org","metric01","CRITICAL","bad-mon01.example.org","","summary","msg",null); + MetricData md05 = new MetricData("2019-05-07T04:00:00Z","CREAM-CE","host01.example.org","metric01","OK","good-mon01.example.org","","summary","msg",null); + MetricData md06 = new MetricData("2019-05-07T04:32:00Z","CREAM-CE","host01.example.org","metric01","CRITICAL","bad-mon01.example.org","","summary","msg",null); + MetricData md07 = new MetricData("2019-05-07T05:00:00Z","CREAM-CE","host01.example.org","metric01","OK","good-mon02.example.org","","summary","msg",null); + MetricData md08 = new MetricData("2019-05-07T06:00:00Z","CREAM-CE","host01.example.org","metric01","OK","good-mon01.example.org","","summary","msg",null); + MetricData md09 = new MetricData("2019-05-07T07:00:00Z","CREAM-CE","host01.example.org","metric01","OK","bad-mon1.example.org","","summary","msg",null); + + // Create a recomputation dataset by reading the recomputation file. 
This dataset will + // be used as a broadcast variable + String recStr = new String(); + BufferedReader br = new BufferedReader(new FileReader(jsonFile)); + recStr = br.readLine(); + br.close(); + DataSet recDS = env.fromElements(recStr); + + // Read the initial metric data + DataSet md = env.fromElements(md01,md02,md03,md04,md05,md06,md07,md08,md09); + // Clean the metric data by testing the ExcludeMetricData flatmap function with + // recomputation information to exclude bad-mon01.example.org data from 02:00 to 06:00 (broadcast variable) + DataSet clearMd = md.flatMap(new ExcludeMetricData(null)).withBroadcastSet(recDS, "rec"); + + // collect the final result in a list + List resulted = clearMd.collect(); + + // Create the expected result in a list + List expected = new ArrayList(); + expected.add(md01); + expected.add(md02); + expected.add(md03); + expected.add(md05); + expected.add(md07); + expected.add(md08); + expected.add(md09); + + // compare expected and resulted data + assertEquals(expected,resulted); + + + } + + + + +} diff --git a/flink_jobs/batch_status/src/test/java/argo/batch/PickDataPointsTest.java b/flink_jobs/batch_status/src/test/java/argo/batch/PickDataPointsTest.java new file mode 100644 index 00000000..db8e19a1 --- /dev/null +++ b/flink_jobs/batch_status/src/test/java/argo/batch/PickDataPointsTest.java @@ -0,0 +1,209 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; + +import java.net.URL; +import java.nio.file.Paths; +import java.util.List; + +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.core.fs.Path; +import org.junit.BeforeClass; +import org.junit.Test; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import junit.framework.Assert; +import ops.ConfigManager; +import ops.OpsManager; +import ops.ThresholdManager; +import ops.ThresholdManagerTest; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; +import sync.RecomputationManagerTest; + +public class PickDataPointsTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/avro/group_endpoints_info.avro")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/avro/poem_url_services.avro")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/ops/ap1.json")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/avro/groups_info.avro")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/ops/config.json")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/ops/EGI-algorithm.json")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/ops/recomp.json.flink")); + assertNotNull("Test file missing", PickDataPointsTest.class.getResource("/ops/EGI-rules.json")); + + + } + + @Test + public void test() throws Exception { + + // Prepare Resource File + URL resAvroFile = PickDataPointsTest.class.getResource("/avro/poem_url_services.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + MetricProfileManager mp = new MetricProfileManager(); 
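+ // Note: the profile and topology managers instantiated and loaded in this block appear to serve only as a parsing check of the sync files; the job under test receives the same files again further down, as broadcast strings and Avro datasets.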
+ // Test loading file + mp.loadAvro(avroFile); + assertNotNull("File Loaded", mp); + + // Prepare Resource File + URL resJsonFileAp = PickDataPointsTest.class.getResource("/ops/ap1.json"); + File jsonFileAp = new File(resJsonFileAp.toURI()); + // Instatiate class + AggregationProfileManager avp = new AggregationProfileManager(); + avp.clearProfiles(); + avp.loadJson(jsonFileAp); + + // Prepare Resource File + URL resAvroFileEndpoint = PickDataPointsTest.class.getResource("/avro/group_endpoints_info.avro"); + File avroFileEndpoint = new File(resAvroFileEndpoint.toURI()); + // Instatiate class + EndpointGroupManager ge = new EndpointGroupManager(); + // Test loading file + ge.loadAvro(avroFileEndpoint); + assertNotNull("File Loaded", ge); + + // Prepare Resource File + URL resAvroFileGroup = PickDataPointsTest.class.getResource("/avro/groups_info.avro"); + File avroFileGroup = new File(resAvroFileGroup.toURI()); + // Instatiate class + GroupGroupManager gg = new GroupGroupManager(); + // Test loading file + gg.loadAvro(avroFileGroup); + assertNotNull("File Loaded", gg); + + // Prepare Resource File + URL resJsonFileCfg = PickDataPointsTest.class.getResource("/ops/config.json"); + File jsonFileCfg = new File(resJsonFileCfg.toURI()); + + + // Prepare Resource File + URL resJsonFileOps = PickDataPointsTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonFileOps = new File(resJsonFileOps.toURI()); + + // Prepare Resource File which contains recomputations + URL resJsonFileRecomp = RecomputationManagerTest.class.getResource("/ops/recomp.json.flink"); + File jsonFileRecomp = new File(resJsonFileRecomp.toURI()); + + // Prepare Resource File + URL thrJsonFile = ThresholdManagerTest.class.getResource("/ops/EGI-rules.json"); + File thrFile = new File(thrJsonFile.toURI()); + + // Prepare a local flink execution environment for testing + ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + env.setParallelism(1); + + + // Create a data set with metric data - some of them are coming from monitoring + // host bad-mon.example.org + // which should be excluded during 02:00 and 06:00 period of 2019-05-07 + MetricData md01 = new MetricData("2019-05-07T00:00:00Z", "services.url", "host1.example.foo_11", "check_http", + "CRITICAL", "mon2", "", "summary", "msg", null); + MetricData md02 = new MetricData("2019-05-07T01:00:00Z", "services.url", "host1.example.foo_22", "check_http", "OK", + "mon2", "", "summary", "msg", null); + MetricData md03 = new MetricData("2019-05-07T03:00:00Z", "services.url", "host2.example.foo_33", "check_http", "OK", + "mon2", "", "summary", "msg", null); + MetricData md04 = new MetricData("2019-05-07T03:32:00Z", "services.url", "host2.example.foo_44", "check_http", + "CRITICAL", "mon2", "", "summary", "msg", null); + MetricData md05 = new MetricData("2019-05-07T04:00:00Z", "services.url", "host3.example.foo_55", "check_http", "OK", + "mon2", "", "summary", "msg", null); + MetricData md06 = new MetricData("2019-05-07T04:32:00Z", "services.url", "host4.example.foo_66", "check_http", + "CRITICAL", "mon2", "", "summary", "msg", null); + + // Create a config manager dataset by reading the configfile. 
This dataset will + be used as a broadcast variable + String cfgStr = new String(); + BufferedReader br = new BufferedReader(new FileReader(jsonFileCfg)); + cfgStr = br.readLine(); + br.close(); + DataSet cfgDS = env.fromElements(cfgStr); + + String opsStr = new String(); + br = new BufferedReader(new FileReader(jsonFileOps)); + opsStr = br.readLine(); + br.close(); + DataSet opsDS = env.fromElements(opsStr); + + String apStr = new String(); + br = new BufferedReader(new FileReader(jsonFileAp)); + apStr = br.readLine(); + br.close(); + DataSet apDS = env.fromElements(apStr); + + String recStr = new String(); + br = new BufferedReader(new FileReader(jsonFileRecomp)); + recStr = br.readLine(); + br.close(); + DataSet recDS = env.fromElements(recStr); + + String thrStr = new String(); + br = new BufferedReader(new FileReader(thrFile)); + thrStr = br.readLine(); + br.close(); + DataSet thrDS = env.fromElements(thrStr); + + + + + // sync data input: metric profile in avro format + AvroInputFormat mpsAvro = new AvroInputFormat(new Path(resAvroFile.toURI()), + MetricProfile.class); + DataSet mpsDS = env.createInput(mpsAvro); + + // sync data input: endpoint group topology data in avro format + AvroInputFormat egpAvro = new AvroInputFormat( + new Path(resAvroFileEndpoint.toURI()), GroupEndpoint.class); + DataSet egpDS = env.createInput(egpAvro); + + // sync data input: group of group topology data in avro format + AvroInputFormat ggpAvro = new AvroInputFormat(new Path(resAvroFileGroup.toURI()), + GroupGroup.class); + DataSet ggpDS = env.createInput(ggpAvro); + + // Read the initial metric data + DataSet md = env.fromElements(md01, md02, md03, md04, md05, md06); + // Run the metric data through the PickEndpoints flatmap function, passing the report configuration, + // recomputations, ops, metric and aggregation profiles, topology and thresholds + // as broadcast variables + DataSet clearMd = md.flatMap(new PickEndpoints(null)).withBroadcastSet(cfgDS, "conf") + .withBroadcastSet(recDS, "rec").withBroadcastSet(opsDS, "ops").withBroadcastSet(mpsDS, "mps") + .withBroadcastSet(ggpDS, "ggp").withBroadcastSet(egpDS, "egp").withBroadcastSet(thrDS, "thr") + .withBroadcastSet(apDS, "aps"); + + // collect the final result in a list + List resulted = clearMd.collect(); + + Assert.assertEquals(5,resulted.size()); + + String[] expected = new String[] {"URL:host1.example.foo/path/to/service1,DN:foo DN", + "URL:host1.example.foo/path/to/service2", + "URL:host2.example.foo/path/to/service1", + "ext.Value:extension1,URL:host2.example.foo/path/to/service2", + ""}; + + + System.out.println(resulted); + + for (int i = 0;iitem = t3.getSamples().iterator().next(); + assertEquals("2015-05-01T00:00:00Z",item.getKey().toString("yyyy-MM-dd'T'HH:mm:ss'Z'")); + assertEquals(new Integer(5),item.getValue()); + } + + @Test + public void testGet() { + + CTimeline t1 = new CTimeline("2015-05-01T11:00:00Z",3); + t1.insert("2015-05-01T11:00:00Z",1); + t1.insert("2015-05-01T13:00:00Z",2); + t1.insert("2015-05-01T22:00:00Z",6); + + assertEquals(4, t1.getLength()); + + CTimeline t2 = new CTimeline("2015-05-01T11:00:00Z",3); + t2.insert("2015-05-01T11:00:00Z",1); + t2.insert("2015-05-01T13:00:00Z",2); + t2.insert("2015-05-01T00:00:00Z",6); + + assertEquals(3, t2.getLength()); + + // Test Get + assertEquals(1,t1.get("2015-05-01T11:00:00Z")); + assertEquals(1,t1.get("2015-05-01T11:05:00Z")); + assertEquals(1,t1.get("2015-05-01T12:05:00Z")); + assertEquals(2,t1.get("2015-05-01T13:05:00Z")); + assertEquals(6,t1.get("2015-05-01T22:05:00Z")); +
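+ // Note: timestamps earlier than the first inserted sample appear to fall back to the timeline's initial state (3 for t1), while later timestamps resolve to the most recent sample at or before the queried time.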
assertEquals(3,t1.get("2015-05-01T10:05:00Z")); + + assertEquals(1,t2.get("2015-05-01T11:00:00Z")); + assertEquals(1,t2.get("2015-05-01T11:05:00Z")); + assertEquals(1,t2.get("2015-05-01T12:05:00Z")); + assertEquals(2,t2.get("2015-05-01T13:05:00Z")); + assertEquals(2,t2.get("2015-05-01T22:05:00Z")); + assertEquals(6,t2.get("2015-05-01T10:05:00Z")); + } + + @Test + public void testBulkInserts() throws ParseException { + + + // Test optimization and insert and correct order + CTimeline ctl = new CTimeline("2015-05-01T00:00:00Z"); + CTimeline ctl2 = new CTimeline("2015-05-01T00:00:00Z"); + CTimeline expCtl = new CTimeline("2015-05-01T00:00:00Z"); + + + ctl.insert("2015-05-01T11:00:00Z", 1); + ctl.insert("2015-05-01T12:00:00Z", 1); + ctl.insert("2015-05-01T13:00:00Z", 2); + ctl.insert("2015-05-01T15:00:00Z", 2); + ctl.insert("2015-05-01T16:00:00Z", 2); + ctl.insert("2015-05-01T20:00:00Z", 2); + ctl.insert("2015-05-01T22:00:00Z", 1); + ctl.insert("2015-05-01T23:00:00Z", 1); + + ctl2.insert("2015-05-01T22:00:00Z", 1); + ctl2.insert("2015-05-01T23:00:00Z", 1); + ctl2.insert("2015-05-01T12:00:00Z", 1); + ctl2.insert("2015-05-01T11:00:00Z", 1); + ctl2.insert("2015-05-01T16:00:00Z", 2); + ctl2.insert("2015-05-01T13:00:00Z", 2); + ctl2.insert("2015-05-01T15:00:00Z", 2); + ctl2.insert("2015-05-01T20:00:00Z", 2); + + // assert that correct order is retained through inserts + assertEquals(ctl.getSamples(),ctl2.getSamples()); + + // Check bulk insert + CTimeline ctl3 = new CTimeline(); + ctl3.bulkInsert(ctl.getSamples()); + + assertEquals(ctl2.getSamples(),ctl3.getSamples()); + + // Check optimization in single element timeline + CTimeline ctl4 = new CTimeline(); + ctl4.insert("2015-05-01T12:00:00Z",1); + ctl.optimize(); + expCtl.clear(); + expCtl.insert("2015-05-01T12:00:00Z",1); + + assertEquals(ctl4.getSamples(),expCtl.getSamples()); + + } + + @Test + public void testOptimization() throws ParseException { + + + // Test optimization and insert and correct order + CTimeline ctl = new CTimeline("2015-05-01T00:00:00Z"); + CTimeline expCtl = new CTimeline("2015-05-01T00:00:00Z"); + + + ctl.insert("2015-05-01T11:00:00Z", 1); + ctl.insert("2015-05-01T12:00:00Z", 1); + ctl.insert("2015-05-01T13:00:00Z", 2); + ctl.insert("2015-05-01T15:00:00Z", 2); + ctl.insert("2015-05-01T16:00:00Z", 2); + ctl.insert("2015-05-01T20:00:00Z", 2); + ctl.insert("2015-05-01T22:00:00Z", 1); + ctl.insert("2015-05-01T23:00:00Z", 1); + // Test optimization + expCtl.insert("2015-05-01T11:00:00Z",1); + expCtl.insert("2015-05-01T13:00:00Z",2); + expCtl.insert("2015-05-01T22:00:00Z",1); + + ctl.optimize(); + + assertEquals(expCtl.getSamples(),ctl.getSamples()); + + + } + + @Test + public void testAggregation() throws URISyntaxException, IOException { + + // Prepare Resource File + URL resJsonFile = CTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instantiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + CTimeline t1 = new CTimeline("2015-05-01T00:00:00Z"); + CTimeline t2 = new CTimeline("2015-05-01T00:00:00Z"); + + + t1.insert("2015-05-01T00:00:00Z", opsMgr.getIntStatus("OK")); + t1.insert("2015-05-01T09:00:00Z", opsMgr.getIntStatus("WARNING")); + t1.insert("2015-05-01T12:00:00Z", opsMgr.getIntStatus("OK")); + + t2.insert("2015-05-01T00:00:00Z", opsMgr.getIntStatus("OK")); + t2.insert("2015-05-01T22:00:00Z", opsMgr.getIntStatus("CRITICAL")); + t2.insert("2015-05-01T22:23:00Z", opsMgr.getIntStatus("OK")); + + 
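+ // Aggregation merges t2 into t1 timeslot by timeslot using the AND truth table from EGI-algorithm.json; e.g. OK AND CRITICAL yields CRITICAL, which is why the expected timeline below turns CRITICAL between 22:00 and 22:23.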
t1.aggregate(t2, opsMgr, opsMgr.getIntOperation("AND")); + + CTimeline expected = new CTimeline("2015-05-01T00:00:00Z"); + expected.insert("2015-05-01T00:00:00Z", opsMgr.getIntStatus("OK")); + expected.insert("2015-05-01T09:00:00Z", opsMgr.getIntStatus("WARNING")); + expected.insert("2015-05-01T12:00:00Z", opsMgr.getIntStatus("OK")); + expected.insert("2015-05-01T22:00:00Z", opsMgr.getIntStatus("CRITICAL")); + expected.insert("2015-05-01T22:23:00Z", opsMgr.getIntStatus("OK")); + + assertEquals(expected.getSamples(),t1.getSamples()); + } + +} \ No newline at end of file diff --git a/flink_jobs/batch_status/src/test/java/sync/EndpointGroupManagerTest.java b/flink_jobs/batch_status/src/test/java/sync/EndpointGroupManagerTest.java new file mode 100644 index 00000000..99a494bf --- /dev/null +++ b/flink_jobs/batch_status/src/test/java/sync/EndpointGroupManagerTest.java @@ -0,0 +1,84 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import ops.ConfigManager; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class EndpointGroupManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro")); + assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + EndpointGroupManager ge = new EndpointGroupManager(); + // Test loading file + ge.loadAvro(avroFile); + assertNotNull("File Loaded", ge); + + // Test Check if service endpoint exists in topology + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("se01.afroditi.hellasgrid.gr", "SRMv2")); + assertTrue(ge.checkEndpoint("grid-perfsonar.hpc.susx.ac.uk", "net.perfSONAR.Latency")); + assertTrue(ge.checkEndpoint("se.grid.tuke.sk", "SRMv2")); + assertTrue(ge.checkEndpoint("dpm.grid.atomki.hu", "SRMv2")); + // Test check Group retrieval + ArrayList result1 = new ArrayList(); + result1.add("ru-PNPI"); + assertEquals(ge.getGroup("SITES", "gt3.pnpi.nw.ru", "CREAM-CE"), result1); + + // Test Tag Filtering (Wont filter out anything since input is already + // filtered) + URL resJson = EndpointGroupManagerTest.class.getResource("/ops/config.json"); + File cfgFile = new File(resJson.toURI()); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(cfgFile); + ge.filter(cfgMgr.egroupTags); + + // Check non-existent groups + assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false); + assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false); + + // Prepare Resource File with extra information in tags + URL resAvroFile2 = EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_info.avro"); + File avroFile2 = new File(resAvroFile2.toURI()); + // Instantiate class + EndpointGroupManager ge2 = new EndpointGroupManager(); + // Test loading file + ge2.loadAvro(avroFile2); + assertNotNull("File Loaded", ge); + + String exp1 = 
"URL:host1.example.foo/path/to/service1,DN:foo DN"; + String exp2 = "URL:host1.example.foo/path/to/service2"; + String exp3 = "URL:host2.example.foo/path/to/service1"; + String exp4 = "ext.Value:extension1,URL:host2.example.foo/path/to/service2"; + String exp5 = ""; + String exp6 = "URL:host4.example.foo/path/to/service1"; + + assertEquals("wrong tags", exp1,ge2.getInfo("groupA", "SERVICEGROUPS", "host1.example.foo_11", "services.url")); + assertEquals("wrong tags", exp2,ge2.getInfo("groupB", "SERVICEGROUPS", "host1.example.foo_22", "services.url")); + assertEquals("wrong tags", exp3,ge2.getInfo("groupC", "SERVICEGROUPS", "host2.example.foo_33", "services.url")); + assertEquals("wrong tags", exp4,ge2.getInfo("groupD", "SERVICEGROUPS", "host2.example.foo_44", "services.url")); + assertEquals("wrong tags", exp5,ge2.getInfo("groupE", "SERVICEGROUPS", "host3.example.foo_55", "services.url")); + assertEquals("wrong tags", exp6,ge2.getInfo("groupF", "SERVICEGROUPS", "host4.example.foo_66", "services.url")); + } + +} diff --git a/flink_jobs/batch_status/src/test/java/sync/RecomputationManagerTest.java b/flink_jobs/batch_status/src/test/java/sync/RecomputationManagerTest.java new file mode 100644 index 00000000..c99c99ff --- /dev/null +++ b/flink_jobs/batch_status/src/test/java/sync/RecomputationManagerTest.java @@ -0,0 +1,103 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + + + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class RecomputationManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", RecomputationManagerTest.class.getResource("/ops/recomp.json")); + } + + @Test + public void test() throws URISyntaxException, ParseException, IOException { + // Prepare Resource File + URL resJsonFile = RecomputationManagerTest.class.getResource("/ops/recomp.json"); + File jsonFile = new File(resJsonFile.toURI()); + + RecomputationsManager recMgr = new RecomputationsManager(); + recMgr.loadJson(jsonFile); + + + assertEquals(recMgr.isExcluded("GR-01-AUTH"), true); + assertEquals(recMgr.isExcluded("HG-03-AUTH"), true); + assertEquals(recMgr.isExcluded("GR-04-IASA"), false); + + // Check period functionality + ArrayList> gr01list = new ArrayList>(); + ArrayList> siteAlist = new ArrayList>(); + ArrayList> siteBlist = new ArrayList>(); + ArrayList> siteClist = new ArrayList>(); + + Map gr01map = new HashMap(); + + Map siteA1map = new HashMap(); + Map siteA2map = new HashMap(); + + + + Map siteBmap = new HashMap(); + Map siteCmap = new HashMap(); + + // Check period functionality + + gr01map.put("start", "2013-12-08T12:03:44Z"); + gr01map.put("end", "2013-12-10T12:03:44Z"); + + siteA1map.put("start", "2013-12-08T12:03:44Z"); + siteA1map.put("end", "2013-12-08T13:03:44Z"); + + siteA2map.put("start", "2013-12-08T16:03:44Z"); + siteA2map.put("end", "2013-12-08T18:03:44Z"); + + siteBmap.put("start", "2013-12-08T12:03:44Z"); + siteBmap.put("end", "2013-12-08T13:03:44Z"); + + siteCmap.put("start", "2013-12-08T16:03:44Z"); + siteCmap.put("end", "2013-12-08T18:03:44Z"); + + gr01list.add(gr01map); + siteAlist.add(siteA1map); + siteAlist.add(siteA2map); + siteBlist.add(siteBmap); + siteClist.add(siteCmap); + + 
Assert.assertEquals(recMgr.getPeriods("GR-01-AUTH", "2013-12-08"),gr01list); + Assert.assertEquals(recMgr.getPeriods("SITE-A", "2013-12-08"),siteAlist); + Assert.assertEquals(recMgr.getPeriods("SITE-B", "2013-12-08"),siteBlist); + + // check monitoring exclusions + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T11:03:43Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T11:03:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T12:06:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T14:05:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T15:02:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T15:03:45Z")); + + // check monitoring exclusions + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T11:03:43Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T11:03:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T12:06:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T14:05:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T15:02:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T15:03:45Z")); + + } + +} diff --git a/flink_jobs/old-models/ams_ingest_metric/.gitignore b/flink_jobs/old-models/ams_ingest_metric/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/old-models/ams_ingest_metric/metric_data.avsc b/flink_jobs/old-models/ams_ingest_metric/metric_data.avsc new file mode 100644 index 00000000..c35bbe38 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/metric_data.avsc @@ -0,0 +1,18 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "metric_data", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} diff --git a/flink_jobs/old-models/ams_ingest_metric/pom.xml b/flink_jobs/old-models/ams_ingest_metric/pom.xml new file mode 100644 index 00000000..70121203 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/pom.xml @@ -0,0 +1,378 @@ + + + 4.0.0 + + argo.streaming + ams-ingest-metric + 0.1 + jar + + ARGO AMS Ingest Metric Data job + + + + + UTF-8 + 1.3.2 + + + + + + cloudera + https://repository.cloudera.com/artifactory/cloudera-repos/ + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + org.apache.avro + avro + 1.7.7 + + + org.apache.flink + flink-connector-filesystem_2.10 + ${flink.version} + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + org.apache.flink + flink-connector-kafka-0.9_2.10 + ${flink.version} + + + commons-codec + commons-codec + 20041127.091804 + + + com.google.code.gson + 
gson + 2.7 + + + + org.apache.hbase + hbase-client + 1.2.0-cdh5.7.4 + + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + + + + + + + + + + build-jar + + false + + + + org.apache.avro + avro + 1.7.7 + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-connector-filesystem_2.10 + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-connector-kafka-0.9_2.10 + ${flink.version} + provided + + + commons-codec + commons-codec + 20041127.091804 + provided + + + + com.google.code.gson + gson + 2.7 + + + + org.apache.hbase + hbase-client + 1.2.0-cdh5.7.4 + + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop1 + org.apache.flink:flink-shaded-hadoop2 + org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.10 + org.apache.flink:flink-runtime_2.10 + org.apache.flink:flink-optimizer_2.10 + org.apache.flink:flink-clients_2.10 + org.apache.flink:flink-avro_2.10 + org.apache.flink:flink-examples-batch_2.10 + org.apache.flink:flink-examples-streaming_2.10 + org.apache.flink:flink-streaming-java_2.10 + + + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.amazonaws:aws-java-sdk + com.typesafe.akka:akka-actor_* + com.typesafe.akka:akka-remote_* + com.typesafe.akka:akka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + com.twitter:chill-avro_* + com.twitter:chill-bijection_* + com.twitter:bijection-core_* + com.twitter:bijection-avro_* + commons-lang:commons-lang + junit:junit + de.javakaffee:kryo-serializers + joda-time:joda-time + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + argo.streaming.AmsIngestMetric + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + + + diff --git 
a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricData.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricData.java new file mode 100644 index 00000000..77800770 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricData.java @@ -0,0 +1,811 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; + +import org.apache.avro.specific.SpecificData; + +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricData extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + private static final long serialVersionUID = 3861438289744595870L; + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"status\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"monitoring_host\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"actual_data\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"summary\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"message\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String timestamp; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String metric; + @Deprecated public java.lang.String status; + @Deprecated public java.lang.String monitoring_host; + @Deprecated public java.lang.String actual_data; + @Deprecated public java.lang.String summary; + @Deprecated public java.lang.String message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public MetricData() {} + + /** + * All-args constructor. 
+ * @param timestamp The new value for timestamp + * @param service The new value for service + * @param hostname The new value for hostname + * @param metric The new value for metric + * @param status The new value for status + * @param monitoring_host The new value for monitoring_host + * @param actual_data The new value for actual_data + * @param summary The new value for summary + * @param message The new value for message + * @param tags The new value for tags + */ + public MetricData(java.lang.String timestamp, java.lang.String service, java.lang.String hostname, java.lang.String metric, java.lang.String status, java.lang.String monitoring_host, java.lang.String actual_data, java.lang.String summary, java.lang.String message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.actual_data = actual_data; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return actual_data; + case 7: return summary; + case 8: return message; + case 9: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: hostname = (java.lang.String)value$; break; + case 3: metric = (java.lang.String)value$; break; + case 4: status = (java.lang.String)value$; break; + case 5: monitoring_host = (java.lang.String)value$; break; + case 6: actual_data = (java.lang.String)value$; break; + case 7: summary = (java.lang.String)value$; break; + case 8: message = (java.lang.String)value$; break; + case 9: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value of the 'timestamp' field. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.String value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + * @return The value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. + * @return The value of the 'metric' field. 
+ */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + * @return The value of the 'status' field. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.String value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value of the 'monitoring_host' field. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. + */ + public void setMonitoringHost(java.lang.String value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value of the 'actual_data' field. + */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value the value to set. + */ + public void setActualData(java.lang.String value) { + this.actual_data = value; + } + + /** + * Gets the value of the 'summary' field. + * @return The value of the 'summary' field. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.String value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + * @return The value of the 'message' field. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.String value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + * @return The value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** + * Creates a new MetricData RecordBuilder. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder() { + return new argo.avro.MetricData.Builder(); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing Builder. + * @param other The existing builder to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData.Builder other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing MetricData instance. + * @param other The existing instance to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String timestamp; + private java.lang.String service; + private java.lang.String hostname; + private java.lang.String metric; + private java.lang.String status; + private java.lang.String monitoring_host; + private java.lang.String actual_data; + private java.lang.String summary; + private java.lang.String message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(SCHEMA$); + } + + /** + * Creates a Builder by copying an existing Builder. + * @param other The existing Builder to copy. + */ + private Builder(argo.avro.MetricData.Builder other) { + super(other); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Creates a Builder by copying an existing MetricData instance + * @param other The existing instance to copy. 
+ */ + private Builder(argo.avro.MetricData other) { + super(SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value The value of 'timestamp'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTimestamp(java.lang.String value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** + * Checks whether the 'timestamp' field has been set. + * @return True if the 'timestamp' field has been set, false otherwise. + */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + + /** + * Clears the value of the 'timestamp' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** + * Gets the value of the 'service' field. + * @return The value. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value The value of 'service'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** + * Checks whether the 'service' field has been set. + * @return True if the 'service' field has been set, false otherwise. + */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + + /** + * Clears the value of the 'service' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value. 
+ */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value The value of 'hostname'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setHostname(java.lang.String value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** + * Checks whether the 'hostname' field has been set. + * @return True if the 'hostname' field has been set, false otherwise. + */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + + /** + * Clears the value of the 'hostname' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** + * Gets the value of the 'metric' field. + * @return The value. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value The value of 'metric'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMetric(java.lang.String value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** + * Checks whether the 'metric' field has been set. + * @return True if the 'metric' field has been set, false otherwise. + */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + + /** + * Clears the value of the 'metric' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** + * Gets the value of the 'status' field. + * @return The value. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value The value of 'status'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setStatus(java.lang.String value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** + * Checks whether the 'status' field has been set. + * @return True if the 'status' field has been set, false otherwise. + */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + + /** + * Clears the value of the 'status' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearStatus() { + status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value The value of 'monitoring_host'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMonitoringHost(java.lang.String value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** + * Checks whether the 'monitoring_host' field has been set. + * @return True if the 'monitoring_host' field has been set, false otherwise. + */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + + /** + * Clears the value of the 'monitoring_host' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value. 
+ */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value The value of 'actual_data'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setActualData(java.lang.String value) { + validate(fields()[6], value); + this.actual_data = value; + fieldSetFlags()[6] = true; + return this; + } + + /** + * Checks whether the 'actual_data' field has been set. + * @return True if the 'actual_data' field has been set, false otherwise. + */ + public boolean hasActualData() { + return fieldSetFlags()[6]; + } + + + /** + * Clears the value of the 'actual_data' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearActualData() { + actual_data = null; + fieldSetFlags()[6] = false; + return this; + } + + /** + * Gets the value of the 'summary' field. + * @return The value. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value The value of 'summary'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setSummary(java.lang.String value) { + validate(fields()[7], value); + this.summary = value; + fieldSetFlags()[7] = true; + return this; + } + + /** + * Checks whether the 'summary' field has been set. + * @return True if the 'summary' field has been set, false otherwise. + */ + public boolean hasSummary() { + return fieldSetFlags()[7]; + } + + + /** + * Clears the value of the 'summary' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearSummary() { + summary = null; + fieldSetFlags()[7] = false; + return this; + } + + /** + * Gets the value of the 'message' field. + * @return The value. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value The value of 'message'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMessage(java.lang.String value) { + validate(fields()[8], value); + this.message = value; + fieldSetFlags()[8] = true; + return this; + } + + /** + * Checks whether the 'message' field has been set. + * @return True if the 'message' field has been set, false otherwise. + */ + public boolean hasMessage() { + return fieldSetFlags()[8]; + } + + + /** + * Clears the value of the 'message' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMessage() { + message = null; + fieldSetFlags()[8] = false; + return this; + } + + /** + * Gets the value of the 'tags' field. + * @return The value. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value The value of 'tags'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTags(java.util.Map value) { + validate(fields()[9], value); + this.tags = value; + fieldSetFlags()[9] = true; + return this; + } + + /** + * Checks whether the 'tags' field has been set. + * @return True if the 'tags' field has been set, false otherwise. + */ + public boolean hasTags() { + return fieldSetFlags()[9]; + } + + + /** + * Clears the value of the 'tags' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTags() { + tags = null; + fieldSetFlags()[9] = false; + return this; + } + + @Override + public MetricData build() { + try { + MetricData record = new MetricData(); + record.timestamp = fieldSetFlags()[0] ? 
this.timestamp : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.String) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.String) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.String) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.String) defaultValue(fields()[5]); + record.actual_data = fieldSetFlags()[6] ? this.actual_data : (java.lang.String) defaultValue(fields()[6]); + record.summary = fieldSetFlags()[7] ? this.summary : (java.lang.String) defaultValue(fields()[7]); + record.message = fieldSetFlags()[8] ? this.message : (java.lang.String) defaultValue(fields()[8]); + record.tags = fieldSetFlags()[9] ? this.tags : (java.util.Map) defaultValue(fields()[9]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } + +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricDataOld.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricDataOld.java new file mode 100644 index 00000000..dafb8d40 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/avro/MetricDataOld.java @@ -0,0 +1,536 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricDataOld extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"status\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"monitoring_host\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"summary\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"message\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String timestamp; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String metric; + @Deprecated public java.lang.String status; + @Deprecated public java.lang.String monitoring_host; + @Deprecated public java.lang.String summary; + @Deprecated public java.lang.String message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public MetricDataOld() {} + + /** + * All-args constructor. 
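For reference, a minimal usage sketch of the generated MetricData builder shown above. All field values are made up for illustration; the newBuilder() and setter names follow the generated code in this diff.

import java.util.HashMap;
import java.util.Map;

import argo.avro.MetricData;

public class MetricDataBuilderSketch {
    public static void main(String[] args) {
        Map<String, String> tags = new HashMap<String, String>();
        tags.put("vo", "ops"); // hypothetical tag

        // Chain the generated setters and let build() fill in defaults and validate
        MetricData md = MetricData.newBuilder()
                .setTimestamp("2023-01-15T12:00:00Z")
                .setService("CREAM-CE")
                .setHostname("ce01.example.org")
                .setMetric("org.example.CertLifetime")
                .setStatus("OK")
                .setMonitoringHost("mon01.example.org")
                .setActualData("value=42")
                .setSummary("certificate valid")
                .setMessage("certificate lifetime is ok")
                .setTags(tags)
                .build();

        System.out.println(md.getHostname() + " " + md.getStatus());
    }
}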
+ */ + public MetricDataOld(java.lang.String timestamp, java.lang.String service, java.lang.String hostname, java.lang.String metric, java.lang.String status, java.lang.String monitoring_host, java.lang.String summary, java.lang.String message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return summary; + case 7: return message; + case 8: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: hostname = (java.lang.String)value$; break; + case 3: metric = (java.lang.String)value$; break; + case 4: status = (java.lang.String)value$; break; + case 5: monitoring_host = (java.lang.String)value$; break; + case 6: summary = (java.lang.String)value$; break; + case 7: message = (java.lang.String)value$; break; + case 8: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.String value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.String value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. 
+ */ + public void setMonitoringHost(java.lang.String value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'summary' field. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.String value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.String value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricData RecordBuilder */ + public static argo.avro.MetricDataOld.Builder newBuilder() { + return new argo.avro.MetricDataOld.Builder(); + } + + /** Creates a new MetricData RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricDataOld.Builder newBuilder(argo.avro.MetricDataOld.Builder other) { + return new argo.avro.MetricDataOld.Builder(other); + } + + /** Creates a new MetricData RecordBuilder by copying an existing MetricData instance */ + public static argo.avro.MetricDataOld.Builder newBuilder(argo.avro.MetricDataOld other) { + return new argo.avro.MetricDataOld.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. + */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String timestamp; + private java.lang.String service; + private java.lang.String hostname; + private java.lang.String metric; + private java.lang.String status; + private java.lang.String monitoring_host; + private java.lang.String summary; + private java.lang.String message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricDataOld.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricDataOld.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing MetricData instance */ + private Builder(argo.avro.MetricDataOld other) { + super(argo.avro.MetricDataOld.SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.summary)) { + this.summary = 
data().deepCopy(fields()[6].schema(), other.summary); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.message)) { + this.message = data().deepCopy(fields()[7].schema(), other.message); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.tags)) { + this.tags = data().deepCopy(fields()[8].schema(), other.tags); + fieldSetFlags()[8] = true; + } + } + + /** Gets the value of the 'timestamp' field */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** Sets the value of the 'timestamp' field */ + public argo.avro.MetricDataOld.Builder setTimestamp(java.lang.String value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'timestamp' field has been set */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'timestamp' field */ + public argo.avro.MetricDataOld.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricDataOld.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricDataOld.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.MetricDataOld.Builder setHostname(java.lang.String value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.MetricDataOld.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.String getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricDataOld.Builder setMetric(java.lang.String value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricDataOld.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'status' field */ + public java.lang.String getStatus() { + return status; + } + + /** Sets the value of the 'status' field */ + public argo.avro.MetricDataOld.Builder setStatus(java.lang.String value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'status' field has been set */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'status' field */ + public argo.avro.MetricDataOld.Builder clearStatus() { + 
status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** Gets the value of the 'monitoring_host' field */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** Sets the value of the 'monitoring_host' field */ + public argo.avro.MetricDataOld.Builder setMonitoringHost(java.lang.String value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** Checks whether the 'monitoring_host' field has been set */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + /** Clears the value of the 'monitoring_host' field */ + public argo.avro.MetricDataOld.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** Gets the value of the 'summary' field */ + public java.lang.String getSummary() { + return summary; + } + + /** Sets the value of the 'summary' field */ + public argo.avro.MetricDataOld.Builder setSummary(java.lang.String value) { + validate(fields()[6], value); + this.summary = value; + fieldSetFlags()[6] = true; + return this; + } + + /** Checks whether the 'summary' field has been set */ + public boolean hasSummary() { + return fieldSetFlags()[6]; + } + + /** Clears the value of the 'summary' field */ + public argo.avro.MetricDataOld.Builder clearSummary() { + summary = null; + fieldSetFlags()[6] = false; + return this; + } + + /** Gets the value of the 'message' field */ + public java.lang.String getMessage() { + return message; + } + + /** Sets the value of the 'message' field */ + public argo.avro.MetricDataOld.Builder setMessage(java.lang.String value) { + validate(fields()[7], value); + this.message = value; + fieldSetFlags()[7] = true; + return this; + } + + /** Checks whether the 'message' field has been set */ + public boolean hasMessage() { + return fieldSetFlags()[7]; + } + + /** Clears the value of the 'message' field */ + public argo.avro.MetricDataOld.Builder clearMessage() { + message = null; + fieldSetFlags()[7] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricDataOld.Builder setTags(java.util.Map value) { + validate(fields()[8], value); + this.tags = value; + fieldSetFlags()[8] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[8]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricDataOld.Builder clearTags() { + tags = null; + fieldSetFlags()[8] = false; + return this; + } + + @Override + public MetricDataOld build() { + try { + MetricDataOld record = new MetricDataOld(); + record.timestamp = fieldSetFlags()[0] ? this.timestamp : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.String) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.String) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.String) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.String) defaultValue(fields()[5]); + record.summary = fieldSetFlags()[6] ? this.summary : (java.lang.String) defaultValue(fields()[6]); + record.message = fieldSetFlags()[7] ? 
this.message : (java.lang.String) defaultValue(fields()[7]); + record.tags = fieldSetFlags()[8] ? this.tags : (java.util.Map) defaultValue(fields()[8]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/AmsIngestMetric.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/AmsIngestMetric.java new file mode 100644 index 00000000..78a0fac3 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/AmsIngestMetric.java @@ -0,0 +1,263 @@ +package argo.streaming; + + +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +import org.apache.avro.AvroRuntimeException; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.Decoder; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; +import org.apache.commons.codec.binary.Base64; +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.api.common.time.Time; +import org.apache.flink.api.java.utils.ParameterTool; + +import org.apache.flink.runtime.state.filesystem.FsStateBackend; +import org.apache.flink.streaming.api.datastream.DataStream; + +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; + +import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer; +import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink; +import org.apache.flink.util.Collector; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import com.google.gson.JsonElement; + +import com.google.gson.JsonParser; + +import argo.avro.MetricData; +import argo.avro.MetricDataOld; + + +/** + * Flink Job : Stream metric data from ARGO messaging to Hbase + * job required cli parameters: + * + * --ams.endpoint : ARGO messaging api endoint to connect to msg.example.com + * --ams.port : ARGO messaging api port + * --ams.token : ARGO messaging api token + * --ams.project : ARGO messaging api project to connect to + * --ams.sub : ARGO messaging subscription to pull from + * --hbase.master : hbase endpoint + * --hbase.master.port : hbase master port + * --hbase.zk.quorum : comma separated list of hbase zookeeper servers + * --hbase.zk.port : port used by hbase zookeeper servers + * --hbase.namespace : table namespace used (usually tenant name) + * --hbase.table : table name (usually metric_data) + * --check.path : checkpoint path + * --check.interval : checkpoint interval + * --hdfs.path : hdfs destination to write the data + * --ams.batch : num of messages to be retrieved per request to AMS service + * --ams.interval : interval (in ms) between AMS service requests + * --ams.proxy : optional http proxy url + * --ams.verify : optional turn on/off ssl verify + */ +public class AmsIngestMetric { + // setup logger + static Logger LOG = LoggerFactory.getLogger(AmsIngestMetric.class); + + /** + * Check if flink job has been called with ams rate params + */ + public static boolean hasAmsRateArgs(ParameterTool paramTool) { + String args[] = { "ams.batch", "ams.interval" }; + return hasArgs(args, paramTool); + } + + + /** + * Check if flink job has been called with checkpoint cli arguments + */ + public static boolean hasCheckArgs(ParameterTool paramTool) { + String args[] = { 
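The job reads its configuration through Flink's ParameterTool, so the CLI parameters documented above arrive as --key value pairs. A small sketch of how such an argument list is parsed; the endpoint, token, and HDFS path below are placeholders.

import org.apache.flink.api.java.utils.ParameterTool;

public class IngestArgsSketch {
    public static void main(String[] cliArgs) {
        // Placeholder argument list mirroring the documented parameters
        String[] args = {
            "--ams.endpoint", "msg.example.com",
            "--ams.port", "443",
            "--ams.token", "s3cr3t",
            "--ams.project", "TENANT_A",
            "--ams.sub", "ingest_metric",
            "--hdfs.path", "hdfs://namenode:8020/user/argo/tenants/TENANT_A/mdata",
            "--ams.batch", "100",
            "--ams.interval", "300"
        };

        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        System.out.println(parameterTool.getRequired("ams.endpoint")); // msg.example.com
        System.out.println(parameterTool.getInt("ams.batch"));         // 100
    }
}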
"check.path", "check.interval" }; + return hasArgs(args, paramTool); + } + + /** + * Check if flink job has been called with hdfs cli arguments + */ + public static boolean hasHdfsArgs(ParameterTool paramTool) { + String args[] = { "hdfs.path" }; + return hasArgs(args, paramTool); + } + + /** + * Check if flink job has been called with hbase cli arguments + */ + public static boolean hasHbaseArgs(ParameterTool paramTool) { + String args[] = { "hbase.master", "hbase.master.port", "hbase.zk.quorum", "hbase.zk.port", "hbase.namespace", + "hbase.table" }; + return hasArgs(args, paramTool); + } + + /** + * Check if a list of expected cli arguments have been provided to this flink job + */ + public static boolean hasArgs(String[] reqArgs, ParameterTool paramTool) { + + for (String reqArg : reqArgs) { + if (!paramTool.has(reqArg)) + return false; + } + + return true; + } + + public static void main(String[] args) throws Exception { + + // Create flink execution environment + StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment(); + see.setParallelism(1); + // On failure attempt max 10 times to restart with a retry interval of 2 minutes + see.setRestartStrategy(RestartStrategies.fixedDelayRestart(10, Time.of(2, TimeUnit.MINUTES))); + + // Initialize cli parameter tool + final ParameterTool parameterTool = ParameterTool.fromArgs(args); + + // set ams client batch and interval to default values + int batch = 1; + long interval = 100L; + long inactivityThresh = 1800000L; // default inactivity threshold value ~ 30mins + + if (hasAmsRateArgs(parameterTool)) { + batch = parameterTool.getInt("ams.batch"); + interval = parameterTool.getLong("ams.interval"); + } + + + // Initialize Input Source : ARGO Messaging Source + String endpoint = parameterTool.getRequired("ams.endpoint"); + String port = parameterTool.getRequired("ams.port"); + String token = parameterTool.getRequired("ams.token"); + String project = parameterTool.getRequired("ams.project"); + String sub = parameterTool.getRequired("ams.sub"); + + + // Check if checkpointing is desired + if (hasCheckArgs(parameterTool)) { + String checkPath = parameterTool.get("check.path"); + String checkInterval = parameterTool.get("check.interval"); + // Establish check-pointing mechanism using the cli-parameter check.path + see.setStateBackend(new FsStateBackend(checkPath)); + // Establish the check-pointing interval + long checkInt = Long.parseLong(checkInterval); + see.enableCheckpointing(checkInt); + } + + // Ingest sync avro encoded data from AMS endpoint + ArgoMessagingSource ams = new ArgoMessagingSource(endpoint, port, token, project, sub, batch, interval); + + if (parameterTool.has("ams.verify")) { + ams.setVerify(parameterTool.getBoolean("ams.verify")); + } + + if (parameterTool.has("ams.proxy")) { + ams.setProxy(parameterTool.get("ams.proxy")); + } + + DataStream metricDataJSON = see.addSource(ams); + DataStream metricDataPOJO = metricDataJSON.flatMap(new FlatMapFunction() { + + /** + * Flat Map Function that accepts AMS message and exports the metric data object (encoded in the payload) + */ + private static final long serialVersionUID = 1L; + + @Override + public void flatMap(String value, Collector out) throws Exception { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(value); + // parse the json field "data" and read it as string + // this is the base64 string payload + String data = jRoot.getAsJsonObject().get("data").getAsString(); + // Decode 
from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + // Decode from avro + + DatumReader avroReader = new SpecificDatumReader(MetricData.getClassSchema()); + Decoder decoder = DecoderFactory.get().binaryDecoder(decoded64, null); + + + MetricData item; + try { + item = avroReader.read(null, decoder); + } catch (java.io.EOFException ex) + { + //convert from old to new + avroReader = new SpecificDatumReader(MetricDataOld.getClassSchema(),MetricData.getClassSchema()); + decoder = DecoderFactory.get().binaryDecoder(decoded64, null); + item = avroReader.read(null, decoder); + } + if (item != null) { + LOG.info("Captured data -- {}", item.toString()); + out.collect(item); + } + + + } + }); + + // Check if saving to HDFS is desired + if (hasHdfsArgs(parameterTool)) { + String basePath = parameterTool.getRequired("hdfs.path"); + // Establish a bucketing sink to be able to store events with different daily + // timestamp parts (YYYY-MM-DD) + // in different daily files + BucketingSink bs = new BucketingSink(basePath); + bs.setInactiveBucketThreshold(inactivityThresh); + Bucketer tsBuck = new TSBucketer(); + bs.setBucketer(tsBuck); + bs.setPartPrefix("mdata"); + // Change default in progress prefix: _ to allow loading the file in + // AvroInputFormat + bs.setInProgressPrefix(""); + // Add .prog extension when a file is in progress mode + bs.setInProgressSuffix(".prog"); + // Add .pend extension when a file is in pending mode + bs.setPendingSuffix(".pend"); + + bs.setWriter(new SpecificAvroWriter()); + metricDataPOJO.addSink(bs); + } + + // Check if saving to Hbase is desired + if (hasHbaseArgs(parameterTool)) { + // Initialize Output : Hbase Output Format + HBaseMetricOutputFormat hbf = new HBaseMetricOutputFormat(); + hbf.setMaster(parameterTool.getRequired("hbase.master")); + hbf.setMasterPort(parameterTool.getRequired("hbase.master-port")); + hbf.setZkQuorum(parameterTool.getRequired("hbase.zk.quorum")); + hbf.setZkPort(parameterTool.getRequired("hbase.zk.port")); + hbf.setNamespace(parameterTool.getRequired("hbase.namespace")); + hbf.setTableName(parameterTool.getRequired("hbase.table")); + + metricDataPOJO.writeUsingOutputFormat(hbf); + } + + // Create a job title message to discern job in flink dashboard/cli + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Ingesting metric data from "); + jobTitleSB.append(endpoint); + jobTitleSB.append(":"); + jobTitleSB.append(port); + jobTitleSB.append("/v1/projects/"); + jobTitleSB.append(project); + jobTitleSB.append("/subscriptions/"); + jobTitleSB.append(sub); + + see.execute(jobTitleSB.toString()); + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingClient.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingClient.java new file mode 100644 index 00000000..76305f62 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingClient.java @@ -0,0 +1,335 @@ +package argo.streaming; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.client.methods.HttpPost; +import 
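Pulled out of the flatMap above, a minimal sketch of the payload decoding step: the AMS message body carries base64-encoded Avro, and when reading with the current MetricData schema fails with an EOF, the same bytes are re-read with the old writer schema resolved against the new reader schema. The Avro and codec calls match the job code; the wrapper class itself is hypothetical.

import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.commons.codec.binary.Base64;

import argo.avro.MetricData;
import argo.avro.MetricDataOld;

public class PayloadDecodeSketch {

    // Decode a base64 AMS payload into a MetricData record, falling back to the
    // older writer schema (without the 'actual_data' field) on EOF.
    public static MetricData decode(String base64Payload) throws java.io.IOException {
        byte[] avroBytes = Base64.decodeBase64(base64Payload.getBytes("UTF-8"));

        DatumReader<MetricData> reader =
            new SpecificDatumReader<MetricData>(MetricData.getClassSchema());
        Decoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null);
        try {
            return reader.read(null, decoder);
        } catch (java.io.EOFException ex) {
            // writer used the old schema: resolve old writer schema against new reader schema
            DatumReader<MetricData> oldReader =
                new SpecificDatumReader<MetricData>(MetricDataOld.getClassSchema(), MetricData.getClassSchema());
            return oldReader.read(null, DecoderFactory.get().binaryDecoder(avroBytes, null));
        }
    }
}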
org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; +import org.mortbay.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; + +/** + * Simple http client for pulling and acknowledging messages from AMS service + * http API + */ +public class ArgoMessagingClient { + + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingClient.class); + // Http Client for contanting AMS service + private CloseableHttpClient httpClient = null; + // AMS endpoint (hostname:port or hostname) + private String endpoint = null; + // AMS project (/v1/projects/{project}) + private String project = null; + // AMS token (?key={token}) + private String token = null; + // AMS subscription (/v1/projects/{project}/subscriptions/{sub}) + private String sub = null; + // protocol (https,http) + private String proto = null; + // numer of message to be pulled; + private String maxMessages = ""; + // ssl verify or not + private boolean verify = true; + // proxy + private URI proxy = null; + + // Utility inner class for holding list of messages and acknowledgements + private class MsgAck { + String[] msgs; + String[] ackIds; + + private MsgAck(String[] msgs, String[] ackIds) { + this.msgs = msgs; + this.ackIds = ackIds; + } + + } + + public ArgoMessagingClient() { + this.httpClient = HttpClients.createDefault(); + this.proto = "https"; + this.token = "token"; + this.endpoint = "localhost"; + this.project = "test_project"; + this.sub = "test_sub"; + this.maxMessages = "100"; + this.proxy = null; + } + + public ArgoMessagingClient(String method, String token, String endpoint, String project, String sub, int batch, + boolean verify) throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + + this.proto = method; + this.token = token; + this.endpoint = endpoint; + this.project = project; + this.sub = sub; + this.maxMessages = String.valueOf(batch); + this.verify = verify; + + this.httpClient = buildHttpClient(); + + } + + /** + * Initializes Http Client (if not initialized during constructor) + * + * @return + */ + private CloseableHttpClient buildHttpClient() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + if (this.verify) { + return this.httpClient = HttpClients.createDefault(); + } else { + return this.httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + } + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Set AMS http client to use http proxy + */ + public void setProxy(String proxyURL) throws URISyntaxException { + // parse proxy url + 
this.proxy = URI.create(proxyURL); + } + + /** + * Set AMS http client to NOT use an http proxy + */ + public void unsetProxy() { + this.proxy = null; + } + + /** + * Create a configuration for using http proxy on each request + */ + private RequestConfig createProxyCfg() { + HttpHost proxy = new HttpHost(this.proxy.getHost(), this.proxy.getPort(), this.proxy.getScheme()); + RequestConfig config = RequestConfig.custom().setProxy(proxy).build(); + return config; + } + + public void logIssue(CloseableHttpResponse resp) throws UnsupportedOperationException, IOException { + InputStreamReader isRdr = new InputStreamReader(resp.getEntity().getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + int statusCode = resp.getStatusLine().getStatusCode(); + + // Parse error content from api response + StringBuilder result = new StringBuilder(); + String rLine; + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + isRdr.close(); + Log.warn("ApiStatusCode={}, ApiErrorMessage={}", statusCode, result); + + } + + /** + * Properly compose url for each AMS request + */ + public String composeURL(String method) { + return proto + "://" + endpoint + "/v1/projects/" + project + "/subscriptions/" + sub + ":" + method + "?key=" + + token; + } + + /** + * Executes a pull request against AMS api + */ + public MsgAck doPull() throws IOException, KeyManagementException, NoSuchAlgorithmException, KeyStoreException { + + ArrayList msgList = new ArrayList(); + ArrayList ackIdList = new ArrayList(); + + // Create the http post to pull + HttpPost postPull = new HttpPost(this.composeURL("pull")); + StringEntity postBody = new StringEntity( + "{\"maxMessages\":\"" + this.maxMessages + "\",\"returnImmediately\":\"true\"}"); + postBody.setContentType("application/json"); + postPull.setEntity(postBody); + + if (this.httpClient == null) { + this.httpClient = buildHttpClient(); + } + + // check for proxy + if (this.proxy != null) { + postPull.setConfig(createProxyCfg()); + } + + CloseableHttpResponse response = this.httpClient.execute(postPull); + String msg = ""; + String ackId = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + + int statusCode = response.getStatusLine().getStatusCode(); + + if (entity != null && statusCode == 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + Log.info("response: {}", result.toString()); + JsonElement jRoot = jsonParser.parse(result.toString()); + + JsonArray jRec = jRoot.getAsJsonObject().get("receivedMessages").getAsJsonArray(); + + // if has elements + for (JsonElement jMsgItem : jRec) { + JsonElement jMsg = jMsgItem.getAsJsonObject().get("message"); + JsonElement jAckId = jMsgItem.getAsJsonObject().get("ackId"); + msg = jMsg.toString(); + ackId = jAckId.toString(); + msgList.add(msg); + ackIdList.add(ackId); + } + + isRdr.close(); + + } else { + + logIssue(response); + + } + + response.close(); + + String[] msgArr = msgList.toArray(new String[0]); + String[] ackIdArr = ackIdList.toArray(new String[0]); + + // Return a Message array + return new MsgAck(msgArr, ackIdArr); + + } + + /** + * Executes a combination of Pull & Ack requests against AMS api + */ + public String[] consume() throws KeyManagementException, NoSuchAlgorithmException, 
KeyStoreException { + String[] msgs = new String[0]; + // Try first to pull a message + try { + + MsgAck msgAck = doPull(); + // get last ackid + String ackId = ""; + if (msgAck.ackIds.length > 0) { + ackId = msgAck.ackIds[msgAck.ackIds.length - 1]; + } + + if (ackId != "") { + // Do an ack for the received message + String ackRes = doAck(ackId); + if (ackRes == "") { + Log.info("Message Acknowledged ackid:" + ackId); + msgs = msgAck.msgs; + + } else { + Log.warn("No acknowledment for ackid:" + ackId + "-" + ackRes); + } + } + } catch (IOException e) { + LOG.error(e.getMessage()); + } + return msgs; + + } + + /** + * Executes an Acknowledge request against AMS api + */ + public String doAck(String ackId) throws IOException { + + // Create the http post to ack + HttpPost postAck = new HttpPost(this.composeURL("acknowledge")); + StringEntity postBody = new StringEntity("{\"ackIds\":[" + ackId + "]}"); + postBody.setContentType("application/json"); + postAck.setEntity(postBody); + + // check for proxy + if (this.proxy != null) { + postAck.setConfig(createProxyCfg()); + } + + CloseableHttpResponse response = httpClient.execute(postAck); + String resMsg = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + int status = response.getStatusLine().getStatusCode(); + + if (status != 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + resMsg = result.toString(); + isRdr.close(); + + } else { + // Log any api errors + logIssue(response); + } + response.close(); + // Return a resposeMessage + return resMsg; + + } + + /** + * Close AMS http client + */ + public void close() throws IOException { + this.httpClient.close(); + } +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingSource.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingSource.java new file mode 100644 index 00000000..30997677 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/ArgoMessagingSource.java @@ -0,0 +1,135 @@ +package argo.streaming; + +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; + +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Custom source to connect to AMS service. 
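A minimal standalone sketch of how the client above could be driven outside of Flink, for example to smoke-test a subscription. Endpoint, token, project, and subscription values are placeholders; the constructor and method signatures follow the class in this diff.

import argo.streaming.ArgoMessagingClient;

public class AmsClientSketch {
    public static void main(String[] args) throws Exception {
        // method, token, endpoint (host:port), project, subscription, batch size, ssl verify
        ArgoMessagingClient client = new ArgoMessagingClient(
                "https", "s3cr3t-token", "msg.example.com:443",
                "TENANT_A", "ingest_metric", 100, true);

        // Optionally route requests through an http proxy:
        // client.setProxy("http://proxy.example.org:3128");

        // consume() performs a pull followed by an acknowledgement of the last ackId
        String[] messages = client.consume();
        for (String msg : messages) {
            System.out.println(msg);
        }

        client.close();
    }
}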
Uses ArgoMessaging client + */ +public class ArgoMessagingSource extends RichSourceFunction { + + private static final long serialVersionUID = 1L; + + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingSource.class); + + private String endpoint = null; + private String port = null; + private String token = null; + private String project = null; + private String sub = null; + private int batch = 1; + private long interval = 100L; + private boolean verify = true; + private boolean useProxy = false; + private String proxyURL = ""; + private transient Object rateLck; // lock for waiting to establish rate + + private volatile boolean isRunning = true; + + private ArgoMessagingClient client = null; + + + public ArgoMessagingSource(String endpoint, String port, String token, String project, String sub, int batch, Long interval) { + this.endpoint = endpoint; + this.port = port; + this.token = token; + this.project = project; + this.sub = sub; + this.interval = interval; + this.batch = batch; + this.verify = true; + + } + + /** + * Set verify to true or false. If set to false AMS client will be able to contact AMS endpoints that use self-signed certificates + */ + public void setVerify(boolean verify) { + this.verify=verify; + } + /** + * Set proxy details for AMS client + */ + public void setProxy(String proxyURL) { + this.useProxy = true; + this.proxyURL = proxyURL; + } + + /** + * Unset proxy details for AMS client + */ + public void unsetProxy(String proxyURL) { + this.useProxy = false; + this.proxyURL = ""; + } + + + @Override + public void cancel() { + isRunning = false; + + } + + @Override + public void run(SourceContext ctx) throws Exception { + // This is the main run logic + while (isRunning) { + String[] res = this.client.consume(); + if (res.length > 0) { + for (String msg : res) { + ctx.collect(msg); + } + + } + synchronized (rateLck) { + rateLck.wait(this.interval); + } + + } + + } + + /** + * AMS Source initialization + */ + @Override + public void open(Configuration parameters) throws Exception { + // init rate lock + rateLck = new Object(); + // init client + String fendpoint = this.endpoint; + if (this.port != null && !this.port.isEmpty()) { + fendpoint = this.endpoint + ":" + port; + } + try { + client = new ArgoMessagingClient("https", this.token, fendpoint, this.project, this.sub, this.batch, this.verify); + if (this.useProxy) { + client.setProxy(this.proxyURL); + } + } catch (KeyManagementException e) { + e.printStackTrace(); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (KeyStoreException e) { + e.printStackTrace(); + } + } + + @Override + public void close() throws Exception { + if (this.client != null) { + client.close(); + } + synchronized (rateLck) { + rateLck.notify(); + } + } + +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/HBaseMetricOutputFormat.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/HBaseMetricOutputFormat.java new file mode 100644 index 00000000..5a4b084e --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/HBaseMetricOutputFormat.java @@ -0,0 +1,137 @@ +package argo.streaming; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import 
org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; + +import argo.avro.MetricData; +import argo.avro.MetricDataOld; + +/** + * Hbase Output Format for storing Metric Data to an hbase destination + */ +public class HBaseMetricOutputFormat implements OutputFormat { + + private String master = null; + private String masterPort = null; + private String zkQuorum = null; + private String zkPort = null; + private String namespace = null; + private String tname = null; + private Connection connection = null; + private Table ht = null; + + + private static final long serialVersionUID = 1L; + + // Setters + public void setMasterPort(String masterPort) { + this.masterPort = masterPort; + } + + public void setMaster(String master) { + this.master = master; + } + + public void setZkQuorum(String zkQuorum) { + this.zkQuorum = zkQuorum; + } + + public void setZkPort(String zkPort) { + this.zkPort = zkPort; + } + + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + public void setTableName(String tname) { + this.tname = tname; + } + + @Override + public void configure(Configuration parameters) { + + } + + /** + * Initialize Hbase remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Create hadoop based configuration for hclient to use + org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create(); + // Modify configuration to job needs + config.setInt("timeout", 120000); + if (masterPort != null && !masterPort.isEmpty()){ + config.set("hbase.master", master + ":" + masterPort); + }else { + config.set("hbase.master", master + ":60000"); + } + + config.set("hbase.zookeeper.quorum", zkQuorum); + config.set("hbase.zookeeper.property.clientPort", (zkPort)); + // Create the connection + connection = ConnectionFactory.createConnection(config); + if (namespace != null) { + ht = connection.getTable(TableName.valueOf(namespace + ":" + tname)); + } else { + ht = connection.getTable(TableName.valueOf(tname)); + } + + } + + /** + * Store a Metric Data object as an Hbase Record + */ + @Override + public void writeRecord(MetricData record) throws IOException { + + String ts = record.getTimestamp(); + String host = record.getHostname(); + String service = record.getService(); + String metric = record.getMetric(); + String mHost = record.getMonitoringHost(); + String status = record.getStatus(); + String summary = record.getSummary(); + String msg = record.getMessage(); + String tags = record.getTags().toString(); + + // Compile key + String key = host + "|" + service + "|" + metric + "|" +ts+ "|" + mHost; + + // Prepare columns + Put put = new Put(Bytes.toBytes(key)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("timestamp"), Bytes.toBytes(ts)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("host"), Bytes.toBytes(host)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("service"), Bytes.toBytes(service)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("metric"), Bytes.toBytes(metric)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("monitoring_host"), Bytes.toBytes(mHost)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("status"), Bytes.toBytes(status)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("summary"), Bytes.toBytes(summary)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("msg"), Bytes.toBytes(msg)); + 
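+ // Illustration (not part of the original job code, values are hypothetical):
+ // a record for host "ce01.example.org", service "CREAM-CE", metric
+ // "org.example.CertLifetime", timestamp "2023-01-15T12:00:00Z" and monitoring
+ // host "mon01.example.org" is stored under the row key
+ //   ce01.example.org|CREAM-CE|org.example.CertLifetime|2023-01-15T12:00:00Z|mon01.example.org
+ // with one qualifier per field in the "data" column family.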
put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("tags"), Bytes.toBytes(tags)); + + // Insert row in hbase + ht.put(put); + + } + + /** + * Close Hbase Connection + */ + @Override + public void close() throws IOException { + ht.close(); + connection.close(); + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/MetricParse.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/MetricParse.java new file mode 100644 index 00000000..40873bfb --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/MetricParse.java @@ -0,0 +1,65 @@ +package argo.streaming; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; + +import com.google.gson.JsonElement; + +import argo.avro.MetricData; +import argo.avro.MetricDataOld; + + + +/** + * SyncParse is a utility class providing methods to parse specific connector data in avro format + */ +public class MetricParse { + + /** + * Parses a byte array and decodes avro MetricData objects + */ + public static ArrayList parseGroupEndpoint(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(MetricDataOld.getClassSchema(),MetricData.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + MetricData cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + + /** + * Parses attributes from a json attribute element + */ + public static Map parseAttributes(JsonElement jAttr) throws IOException{ + + Map result = new HashMap(); + if (jAttr!=null){ + Set> jItems = jAttr.getAsJsonObject().entrySet(); + + for (Entry jItem : jItems){ + result.put(jItem.getKey(), jItem.getValue().getAsString()); + } + } + + return result; + } + + +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/SpecificAvroWriter.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/SpecificAvroWriter.java new file mode 100644 index 00000000..5ea8c472 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/SpecificAvroWriter.java @@ -0,0 +1,93 @@ +package argo.streaming; + +import java.io.IOException; + + +import org.apache.avro.file.DataFileWriter; +import org.apache.avro.io.DatumWriter; +import org.apache.avro.specific.SpecificDatumWriter; +import org.apache.flink.streaming.connectors.fs.StreamWriterBase; +import org.apache.flink.streaming.connectors.fs.Writer; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + + +/** + * Implements a specific AvroWriter For MetricData Avro Objects + */ +public class SpecificAvroWriter extends StreamWriterBase { + + + private static final long serialVersionUID = 1L; + + private transient FSDataOutputStream outputStream = null; + private transient DataFileWriter outputWriter = null; + + + public SpecificAvroWriter(){ + + } + + + + + + @Override + public void close() throws IOException { + if(outputWriter != null) { + outputWriter.sync(); + outputWriter.close(); + } + 
outputWriter = null; + outputStream = null; + + } + + + + /** + * Write the avro element to the output writer + */ + @Override + public void write(T item) throws IOException { + if (outputStream == null) { + throw new IllegalStateException("AvroWriter has not been opened."); + } + outputWriter.append(item); + + + } + + @Override + public void open(FileSystem fs, Path path) throws IOException { + super.open(fs, path); + if (outputStream != null) { + throw new IllegalStateException("AvroWriter has already been opened."); + } + outputStream = getStream(); + + if(outputWriter == null) { + DatumWriter writer = new SpecificDatumWriter(argo.avro.MetricData.getClassSchema()); + outputWriter = new DataFileWriter(writer); + + outputWriter.create(argo.avro.MetricData.getClassSchema(),outputStream); + } + + } + + @Override + public long flush() throws IOException { + if (outputWriter != null) { + outputWriter.sync(); + } + return super.flush(); + } + + @Override + public Writer duplicate() { + return new SpecificAvroWriter(); + } + + +} diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/TSBucketer.java b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/TSBucketer.java new file mode 100644 index 00000000..bae5cedf --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/java/argo/streaming/TSBucketer.java @@ -0,0 +1,25 @@ +package argo.streaming; + +import org.apache.flink.streaming.connectors.fs.Clock; +import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer; +import org.apache.hadoop.fs.Path; + +import com.esotericsoftware.minlog.Log; + +import argo.avro.MetricData; + + +public class TSBucketer implements Bucketer { + + private static final long serialVersionUID = 1L; + + /** + * Create a specific bucket based on the timestamp field of element + */ + @Override + public Path getBucketPath(final Clock clock, final Path basePath, final MetricData element) { + + String dailyPart = element.getTimestamp().split("T")[0]; + return new Path(basePath + "/" + dailyPart); + } +} \ No newline at end of file diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/resources/log4j.properties b/flink_jobs/old-models/ams_ingest_metric/src/main/resources/log4j.properties new file mode 100644 index 00000000..da32ea0f --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/flink_jobs/old-models/ams_ingest_metric/src/main/resources/schemas/metric_data.avsc b/flink_jobs/old-models/ams_ingest_metric/src/main/resources/schemas/metric_data.avsc new file mode 100644 index 00000000..737e0ead --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_metric/src/main/resources/schemas/metric_data.avsc @@ -0,0 +1,18 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "metric_data", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} \ No newline at end of file diff --git a/flink_jobs/old-models/ams_ingest_sync/.gitignore b/flink_jobs/old-models/ams_ingest_sync/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/old-models/ams_ingest_sync/pom.xml b/flink_jobs/old-models/ams_ingest_sync/pom.xml new file mode 100644 index 00000000..05dc7ab0 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/pom.xml @@ -0,0 +1,346 @@ + + + + + 4.0.0 + argo.streaming + 0.0.1-SNAPSHOT + ams-ingest-sync + Stream sync data from AMS to HDFS + + + UTF-8 + 1.3.1 + 1.7.7 + 1.2.17 + 2.10 + + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + org.apache.flink + flink-connector-filesystem_2.10 + 1.2.1 + + + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + log4j + log4j + ${log4j.version} + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + com.google.code.gson + gson + 2.7 + + + junit + junit + 4.11 + test + + + junit-addons + junit-addons + 1.4 + test + + + + + + + build-jar + + + false + + + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-connector-filesystem_2.10 + 1.2.1 + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + provided + + + log4j + log4j + ${log4j.version} + provided + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + com.google.code.gson + gson + 2.7 + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop2 + 
org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.10 + org.apache.flink:flink-runtime_2.10 + org.apache.flink:flink-optimizer_2.10 + org.apache.flink:flink-clients_2.10 + org.apache.flink:flink-avro_2.10 + org.apache.flink:flink-examples-batch_2.10 + org.apache.flink:flink-examples-streaming_2.10 + org.apache.flink:flink-streaming-java_2.10 + org.apache.flink:flink-streaming-scala_2.10 + org.apache.flink:flink-scala-shell_2.10 + org.apache.flink:flink-python + org.apache.flink:flink-metrics-core + org.apache.flink:flink-metrics-jmx + org.apache.flink:flink-statebackend-rocksdb_2.10 + + + + log4j:log4j + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.data-artisans:flakka-actor_* + com.data-artisans:flakka-remote_* + com.data-artisans:flakka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + commons-lang:commons-lang + junit:junit + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + ams-ingest-sync + diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Downtime.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Downtime.java new file mode 100644 index 00000000..b73e100d --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Downtime.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Downtime extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Downtime\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"start_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"end_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String 
service; + @Deprecated public java.lang.String start_time; + @Deprecated public java.lang.String end_time; + + /** + * Default constructor. + */ + public Downtime() {} + + /** + * All-args constructor. + */ + public Downtime(java.lang.String hostname, java.lang.String service, java.lang.String start_time, java.lang.String end_time) { + this.hostname = hostname; + this.service = service; + this.start_time = start_time; + this.end_time = end_time; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return hostname; + case 1: return service; + case 2: return start_time; + case 3: return end_time; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: hostname = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: start_time = (java.lang.String)value$; break; + case 3: end_time = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'start_time' field. + */ + public java.lang.String getStartTime() { + return start_time; + } + + /** + * Sets the value of the 'start_time' field. + * @param value the value to set. + */ + public void setStartTime(java.lang.String value) { + this.start_time = value; + } + + /** + * Gets the value of the 'end_time' field. + */ + public java.lang.String getEndTime() { + return end_time; + } + + /** + * Sets the value of the 'end_time' field. + * @param value the value to set. + */ + public void setEndTime(java.lang.String value) { + this.end_time = value; + } + + /** Creates a new Downtime RecordBuilder */ + public static argo.avro.Downtime.Builder newBuilder() { + return new argo.avro.Downtime.Builder(); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Builder */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime.Builder other) { + return new argo.avro.Downtime.Builder(other); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Downtime instance */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime other) { + return new argo.avro.Downtime.Builder(other); + } + + /** + * RecordBuilder for Downtime instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String hostname; + private java.lang.String service; + private java.lang.String start_time; + private java.lang.String end_time; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Downtime.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Downtime.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Downtime instance */ + private Builder(argo.avro.Downtime other) { + super(argo.avro.Downtime.SCHEMA$); + if (isValidValue(fields()[0], other.hostname)) { + this.hostname = data().deepCopy(fields()[0].schema(), other.hostname); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.start_time)) { + this.start_time = data().deepCopy(fields()[2].schema(), other.start_time); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.end_time)) { + this.end_time = data().deepCopy(fields()[3].schema(), other.end_time); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.Downtime.Builder setHostname(java.lang.String value) { + validate(fields()[0], value); + this.hostname = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.Downtime.Builder clearHostname() { + hostname = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.Downtime.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.Downtime.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'start_time' field */ + public java.lang.String getStartTime() { + return start_time; + } + + /** Sets the value of the 'start_time' field */ + public argo.avro.Downtime.Builder setStartTime(java.lang.String value) { + validate(fields()[2], value); + this.start_time = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'start_time' field has been set */ + public boolean hasStartTime() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'start_time' field */ + public argo.avro.Downtime.Builder clearStartTime() { + start_time = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'end_time' field */ + public java.lang.String getEndTime() { + return end_time; + } + + /** Sets the value of the 'end_time' field */ + public argo.avro.Downtime.Builder setEndTime(java.lang.String value) { + validate(fields()[3], value); + this.end_time = value; + 
fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'end_time' field has been set */ + public boolean hasEndTime() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'end_time' field */ + public argo.avro.Downtime.Builder clearEndTime() { + end_time = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public Downtime build() { + try { + Downtime record = new Downtime(); + record.hostname = fieldSetFlags()[0] ? this.hostname : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.start_time = fieldSetFlags()[2] ? this.start_time : (java.lang.String) defaultValue(fields()[2]); + record.end_time = fieldSetFlags()[3] ? this.end_time : (java.lang.String) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupEndpoint.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupEndpoint.java new file mode 100644 index 00000000..2386b1d2 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupEndpoint.java @@ -0,0 +1,336 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupEndpoint extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupEndpoint\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupEndpoint() {} + + /** + * All-args constructor. + */ + public GroupEndpoint(java.lang.String type, java.lang.String group, java.lang.String service, java.lang.String hostname, java.util.Map tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return service; + case 3: return hostname; + case 4: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
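+  // ---------------------------------------------------------------------------
+  // Illustrative sketch (not part of the generated sources or of this diff):
+  // how a record such as Downtime is typically built through its generated
+  // Builder and encoded to Avro binary, the payload format the sync ingestion
+  // job ultimately consumes. All values are hypothetical; the writer/encoder
+  // calls are the standard Avro 1.7 specific API.
+  // ---------------------------------------------------------------------------
+  private static byte[] encodeDowntimeSketch() throws java.io.IOException {
+    argo.avro.Downtime dt = argo.avro.Downtime.newBuilder()
+        .setHostname("host01.example.org")        // hypothetical host
+        .setService("CREAM-CE")                   // hypothetical service flavour
+        .setStartTime("2019-01-01T00:00:00Z")
+        .setEndTime("2019-01-01T06:00:00Z")
+        .build();
+    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
+    org.apache.avro.specific.SpecificDatumWriter<argo.avro.Downtime> writer =
+        new org.apache.avro.specific.SpecificDatumWriter<argo.avro.Downtime>(argo.avro.Downtime.getClassSchema());
+    org.apache.avro.io.BinaryEncoder encoder = org.apache.avro.io.EncoderFactory.get().binaryEncoder(out, null);
+    writer.write(dt, encoder);
+    encoder.flush();
+    return out.toByteArray();
+  }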
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: service = (java.lang.String)value$; break; + case 3: hostname = (java.lang.String)value$; break; + case 4: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupEndpoint RecordBuilder */ + public static argo.avro.GroupEndpoint.Builder newBuilder() { + return new argo.avro.GroupEndpoint.Builder(); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint.Builder other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing GroupEndpoint instance */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** + * RecordBuilder for GroupEndpoint instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String service; + private java.lang.String hostname; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupEndpoint.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupEndpoint.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupEndpoint instance */ + private Builder(argo.avro.GroupEndpoint other) { + super(argo.avro.GroupEndpoint.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.service)) { + this.service = data().deepCopy(fields()[2].schema(), other.service); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.hostname)) { + this.hostname = data().deepCopy(fields()[3].schema(), other.hostname); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.tags)) { + this.tags = data().deepCopy(fields()[4].schema(), other.tags); + fieldSetFlags()[4] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder setService(java.lang.String value) { + validate(fields()[2], value); + this.service = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder clearService() { + service = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public 
argo.avro.GroupEndpoint.Builder setHostname(java.lang.String value) { + validate(fields()[3], value); + this.hostname = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.GroupEndpoint.Builder clearHostname() { + hostname = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder setTags(java.util.Map value) { + validate(fields()[4], value); + this.tags = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder clearTags() { + tags = null; + fieldSetFlags()[4] = false; + return this; + } + + @Override + public GroupEndpoint build() { + try { + GroupEndpoint record = new GroupEndpoint(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.service = fieldSetFlags()[2] ? this.service : (java.lang.String) defaultValue(fields()[2]); + record.hostname = fieldSetFlags()[3] ? this.hostname : (java.lang.String) defaultValue(fields()[3]); + record.tags = fieldSetFlags()[4] ? this.tags : (java.util.Map) defaultValue(fields()[4]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupGroup.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupGroup.java new file mode 100644 index 00000000..a7712d67 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/GroupGroup.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupGroup extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupGroup\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"subgroup\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String subgroup; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupGroup() {} + + /** + * All-args constructor. 
+ */ + public GroupGroup(java.lang.String type, java.lang.String group, java.lang.String subgroup, java.util.Map tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return subgroup; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: subgroup = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'subgroup' field. + */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** + * Sets the value of the 'subgroup' field. + * @param value the value to set. + */ + public void setSubgroup(java.lang.String value) { + this.subgroup = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupGroup RecordBuilder */ + public static argo.avro.GroupGroup.Builder newBuilder() { + return new argo.avro.GroupGroup.Builder(); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup.Builder other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing GroupGroup instance */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** + * RecordBuilder for GroupGroup instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String subgroup; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupGroup.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupGroup.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupGroup instance */ + private Builder(argo.avro.GroupGroup other) { + super(argo.avro.GroupGroup.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.subgroup)) { + this.subgroup = data().deepCopy(fields()[2].schema(), other.subgroup); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupGroup.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupGroup.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupGroup.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupGroup.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'subgroup' field */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** Sets the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder setSubgroup(java.lang.String value) { + validate(fields()[2], value); + this.subgroup = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'subgroup' field has been set */ + public boolean hasSubgroup() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder clearSubgroup() { + subgroup = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return 
fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public GroupGroup build() { + try { + GroupGroup record = new GroupGroup(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.subgroup = fieldSetFlags()[2] ? this.subgroup : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/MetricProfile.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/MetricProfile.java new file mode 100644 index 00000000..1fe15e09 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/MetricProfile.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricProfile extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricProfile\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"profile\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String profile; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String metric; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public MetricProfile() {} + + /** + * All-args constructor. + */ + public MetricProfile(java.lang.String profile, java.lang.String service, java.lang.String metric, java.util.Map tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return profile; + case 1: return service; + case 2: return metric; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: profile = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: metric = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'profile' field. 
+ */ + public java.lang.String getProfile() { + return profile; + } + + /** + * Sets the value of the 'profile' field. + * @param value the value to set. + */ + public void setProfile(java.lang.String value) { + this.profile = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'metric' field. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricProfile RecordBuilder */ + public static argo.avro.MetricProfile.Builder newBuilder() { + return new argo.avro.MetricProfile.Builder(); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile.Builder other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing MetricProfile instance */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** + * RecordBuilder for MetricProfile instances. 
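+  // ---------------------------------------------------------------------------
+  // Illustrative sketch (not part of the generated sources): the inverse of the
+  // encoding shown earlier -- decoding a binary payload holding one or more
+  // MetricProfile records with the same isEnd() loop that SyncHDFSOutputFormat
+  // applies per record type further down in this diff. Standard Avro 1.7 API.
+  // ---------------------------------------------------------------------------
+  private static java.util.List<argo.avro.MetricProfile> decodeMetricProfilesSketch(byte[] payload)
+      throws java.io.IOException {
+    org.apache.avro.specific.SpecificDatumReader<argo.avro.MetricProfile> reader =
+        new org.apache.avro.specific.SpecificDatumReader<argo.avro.MetricProfile>(
+            argo.avro.MetricProfile.getClassSchema());
+    org.apache.avro.io.BinaryDecoder decoder =
+        org.apache.avro.io.DecoderFactory.get().binaryDecoder(payload, null);
+    java.util.List<argo.avro.MetricProfile> items = new java.util.ArrayList<argo.avro.MetricProfile>();
+    // keep reading records until the decoder reports end of input
+    while (!decoder.isEnd()) {
+      items.add(reader.read(null, decoder));
+    }
+    return items;
+  }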
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String profile; + private java.lang.String service; + private java.lang.String metric; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricProfile.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricProfile.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing MetricProfile instance */ + private Builder(argo.avro.MetricProfile other) { + super(argo.avro.MetricProfile.SCHEMA$); + if (isValidValue(fields()[0], other.profile)) { + this.profile = data().deepCopy(fields()[0].schema(), other.profile); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.metric)) { + this.metric = data().deepCopy(fields()[2].schema(), other.metric); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'profile' field */ + public java.lang.String getProfile() { + return profile; + } + + /** Sets the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder setProfile(java.lang.String value) { + validate(fields()[0], value); + this.profile = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'profile' field has been set */ + public boolean hasProfile() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder clearProfile() { + profile = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricProfile.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricProfile.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.String getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder setMetric(java.lang.String value) { + validate(fields()[2], value); + this.metric = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder clearMetric() { + metric = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks 
whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public MetricProfile build() { + try { + MetricProfile record = new MetricProfile(); + record.profile = fieldSetFlags()[0] ? this.profile : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.metric = fieldSetFlags()[2] ? this.metric : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Weight.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Weight.java new file mode 100644 index 00000000..0238d7cf --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/avro/Weight.java @@ -0,0 +1,236 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Weight extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Weight\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"site\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"weight\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String site; + @Deprecated public java.lang.String weight; + + /** + * Default constructor. + */ + public Weight() {} + + /** + * All-args constructor. + */ + public Weight(java.lang.String type, java.lang.String site, java.lang.String weight) { + this.type = type; + this.site = site; + this.weight = weight; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return site; + case 2: return weight; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: site = (java.lang.String)value$; break; + case 2: weight = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'site' field. + */ + public java.lang.String getSite() { + return site; + } + + /** + * Sets the value of the 'site' field. 
+ * @param value the value to set. + */ + public void setSite(java.lang.String value) { + this.site = value; + } + + /** + * Gets the value of the 'weight' field. + */ + public java.lang.String getWeight() { + return weight; + } + + /** + * Sets the value of the 'weight' field. + * @param value the value to set. + */ + public void setWeight(java.lang.String value) { + this.weight = value; + } + + /** Creates a new Weight RecordBuilder */ + public static argo.avro.Weight.Builder newBuilder() { + return new argo.avro.Weight.Builder(); + } + + /** Creates a new Weight RecordBuilder by copying an existing Builder */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight.Builder other) { + return new argo.avro.Weight.Builder(other); + } + + /** Creates a new Weight RecordBuilder by copying an existing Weight instance */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight other) { + return new argo.avro.Weight.Builder(other); + } + + /** + * RecordBuilder for Weight instances. + */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String site; + private java.lang.String weight; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Weight.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Weight.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Weight instance */ + private Builder(argo.avro.Weight other) { + super(argo.avro.Weight.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.site)) { + this.site = data().deepCopy(fields()[1].schema(), other.site); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.weight)) { + this.weight = data().deepCopy(fields()[2].schema(), other.weight); + fieldSetFlags()[2] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.Weight.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.Weight.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'site' field */ + public java.lang.String getSite() { + return site; + } + + /** Sets the value of the 'site' field */ + public argo.avro.Weight.Builder setSite(java.lang.String value) { + validate(fields()[1], value); + this.site = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'site' field has been set */ + public boolean hasSite() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'site' field */ + public argo.avro.Weight.Builder clearSite() { + site = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'weight' field */ + public java.lang.String getWeight() { + return weight; + } + + /** Sets the value of the 'weight' field */ + public argo.avro.Weight.Builder setWeight(java.lang.String value) { + validate(fields()[2], value); + this.weight = 
value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'weight' field has been set */ + public boolean hasWeight() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'weight' field */ + public argo.avro.Weight.Builder clearWeight() { + weight = null; + fieldSetFlags()[2] = false; + return this; + } + + @Override + public Weight build() { + try { + Weight record = new Weight(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.site = fieldSetFlags()[1] ? this.site : (java.lang.String) defaultValue(fields()[1]); + record.weight = fieldSetFlags()[2] ? this.weight : (java.lang.String) defaultValue(fields()[2]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/AmsIngestSync.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/AmsIngestSync.java new file mode 100644 index 00000000..43815d66 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/AmsIngestSync.java @@ -0,0 +1,116 @@ +package argo.streaming; + +import java.util.concurrent.TimeUnit; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.api.common.time.Time; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Flink Streaming JOB for Ingesting Sync Data to HDFS + * job required cli parameters: + * --ams.endpoint : ARGO messaging api endoint to connect to msg.example.com + * --ams.port : ARGO messaging api port + * --ams.token : ARGO messaging api token + * --ams.project : ARGO messaging api project to connect to + * --ams.sub.metric : ARGO messaging subscription to pull metric data from + * --ams.sub.sync : ARGO messaging subscription to pull sync data from + * --hdfs.path : Hdfs destination path to store the data + * --ams.batch : num of messages to be retrieved per request to AMS service + * --ams.interval : interval (in ms) between AMS service requests + * --ams.proxy : optional http proxy url + * --ams.verify : optional turn on/off ssl verify + */ +public class AmsIngestSync { + + // setup logger + static Logger LOG = LoggerFactory.getLogger(AmsIngestSync.class); + + /** + * Check if a list of expected cli arguments have been provided to this flink job + */ + public static boolean hasArgs(String[] reqArgs, ParameterTool paramTool) { + + for (String reqArg : reqArgs) { + if (!paramTool.has(reqArg)) + return false; + } + + return true; + } + + /** + * Check if flink job has been called with ams rate params + */ + public static boolean hasAmsRateArgs(ParameterTool paramTool) { + String args[] = { "ams.batch", "ams.interval" }; + return hasArgs(args, paramTool); + } + + // main job function + public static void main(String[] args) throws Exception { + + // Create flink execution enviroment + StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment(); + see.setParallelism(1); + // Fixed restart strategy: on failure attempt max 10 times to restart with a retry interval of 2 minutes + see.setRestartStrategy(RestartStrategies.fixedDelayRestart(10, Time.of(2, TimeUnit.MINUTES))); + // Initialize cli parameter tool + final ParameterTool parameterTool = 
ParameterTool.fromArgs(args); + + // Initialize Input Source : ARGO Messaging Source + String endpoint = parameterTool.getRequired("ams.endpoint"); + String port = parameterTool.getRequired("ams.port"); + String token = parameterTool.getRequired("ams.token"); + String project = parameterTool.getRequired("ams.project"); + String sub = parameterTool.getRequired("ams.sub"); + String basePath = parameterTool.getRequired("hdfs.path"); + + // set ams client batch and interval to default values + int batch = 1; + long interval = 100L; + + if (hasAmsRateArgs(parameterTool)) { + batch = parameterTool.getInt("ams.batch"); + interval = parameterTool.getLong("ams.interval"); + } + + + //Ingest sync avro encoded data from AMS endpoint + ArgoMessagingSource ams = new ArgoMessagingSource(endpoint, port, token, project, sub, batch, interval); + + if (parameterTool.has("ams.verify")){ + ams.setVerify(parameterTool.getBoolean("ams.verify")); + } + + if (parameterTool.has("ams.proxy")) { + ams.setProxy(parameterTool.get("ams.proxy")); + } + DataStream syncDataStream = see + .addSource(ams); + + SyncHDFSOutputFormat hdfsOut = new SyncHDFSOutputFormat(); + hdfsOut.setBasePath(basePath); + + syncDataStream.writeUsingOutputFormat(hdfsOut); + + // Create a job title message to discern job in flink dashboard/cli + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Ingesting sync data from "); + jobTitleSB.append(endpoint); + jobTitleSB.append(":"); + jobTitleSB.append(port); + jobTitleSB.append("/v1/projects/"); + jobTitleSB.append(project); + jobTitleSB.append("/subscriptions/"); + jobTitleSB.append(sub); + + see.execute(jobTitleSB.toString()); + + } + +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingClient.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingClient.java new file mode 100644 index 00000000..4e6e1527 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingClient.java @@ -0,0 +1,334 @@ +package argo.streaming; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; +import org.mortbay.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; + +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; + +/** + * Simple http client for pulling and acknowledging messages from AMS service + * http API + */ +public class ArgoMessagingClient { + + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingClient.class); + // Http Client for contanting AMS service + private CloseableHttpClient httpClient = null; + // AMS endpoint (hostname:port or 
hostname) + private String endpoint = null; + // AMS project (/v1/projects/{project}) + private String project = null; + // AMS token (?key={token}) + private String token = null; + // AMS subscription (/v1/projects/{project}/subscriptions/{sub}) + private String sub = null; + // protocol (https,http) + private String proto = null; + // numer of message to be pulled; + private String maxMessages = ""; + // ssl verify or not + private boolean verify = true; + // proxy + private URI proxy = null; + + // Utility inner class for holding list of messages and acknowledgements + private class MsgAck { + String[] msgs; + String[] ackIds; + + private MsgAck(String[] msgs, String[] ackIds) { + this.msgs = msgs; + this.ackIds = ackIds; + } + + } + + public ArgoMessagingClient() { + this.httpClient = HttpClients.createDefault(); + this.proto = "https"; + this.token = "token"; + this.endpoint = "localhost"; + this.project = "test_project"; + this.sub = "test_sub"; + this.maxMessages = "100"; + this.proxy = null; + } + + public ArgoMessagingClient(String method, String token, String endpoint, String project, String sub, int batch, + boolean verify) throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + + this.proto = method; + this.token = token; + this.endpoint = endpoint; + this.project = project; + this.sub = sub; + this.maxMessages = String.valueOf(batch); + this.verify = verify; + + this.httpClient = buildHttpClient(); + + } + + /** + * Initializes Http Client (if not initialized during constructor) + * + * @return + */ + private CloseableHttpClient buildHttpClient() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + if (this.verify) { + return this.httpClient = HttpClients.createDefault(); + } else { + return this.httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + } + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Set AMS http client to use http proxy + */ + public void setProxy(String proxyURL) throws URISyntaxException { + // parse proxy url + this.proxy = URI.create(proxyURL); + } + + /** + * Set AMS http client to NOT use an http proxy + */ + public void unsetProxy() { + this.proxy = null; + } + + /** + * Create a configuration for using http proxy on each request + */ + private RequestConfig createProxyCfg() { + HttpHost proxy = new HttpHost(this.proxy.getHost(), this.proxy.getPort(), this.proxy.getScheme()); + RequestConfig config = RequestConfig.custom().setProxy(proxy).build(); + return config; + } + + public void logIssue(CloseableHttpResponse resp) throws UnsupportedOperationException, IOException { + InputStreamReader isRdr = new InputStreamReader(resp.getEntity().getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + int statusCode = resp.getStatusLine().getStatusCode(); + + // Parse error content from api response + StringBuilder result = new StringBuilder(); + String rLine; + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + isRdr.close(); + Log.warn("ApiStatusCode={}, ApiErrorMessage={}", statusCode, result); + + } + + /** + * Properly compose url 
for each AMS request + */ + public String composeURL(String method) { + return proto + "://" + endpoint + "/v1/projects/" + project + "/subscriptions/" + sub + ":" + method + "?key=" + + token; + } + + /** + * Executes a pull request against AMS api + */ + public MsgAck doPull() throws IOException, KeyManagementException, NoSuchAlgorithmException, KeyStoreException { + + ArrayList msgList = new ArrayList(); + ArrayList ackIdList = new ArrayList(); + + // Create the http post to pull + HttpPost postPull = new HttpPost(this.composeURL("pull")); + StringEntity postBody = new StringEntity( + "{\"maxMessages\":\"" + this.maxMessages + "\",\"returnImmediately\":\"true\"}"); + postBody.setContentType("application/json"); + postPull.setEntity(postBody); + + if (this.httpClient == null) { + this.httpClient = buildHttpClient(); + } + + // check for proxy + if (this.proxy != null) { + postPull.setConfig(createProxyCfg()); + } + + CloseableHttpResponse response = this.httpClient.execute(postPull); + String msg = ""; + String ackId = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + + int statusCode = response.getStatusLine().getStatusCode(); + + if (entity != null && statusCode == 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + Log.info("response: {}", result.toString()); + JsonElement jRoot = jsonParser.parse(result.toString()); + + JsonArray jRec = jRoot.getAsJsonObject().get("receivedMessages").getAsJsonArray(); + + // if has elements + for (JsonElement jMsgItem : jRec) { + JsonElement jMsg = jMsgItem.getAsJsonObject().get("message"); + JsonElement jAckId = jMsgItem.getAsJsonObject().get("ackId"); + msg = jMsg.toString(); + ackId = jAckId.toString(); + msgList.add(msg); + ackIdList.add(ackId); + } + + isRdr.close(); + + } else { + + logIssue(response); + + } + + response.close(); + + String[] msgArr = msgList.toArray(new String[0]); + String[] ackIdArr = ackIdList.toArray(new String[0]); + + // Return a Message array + return new MsgAck(msgArr, ackIdArr); + + } + + /** + * Executes a combination of Pull & Ack requests against AMS api + */ + public String[] consume() throws KeyManagementException, NoSuchAlgorithmException, KeyStoreException { + String[] msgs = new String[0]; + // Try first to pull a message + try { + + MsgAck msgAck = doPull(); + // get last ackid + String ackId = ""; + if (msgAck.ackIds.length > 0) { + ackId = msgAck.ackIds[msgAck.ackIds.length - 1]; + } + + if (ackId != "") { + // Do an ack for the received message + String ackRes = doAck(ackId); + if (ackRes == "") { + Log.info("Message Acknowledged ackid:" + ackId); + msgs = msgAck.msgs; + + } else { + Log.warn("No acknowledment for ackid:" + ackId + "-" + ackRes); + } + } + } catch (IOException e) { + LOG.error(e.getMessage()); + } + return msgs; + + } + + /** + * Executes an Acknowledge request against AMS api + */ + public String doAck(String ackId) throws IOException { + + // Create the http post to ack + HttpPost postAck = new HttpPost(this.composeURL("acknowledge")); + StringEntity postBody = new StringEntity("{\"ackIds\":[" + ackId + "]}"); + postBody.setContentType("application/json"); + postAck.setEntity(postBody); + + // check for proxy + if (this.proxy != null) { + 
postAck.setConfig(createProxyCfg()); + } + + CloseableHttpResponse response = httpClient.execute(postAck); + String resMsg = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + int status = response.getStatusLine().getStatusCode(); + + if (status != 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + resMsg = result.toString(); + isRdr.close(); + + } else { + // Log any api errors + logIssue(response); + } + response.close(); + // Return a resposeMessage + return resMsg; + + } + + /** + * Close AMS http client + */ + public void close() throws IOException { + this.httpClient.close(); + } +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingSource.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingSource.java new file mode 100644 index 00000000..30997677 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/ArgoMessagingSource.java @@ -0,0 +1,135 @@ +package argo.streaming; + +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; + +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Custom source to connect to AMS service. Uses ArgoMessaging client + */ +public class ArgoMessagingSource extends RichSourceFunction { + + private static final long serialVersionUID = 1L; + + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingSource.class); + + private String endpoint = null; + private String port = null; + private String token = null; + private String project = null; + private String sub = null; + private int batch = 1; + private long interval = 100L; + private boolean verify = true; + private boolean useProxy = false; + private String proxyURL = ""; + private transient Object rateLck; // lock for waiting to establish rate + + private volatile boolean isRunning = true; + + private ArgoMessagingClient client = null; + + + public ArgoMessagingSource(String endpoint, String port, String token, String project, String sub, int batch, Long interval) { + this.endpoint = endpoint; + this.port = port; + this.token = token; + this.project = project; + this.sub = sub; + this.interval = interval; + this.batch = batch; + this.verify = true; + + } + + /** + * Set verify to true or false. 
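+  // ---------------------------------------------------------------------------
+  // Illustrative sketch (not part of this diff): using ArgoMessagingClient on
+  // its own, which is essentially what ArgoMessagingSource.run() does on each
+  // polling interval. A pull POSTs {"maxMessages":"<batch>","returnImmediately":"true"}
+  // to .../projects/{project}/subscriptions/{sub}:pull and consume() then
+  // acknowledges the last ackId of the pull. Endpoint, project, subscription
+  // and token values below are hypothetical.
+  // ---------------------------------------------------------------------------
+  private static void consumeOnceSketch() throws java.lang.Exception {
+    ArgoMessagingClient client = new ArgoMessagingClient(
+        "https", "S3CR3T_TOKEN", "msg.example.com:443", "EGI", "sync_data", 100, true);
+    // optionally route requests through an http proxy
+    client.setProxy("http://proxy.example.com:3128");
+    // pull up to 100 messages and acknowledge them; each element is the raw AMS
+    // message JSON, which typically carries a base64-encoded Avro payload
+    String[] messages = client.consume();
+    for (String msg : messages) {
+      System.out.println(msg);
+    }
+    client.close();
+  }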
If set to false AMS client will be able to contact AMS endpoints that use self-signed certificates + */ + public void setVerify(boolean verify) { + this.verify=verify; + } + /** + * Set proxy details for AMS client + */ + public void setProxy(String proxyURL) { + this.useProxy = true; + this.proxyURL = proxyURL; + } + + /** + * Unset proxy details for AMS client + */ + public void unsetProxy(String proxyURL) { + this.useProxy = false; + this.proxyURL = ""; + } + + + @Override + public void cancel() { + isRunning = false; + + } + + @Override + public void run(SourceContext ctx) throws Exception { + // This is the main run logic + while (isRunning) { + String[] res = this.client.consume(); + if (res.length > 0) { + for (String msg : res) { + ctx.collect(msg); + } + + } + synchronized (rateLck) { + rateLck.wait(this.interval); + } + + } + + } + + /** + * AMS Source initialization + */ + @Override + public void open(Configuration parameters) throws Exception { + // init rate lock + rateLck = new Object(); + // init client + String fendpoint = this.endpoint; + if (this.port != null && !this.port.isEmpty()) { + fendpoint = this.endpoint + ":" + port; + } + try { + client = new ArgoMessagingClient("https", this.token, fendpoint, this.project, this.sub, this.batch, this.verify); + if (this.useProxy) { + client.setProxy(this.proxyURL); + } + } catch (KeyManagementException e) { + e.printStackTrace(); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (KeyStoreException e) { + e.printStackTrace(); + } + } + + @Override + public void close() throws Exception { + if (this.client != null) { + client.close(); + } + synchronized (rateLck) { + rateLck.notify(); + } + } + +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncHDFSOutputFormat.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncHDFSOutputFormat.java new file mode 100644 index 00000000..dec9b7f6 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncHDFSOutputFormat.java @@ -0,0 +1,262 @@ +package argo.streaming; + +import java.io.BufferedOutputStream; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +import org.apache.avro.file.DataFileWriter; +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DatumWriter; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; +import org.apache.avro.specific.SpecificDatumWriter; +import org.apache.commons.codec.binary.Base64; +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.mortbay.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + +/** + * Custom Output format for storing Sync Data to HDFS + */ +class SyncHDFSOutputFormat implements OutputFormat { + + // setup logger + static Logger LOG = 
LoggerFactory.getLogger(SyncHDFSOutputFormat.class); + + private static final long serialVersionUID = 1L; + + private URI basePath; + private org.apache.hadoop.conf.Configuration hadoopConf; + private FileSystem hdfs; + + public void setBasePath(String url) throws URISyntaxException { + this.basePath = new URI(url); + } + + @Override + public void configure(Configuration parameters) { + + } + + /** + * Initialize hadoop configuration and hdfs object + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // create hdfs configuration + hadoopConf = new org.apache.hadoop.conf.Configuration(); + hadoopConf.set("fs.defaultFS", basePath.getScheme() + "://" + basePath.getHost() + ":" + basePath.getPort()); + hdfs = FileSystem.newInstance(hadoopConf); + + } + + /** + * Accepts a binary payload of avro group endpoint records and creates an avro file in designated hdfs path + */ + private void writeGroupEndpoint(byte[] payload, Path file) throws IllegalArgumentException, IOException { + if (hdfs == null) { + return; + } + FSDataOutputStream os = hdfs.create(file); + DatumReader avroReader = new SpecificDatumReader(GroupEndpoint.getClassSchema(), + GroupEndpoint.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null); + + DatumWriter avroWriter = new SpecificDatumWriter(GroupEndpoint.getClassSchema(), + new SpecificData()); + DataFileWriter dfw = new DataFileWriter(avroWriter); + dfw.create(GroupEndpoint.getClassSchema(), os); + + while (!decoder.isEnd()) { + GroupEndpoint cur = avroReader.read(null, decoder); + dfw.append(cur); + + } + + dfw.close(); + os.close(); + } + + /** + * Accepts a binary payload of avro group of groups records and creates an avro file in designated hdfs path + */ + private void writeGroupGroup(byte[] payload, Path file) throws IllegalArgumentException, IOException { + if (hdfs == null) { + return; + } + FSDataOutputStream os = hdfs.create(file); + DatumReader avroReader = new SpecificDatumReader(GroupGroup.getClassSchema(), + GroupGroup.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null); + + DatumWriter avroWriter = new SpecificDatumWriter(GroupGroup.getClassSchema(), + new SpecificData()); + DataFileWriter dfw = new DataFileWriter(avroWriter); + dfw.create(GroupGroup.getClassSchema(), os); + + while (!decoder.isEnd()) { + GroupGroup cur = avroReader.read(null, decoder); + dfw.append(cur); + + } + + dfw.close(); + os.close(); + } + + /** + * Accepts a binary payload of weight records and creates an avro file in designated hdfs path + */ + private void writeWeight(byte[] payload, Path file) throws IllegalArgumentException, IOException { + if (hdfs == null) { + return; + } + FSDataOutputStream os = hdfs.create(file); + DatumReader avroReader = new SpecificDatumReader(Weight.getClassSchema(), + Weight.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null); + + DatumWriter avroWriter = new SpecificDatumWriter(Weight.getClassSchema(), new SpecificData()); + DataFileWriter dfw = new DataFileWriter(avroWriter); + dfw.create(Weight.getClassSchema(), os); + + while (!decoder.isEnd()) { + Weight cur = avroReader.read(null, decoder); + dfw.append(cur); + + } + + dfw.close(); + os.close(); + } + + /** + * Accepts a binary payload of avro downtime records and creates an avro file in designated hdfs path + */ + private void writeDowntime(byte[] payload, Path file) throws 
IllegalArgumentException, IOException { + if (hdfs == null) { + return; + } + FSDataOutputStream os = hdfs.create(file); + DatumReader avroReader = new SpecificDatumReader(Downtime.getClassSchema(), + Downtime.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null); + + DatumWriter avroWriter = new SpecificDatumWriter(Downtime.getClassSchema(), + new SpecificData()); + DataFileWriter dfw = new DataFileWriter(avroWriter); + dfw.create(Downtime.getClassSchema(), os); + + while (!decoder.isEnd()) { + Downtime cur = avroReader.read(null, decoder); + dfw.append(cur); + + } + + dfw.close(); + os.close(); + } + + /** + * Accepts a binary payload of metric profile records and creates an avro file in designated hdfs path + */ + private void writeMetricProfile(byte[] payload, Path file) throws IllegalArgumentException, IOException { + if (hdfs == null) { + return; + } + FSDataOutputStream os = hdfs.create(file); + DatumReader avroReader = new SpecificDatumReader(MetricProfile.getClassSchema(), + MetricProfile.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null); + + DatumWriter avroWriter = new SpecificDatumWriter(MetricProfile.getClassSchema(), + new SpecificData()); + DataFileWriter dfw = new DataFileWriter(avroWriter); + dfw.create(MetricProfile.getClassSchema(), os); + + while (!decoder.isEnd()) { + MetricProfile cur = avroReader.read(null, decoder); + dfw.append(cur); + + } + + dfw.close(); + os.close(); + } + + /** + * Accepts an AMS json message, parses it's attributes and decodes the data payload. + * Then according to the attributes select an appropriate sync writing method to + * store the data as an hdfs avro file + */ + @Override + public void writeRecord(String record) throws IOException { + if (hdfs == null) { + return; + } + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(record); + // parse the json field "data" and read it as string + // this is the base64 string payload + String data = jRoot.getAsJsonObject().get("data").getAsString(); + // Decode from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + JsonElement jAttr = jRoot.getAsJsonObject().get("attributes"); + Map attr = SyncParse.parseAttributes(jAttr); + + if (attr.containsKey("type") && attr.containsKey("report") && attr.containsKey("partition_date")) { + + String sType = attr.get("type"); + String sReport = attr.get("report"); + String sPdate = attr.get("partition_date"); + + Path path = new Path(basePath.toString() + "/" + sReport + "/" + sType + "_" + sPdate + ".avro"); + LOG.info("Saving to:" + path.toString()); + + if (sType.equalsIgnoreCase("metric_profile")) { + writeMetricProfile(decoded64, path); + } else if (sType.equalsIgnoreCase("group_endpoints")) { + writeGroupEndpoint(decoded64, path); + } else if (sType.equalsIgnoreCase("group_groups")) { + writeGroupGroup(decoded64, path); + } else if (sType.equalsIgnoreCase("downtimes")) { + writeDowntime(decoded64, path); + } else if (sType.equalsIgnoreCase("weights")) { + writeWeight(decoded64, path); + } + } + + } + + @Override + public void close() throws IOException { + if (hdfs != null) { + hdfs.close(); + } + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncParse.java b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncParse.java new file mode 100644 index 
00000000..29dc89a3 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/main/java/argo/streaming/SyncParse.java @@ -0,0 +1,81 @@ +package argo.streaming; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; + +import com.google.gson.JsonElement; + +import argo.avro.GroupEndpoint; +import argo.avro.MetricProfile; + + +/** + * SyncParse is a utility class providing methods to parse specific connector data in avro format + */ +public class SyncParse { + + /** + * Parses a byte arrray and decodes avro GroupEndpoint objects + */ + public static ArrayList parseGroupEndpoint(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(GroupEndpoint.getClassSchema(),GroupEndpoint.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + GroupEndpoint cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + /** + * Parses a byte arrray and decodes avro MetricProfile objects + */ + public static ArrayList parseMetricProfile(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(MetricProfile.getClassSchema(),MetricProfile.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + MetricProfile cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + /** + * Parses attributes from a json attribute element + */ + public static Map parseAttributes(JsonElement jAttr) throws IOException{ + + Map result = new HashMap(); + if (jAttr!=null){ + Set> jItems = jAttr.getAsJsonObject().entrySet(); + + for (Entry jItem : jItems){ + result.put(jItem.getKey(), jItem.getValue().getAsString()); + } + } + + return result; + } + + +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/test/java/argo/streaming/TestSyncDecoding.java b/flink_jobs/old-models/ams_ingest_sync/src/test/java/argo/streaming/TestSyncDecoding.java new file mode 100644 index 00000000..4b380048 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/test/java/argo/streaming/TestSyncDecoding.java @@ -0,0 +1,80 @@ +package argo.streaming; + +import static org.junit.Assert.assertEquals; + +import java.io.BufferedReader; +import java.io.File; + +import java.io.FileReader; +import java.io.IOException; + +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Map; + +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; +import org.apache.commons.codec.binary.Base64; +import org.junit.Test; + +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; + +import argo.avro.MetricProfile; + +public class TestSyncDecoding { + + @Test + public void test() throws URISyntaxException, IOException { + URL resJsonFile = TestSyncDecoding.class.getResource("/request_metric_profile.json"); + File jsonFile = 
new File(resJsonFile.toURI()); + + BufferedReader br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRoot = jsonParser.parse(br); + + JsonElement jMsg = jRoot.getAsJsonObject().get("messages").getAsJsonArray().get(0); + + String data = jMsg.getAsJsonObject().get("data").getAsString(); + // Decode from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + JsonElement jAttr = jMsg.getAsJsonObject().get("attributes"); + + Map attr = SyncParse.parseAttributes(jAttr); + + assertEquals("check report attr", attr.get("report"), "ops-mon"); + assertEquals("check type attr", attr.get("type"), "metric_profile"); + assertEquals("check partition date attr", attr.get("partition_date"), "2017-11-12"); + + URL resMetricProfile = TestSyncDecoding.class.getResource("/metric_profile.json"); + File jsonMetricProfile = new File(resMetricProfile.toURI()); + + BufferedReader br2 = new BufferedReader(new FileReader(jsonMetricProfile)); + + ArrayList mpContents = new ArrayList(); + + String line = ""; + while ((line = br2.readLine()) != null && line.length() != 0) { + mpContents.add(line); + } + + // Check decoding of MetricProfile avro objects + DatumReader avroReader = new SpecificDatumReader(MetricProfile.getClassSchema(), + MetricProfile.getClassSchema(), new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(decoded64, null); + + int i=0; + while (!decoder.isEnd()) { + MetricProfile cur = avroReader.read(null, decoder); + assertEquals("check decoded avro records",cur.toString(),mpContents.get(i)); + i++; + } + + } + +} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/test/resources/metric_profile.json b/flink_jobs/old-models/ams_ingest_sync/src/test/resources/metric_profile.json new file mode 100644 index 00000000..30596c89 --- /dev/null +++ b/flink_jobs/old-models/ams_ingest_sync/src/test/resources/metric_profile.json @@ -0,0 +1,34 @@ +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "ARC-CE", "metric": "org.nordugrid.ARC-CE-ARIS", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "ARC-CE", "metric": "org.nordugrid.ARC-CE-IGTF", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "ARC-CE", "metric": "org.nordugrid.ARC-CE-result", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "ARC-CE", "metric": "org.nordugrid.ARC-CE-srm", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "ARC-CE", "metric": "org.nordugrid.ARC-CE-sw-csh", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "CREAM-CE", "metric": "emi.cream.CREAMCE-AllowedSubmission", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "CREAM-CE", "metric": "emi.cream.CREAMCE-JobPurge", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "CREAM-CE", "metric": "emi.cream.CREAMCE-ServiceInfo", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "CREAM-CE", "metric": "eu.egi.CREAM-IGTF", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "CREAM-CE", "metric": "hr.srce.CREAMCE-CertLifetime", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "GRAM5", "metric": "hr.srce.GRAM-Auth", "tags": {"fqan": "", "vo": "ops"}} +{"profile": 
"ch.cern.SAM.ARGO_MON_CRITICAL", "service": "GRAM5", "metric": "hr.srce.GRAM-CertLifetime", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "GRAM5", "metric": "hr.srce.GRAM-Command", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "QCG.Computing", "metric": "hr.srce.QCG-Computing-CertLifetime", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "QCG.Computing", "metric": "pl.plgrid.QCG-Computing", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "hr.srce.SRM2-CertLifetime", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-Del", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-Get", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-GetSURLs", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-GetTURLs", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-Ls", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-LsDir", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "SRM", "metric": "org.sam.SRM-Put", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "Site-BDII", "metric": "org.bdii.Entries", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "Site-BDII", "metric": "org.bdii.Freshness", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "eu.egi.cloud.vm-management.occi", "metric": "eu.egi.OCCI-IGTF", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "eu.egi.cloud.vm-management.occi", "metric": "eu.egi.cloud.OCCI-Context", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "eu.egi.cloud.vm-management.occi", "metric": "eu.egi.cloud.OCCI-VM", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "eu.egi.cloud.vm-management.occi", "metric": "org.nagios.OCCI-TCP", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "org.openstack.nova", "metric": "eu.egi.Keystone-IGTF", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "org.openstack.nova", "metric": "eu.egi.cloud.OpenStack-VM", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "org.openstack.nova", "metric": "org.nagios.Keystone-TCP", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "unicore6.TargetSystemFactory", "metric": "emi.unicore.TargetSystemFactory", "tags": {"fqan": "", "vo": "ops"}} +{"profile": "ch.cern.SAM.ARGO_MON_CRITICAL", "service": "unicore6.TargetSystemFactory", "metric": "emi.unicore.UNICORE-Job", "tags": {"fqan": "", "vo": "ops"}} diff --git a/flink_jobs/old-models/ams_ingest_sync/src/test/resources/request_metric_profile.json b/flink_jobs/old-models/ams_ingest_sync/src/test/resources/request_metric_profile.json new file mode 100644 index 00000000..4b57faa5 --- /dev/null 
+++ b/flink_jobs/old-models/ams_ingest_sync/src/test/resources/request_metric_profile.json @@ -0,0 +1,12 @@ +{ + "messages": [ + { + "attributes": { + "report": "ops-mon", + "type": "metric_profile", + "partition_date": "2017-11-12" + }, + "data": "OmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMDEFSQy1DRTJvcmcubm9yZHVncmlkLkFSQy1DRS1BUklTAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTAxBUkMtQ0Uyb3JnLm5vcmR1Z3JpZC5BUkMtQ0UtSUdURgIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwMQVJDLUNFNm9yZy5ub3JkdWdyaWQuQVJDLUNFLXJlc3VsdAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwMQVJDLUNFMG9yZy5ub3JkdWdyaWQuQVJDLUNFLXNybQIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwMQVJDLUNFNm9yZy5ub3JkdWdyaWQuQVJDLUNFLXN3LWNzaAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwQQ1JFQU0tQ0VGZW1pLmNyZWFtLkNSRUFNQ0UtQWxsb3dlZFN1Ym1pc3Npb24CBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMEENSRUFNLUNFNGVtaS5jcmVhbS5DUkVBTUNFLUpvYlB1cmdlAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTBBDUkVBTS1DRTplbWkuY3JlYW0uQ1JFQU1DRS1TZXJ2aWNlSW5mbwIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwQQ1JFQU0tQ0UiZXUuZWdpLkNSRUFNLUlHVEYCBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMEENSRUFNLUNFOGhyLnNyY2UuQ1JFQU1DRS1DZXJ0TGlmZXRpbWUCBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMCkdSQU01ImhyLnNyY2UuR1JBTS1BdXRoAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTApHUkFNNTJoci5zcmNlLkdSQU0tQ2VydExpZmV0aW1lAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTApHUkFNNShoci5zcmNlLkdSQU0tQ29tbWFuZAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwaUUNHLkNvbXB1dGluZ0Roci5zcmNlLlFDRy1Db21wdXRpbmctQ2VydExpZmV0aW1lAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTBpRQ0cuQ29tcHV0aW5nLnBsLnBsZ3JpZC5RQ0ctQ29tcHV0aW5nAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTAZTUk0yaHIuc3JjZS5TUk0yLUNlcnRMaWZldGltZQIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwGU1JNHm9yZy5zYW0uU1JNLURlbAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwGU1JNHm9yZy5zYW0uU1JNLUdldAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwGU1JNKG9yZy5zYW0uU1JNLUdldFNVUkxzAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTAZTUk0ob3JnLnNhbS5TUk0tR2V0VFVSTHMCBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMBlNSTRxvcmcuc2FtLlNSTS1McwIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwGU1JNIm9yZy5zYW0uU1JNLUxzRGlyAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTAZTUk0eb3JnLnNhbS5TUk0tUHV0AgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTBJTaXRlLUJESUkgb3JnLmJkaWkuRW50cmllcwIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwSU2l0ZS1CRElJJG9yZy5iZGlpLkZyZXNobmVzcwIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUw+ZXUuZWdpLmNsb3VkLnZtLW1hbmFnZW1lbnQub2NjaSBldS5lZ2kuT0NDSS1JR1RGAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTD5ldS5lZ2kuY2xvdWQudm0tbWFuYWdlbWVudC5vY2NpMmV1LmVnaS5jbG91ZC5PQ0NJLUNvbnRleHQCBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMPmV1LmVnaS5jbG91ZC52bS1tYW5hZ2VtZW50Lm9jY2koZXUuZWdpLmNsb3VkLk9DQ0ktVk0CBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMPmV1LmVnaS5jbG91ZC52bS1tYW5hZ2VtZW50Lm9jY2kmb3JnLm5hZ2lvcy5PQ0NJLVRDUAIECGZxYW4ABHZvBm9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwkb3JnLm9wZW5zdGFjay5ub3ZhKGV1LmVnaS5LZXlzdG9uZS1JR1RGAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTCRvcmcub3BlbnN0YWNrLm5vdmEyZXUuZWdpLmNsb3VkLk9wZW5TdGFjay1WTQIECGZxYW4ABHZvBm
9wcwA6Y2guY2Vybi5TQU0uQVJHT19NT05fQ1JJVElDQUwkb3JnLm9wZW5zdGFjay5ub3ZhLm9yZy5uYWdpb3MuS2V5c3RvbmUtVENQAgQIZnFhbgAEdm8Gb3BzADpjaC5jZXJuLlNBTS5BUkdPX01PTl9DUklUSUNBTDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5PmVtaS51bmljb3JlLlRhcmdldFN5c3RlbUZhY3RvcnkCBAhmcWFuAAR2bwZvcHMAOmNoLmNlcm4uU0FNLkFSR09fTU9OX0NSSVRJQ0FMOHVuaWNvcmU2LlRhcmdldFN5c3RlbUZhY3RvcnkuZW1pLnVuaWNvcmUuVU5JQ09SRS1Kb2ICBAhmcWFuAAR2bwZvcHMA" + } + ] +} diff --git a/flink_jobs/old-models/batch_ar/.gitignore b/flink_jobs/old-models/batch_ar/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/old-models/batch_ar/avro/downtime.avsc b/flink_jobs/old-models/batch_ar/avro/downtime.avsc new file mode 100644 index 00000000..90c10824 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/downtime.avsc @@ -0,0 +1,10 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "Downtime", + "fields": [ + {"name": "hostname", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "start_time", "type": "string"}, + {"name": "end_time", "type": "string"} + ] +} diff --git a/flink_jobs/old-models/batch_ar/avro/group_endpoint.avsc b/flink_jobs/old-models/batch_ar/avro/group_endpoint.avsc new file mode 100644 index 00000000..4ed8818e --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/group_endpoint.avsc @@ -0,0 +1,14 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "GroupEndpoint", + "fields": [ + {"name": "type", "type": "string"}, + {"name": "group", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "tags", "type" : ["null", { "name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} diff --git a/flink_jobs/old-models/batch_ar/avro/group_group.avsc b/flink_jobs/old-models/batch_ar/avro/group_group.avsc new file mode 100644 index 00000000..f23f439d --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/group_group.avsc @@ -0,0 +1,14 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "GroupGroup", + "fields": [ + {"name": "type", "type": "string"}, + {"name": "group", "type": "string"}, + {"name": "subgroup", "type": "string"}, + {"name": "tags", "type" : ["null", { "name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} + diff --git a/flink_jobs/old-models/batch_ar/avro/metric_data.avsc b/flink_jobs/old-models/batch_ar/avro/metric_data.avsc new file mode 100644 index 00000000..ff3d7a56 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/metric_data.avsc @@ -0,0 +1,18 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "MetricData", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} diff --git a/flink_jobs/old-models/batch_ar/avro/metric_profile.avsc b/flink_jobs/old-models/batch_ar/avro/metric_profile.avsc new file mode 100644 index 00000000..df6eb2a8 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/metric_profile.avsc @@ -0,0 +1,13 @@ +{"namespace": "argo.avro", + 
"type": "record", + "name": "MetricProfile", + "fields": [ + {"name": "profile", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} diff --git a/flink_jobs/old-models/batch_ar/avro/weight.avsc b/flink_jobs/old-models/batch_ar/avro/weight.avsc new file mode 100644 index 00000000..5d39ba01 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/avro/weight.avsc @@ -0,0 +1,9 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "Weight", + "fields": [ + {"name": "type", "type": "string"}, + {"name": "site", "type": "string"}, + {"name": "weight", "type": "string"} + ] +} diff --git a/flink_jobs/old-models/batch_ar/pom.xml b/flink_jobs/old-models/batch_ar/pom.xml new file mode 100644 index 00000000..4fa0820f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/pom.xml @@ -0,0 +1,389 @@ + + + 4.0.0 + + argo.batch + ArgoArBatch + 1.0 + jar + + Argo Ar Batch Job + + + + UTF-8 + 1.3.2 + 1.7.7 + 1.2.17 + 2.6.0 + + + + + cloudera + https://repository.cloudera.com/artifactory/cloudera-repos + + true + + + true + + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-avro_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + + org.apache.flink + flink-hadoop-compatibility_2.10 + ${flink.version} + + + + + joda-time + joda-time + 1.6 + + + com.google.code.gson + gson + 2.2.4 + + + + + org.mongodb + mongo-java-driver + 3.2.2 + + + + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + log4j + log4j + ${log4j.version} + + + junit-addons + junit-addons + 1.4 + test + + + junit + junit + 4.13.1 + test + + + + + + + + build-jar + + + false + + + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + + org.apache.flink + flink-hadoop-compatibility_2.10 + ${flink.version} + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + provided + + + log4j + log4j + ${log4j.version} + provided + + + joda-time + joda-time + 1.6 + + + com.google.code.gson + gson + 2.2.4 + + + + org.mongodb + mongo-java-driver + 3.2.2 + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop2 + org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.11 + org.apache.flink:flink-runtime_2.11 + org.apache.flink:flink-optimizer_2.11 + org.apache.flink:flink-clients_2.11 + org.apache.flink:flink-avro_2.11 + org.apache.flink:flink-examples-batch_2.11 + org.apache.flink:flink-examples-streaming_2.11 + org.apache.flink:flink-streaming-java_2.11 + org.apache.flink:flink-streaming-scala_2.11 + org.apache.flink:flink-scala-shell_2.11 + org.apache.flink:flink-python + org.apache.flink:flink-metrics-core + org.apache.flink:flink-metrics-jmx + org.apache.flink:flink-statebackend-rocksdb_2.11 + + + + 
log4j:log4j + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.data-artisans:flakka-actor_* + com.data-artisans:flakka-remote_* + com.data-artisans:flakka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + commons-lang:commons-lang + junit:junit + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + + + diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Downtime.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Downtime.java new file mode 100644 index 00000000..b73e100d --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Downtime.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Downtime extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Downtime\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"start_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"end_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String start_time; + @Deprecated public java.lang.String end_time; + + /** + * Default constructor. + */ + public Downtime() {} + + /** + * All-args constructor. + */ + public Downtime(java.lang.String hostname, java.lang.String service, java.lang.String start_time, java.lang.String end_time) { + this.hostname = hostname; + this.service = service; + this.start_time = start_time; + this.end_time = end_time; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. 
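// Editorial note (not part of the generated source): get() and put() below are positional accessors
// keyed to the schema field order (0=hostname, 1=service, 2=start_time, 3=end_time). They exist for
// Avro's DatumWriter/DatumReader; application code should use the typed getters and setters instead.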
+ public java.lang.Object get(int field$) { + switch (field$) { + case 0: return hostname; + case 1: return service; + case 2: return start_time; + case 3: return end_time; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: hostname = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: start_time = (java.lang.String)value$; break; + case 3: end_time = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'start_time' field. + */ + public java.lang.String getStartTime() { + return start_time; + } + + /** + * Sets the value of the 'start_time' field. + * @param value the value to set. + */ + public void setStartTime(java.lang.String value) { + this.start_time = value; + } + + /** + * Gets the value of the 'end_time' field. + */ + public java.lang.String getEndTime() { + return end_time; + } + + /** + * Sets the value of the 'end_time' field. + * @param value the value to set. + */ + public void setEndTime(java.lang.String value) { + this.end_time = value; + } + + /** Creates a new Downtime RecordBuilder */ + public static argo.avro.Downtime.Builder newBuilder() { + return new argo.avro.Downtime.Builder(); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Builder */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime.Builder other) { + return new argo.avro.Downtime.Builder(other); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Downtime instance */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime other) { + return new argo.avro.Downtime.Builder(other); + } + + /** + * RecordBuilder for Downtime instances. 
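 * <p>
 * A hypothetical usage sketch (editorial addition with placeholder values, not generated code):
 * <pre>
 * Downtime d = Downtime.newBuilder()
 *     .setHostname("host.example.org")
 *     .setService("SRM")
 *     .setStartTime("2017-11-12T00:00:00Z")
 *     .setEndTime("2017-11-12T23:59:00Z")
 *     .build();
 * </pre>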
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String hostname; + private java.lang.String service; + private java.lang.String start_time; + private java.lang.String end_time; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Downtime.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Downtime.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Downtime instance */ + private Builder(argo.avro.Downtime other) { + super(argo.avro.Downtime.SCHEMA$); + if (isValidValue(fields()[0], other.hostname)) { + this.hostname = data().deepCopy(fields()[0].schema(), other.hostname); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.start_time)) { + this.start_time = data().deepCopy(fields()[2].schema(), other.start_time); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.end_time)) { + this.end_time = data().deepCopy(fields()[3].schema(), other.end_time); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.Downtime.Builder setHostname(java.lang.String value) { + validate(fields()[0], value); + this.hostname = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.Downtime.Builder clearHostname() { + hostname = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.Downtime.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.Downtime.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'start_time' field */ + public java.lang.String getStartTime() { + return start_time; + } + + /** Sets the value of the 'start_time' field */ + public argo.avro.Downtime.Builder setStartTime(java.lang.String value) { + validate(fields()[2], value); + this.start_time = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'start_time' field has been set */ + public boolean hasStartTime() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'start_time' field */ + public argo.avro.Downtime.Builder clearStartTime() { + start_time = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'end_time' field */ + public java.lang.String getEndTime() { + return end_time; + } + + /** Sets the value of the 'end_time' field */ + public argo.avro.Downtime.Builder setEndTime(java.lang.String value) { + validate(fields()[3], value); + this.end_time = value; + 
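      // Editorial note: fieldSetFlags() records which fields were explicitly set, so build() below
      // can distinguish them from fields that should take the schema default.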
fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'end_time' field has been set */ + public boolean hasEndTime() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'end_time' field */ + public argo.avro.Downtime.Builder clearEndTime() { + end_time = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public Downtime build() { + try { + Downtime record = new Downtime(); + record.hostname = fieldSetFlags()[0] ? this.hostname : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.start_time = fieldSetFlags()[2] ? this.start_time : (java.lang.String) defaultValue(fields()[2]); + record.end_time = fieldSetFlags()[3] ? this.end_time : (java.lang.String) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupEndpoint.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupEndpoint.java new file mode 100644 index 00000000..2386b1d2 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupEndpoint.java @@ -0,0 +1,336 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupEndpoint extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupEndpoint\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupEndpoint() {} + + /** + * All-args constructor. + */ + public GroupEndpoint(java.lang.String type, java.lang.String group, java.lang.String service, java.lang.String hostname, java.util.Map tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return service; + case 3: return hostname; + case 4: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
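// Editorial note (not part of the generated source): indices here follow the schema order
// (0=type, 1=group, 2=service, 3=hostname, 4=tags); 'tags' is a nullable map in the schema,
// so put() may legitimately receive null for index 4.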
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: service = (java.lang.String)value$; break; + case 3: hostname = (java.lang.String)value$; break; + case 4: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupEndpoint RecordBuilder */ + public static argo.avro.GroupEndpoint.Builder newBuilder() { + return new argo.avro.GroupEndpoint.Builder(); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint.Builder other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing GroupEndpoint instance */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** + * RecordBuilder for GroupEndpoint instances. 
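 * <p>
 * A hypothetical usage sketch (editorial addition with placeholder values, not generated code):
 * <pre>
 * GroupEndpoint ge = GroupEndpoint.newBuilder()
 *     .setType("SITES")
 *     .setGroup("SITE-A")
 *     .setService("CREAM-CE")
 *     .setHostname("ce01.example.org")
 *     .setTags(null) // 'tags' is a nullable map in the schema
 *     .build();
 * </pre>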
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String service; + private java.lang.String hostname; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupEndpoint.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupEndpoint.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupEndpoint instance */ + private Builder(argo.avro.GroupEndpoint other) { + super(argo.avro.GroupEndpoint.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.service)) { + this.service = data().deepCopy(fields()[2].schema(), other.service); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.hostname)) { + this.hostname = data().deepCopy(fields()[3].schema(), other.hostname); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.tags)) { + this.tags = data().deepCopy(fields()[4].schema(), other.tags); + fieldSetFlags()[4] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder setService(java.lang.String value) { + validate(fields()[2], value); + this.service = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder clearService() { + service = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public 
argo.avro.GroupEndpoint.Builder setHostname(java.lang.String value) { + validate(fields()[3], value); + this.hostname = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.GroupEndpoint.Builder clearHostname() { + hostname = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder setTags(java.util.Map value) { + validate(fields()[4], value); + this.tags = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder clearTags() { + tags = null; + fieldSetFlags()[4] = false; + return this; + } + + @Override + public GroupEndpoint build() { + try { + GroupEndpoint record = new GroupEndpoint(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.service = fieldSetFlags()[2] ? this.service : (java.lang.String) defaultValue(fields()[2]); + record.hostname = fieldSetFlags()[3] ? this.hostname : (java.lang.String) defaultValue(fields()[3]); + record.tags = fieldSetFlags()[4] ? this.tags : (java.util.Map) defaultValue(fields()[4]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupGroup.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupGroup.java new file mode 100644 index 00000000..a7712d67 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/GroupGroup.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupGroup extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupGroup\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"subgroup\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String subgroup; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupGroup() {} + + /** + * All-args constructor. 
+ */ + public GroupGroup(java.lang.String type, java.lang.String group, java.lang.String subgroup, java.util.Map tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return subgroup; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: subgroup = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'subgroup' field. + */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** + * Sets the value of the 'subgroup' field. + * @param value the value to set. + */ + public void setSubgroup(java.lang.String value) { + this.subgroup = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupGroup RecordBuilder */ + public static argo.avro.GroupGroup.Builder newBuilder() { + return new argo.avro.GroupGroup.Builder(); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup.Builder other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing GroupGroup instance */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** + * RecordBuilder for GroupGroup instances. 
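 * <p>
 * Editorial note (not generated code): used the same way as the GroupEndpoint builder above,
 * i.e. set type, group and subgroup (and optionally tags, which is nullable), then call build().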
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String subgroup; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupGroup.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupGroup.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupGroup instance */ + private Builder(argo.avro.GroupGroup other) { + super(argo.avro.GroupGroup.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.subgroup)) { + this.subgroup = data().deepCopy(fields()[2].schema(), other.subgroup); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupGroup.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupGroup.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupGroup.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupGroup.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'subgroup' field */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** Sets the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder setSubgroup(java.lang.String value) { + validate(fields()[2], value); + this.subgroup = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'subgroup' field has been set */ + public boolean hasSubgroup() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder clearSubgroup() { + subgroup = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return 
fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public GroupGroup build() { + try { + GroupGroup record = new GroupGroup(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.subgroup = fieldSetFlags()[2] ? this.subgroup : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricData.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricData.java new file mode 100644 index 00000000..77800770 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricData.java @@ -0,0 +1,811 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; + +import org.apache.avro.specific.SpecificData; + +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricData extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + private static final long serialVersionUID = 3861438289744595870L; + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"status\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"monitoring_host\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"actual_data\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"summary\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"message\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String timestamp; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String metric; + @Deprecated public java.lang.String status; + @Deprecated public java.lang.String monitoring_host; + @Deprecated public java.lang.String actual_data; + @Deprecated public java.lang.String summary; + @Deprecated public java.lang.String message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public MetricData() {} + + /** + * All-args constructor. 
+ * @param timestamp The new value for timestamp + * @param service The new value for service + * @param hostname The new value for hostname + * @param metric The new value for metric + * @param status The new value for status + * @param monitoring_host The new value for monitoring_host + * @param actual_data The new value for actual_data + * @param summary The new value for summary + * @param message The new value for message + * @param tags The new value for tags + */ + public MetricData(java.lang.String timestamp, java.lang.String service, java.lang.String hostname, java.lang.String metric, java.lang.String status, java.lang.String monitoring_host, java.lang.String actual_data, java.lang.String summary, java.lang.String message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.actual_data = actual_data; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return actual_data; + case 7: return summary; + case 8: return message; + case 9: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: hostname = (java.lang.String)value$; break; + case 3: metric = (java.lang.String)value$; break; + case 4: status = (java.lang.String)value$; break; + case 5: monitoring_host = (java.lang.String)value$; break; + case 6: actual_data = (java.lang.String)value$; break; + case 7: summary = (java.lang.String)value$; break; + case 8: message = (java.lang.String)value$; break; + case 9: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value of the 'timestamp' field. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.String value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + * @return The value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. + * @return The value of the 'metric' field. 
+ */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + * @return The value of the 'status' field. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.String value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value of the 'monitoring_host' field. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. + */ + public void setMonitoringHost(java.lang.String value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value of the 'actual_data' field. + */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value the value to set. + */ + public void setActualData(java.lang.String value) { + this.actual_data = value; + } + + /** + * Gets the value of the 'summary' field. + * @return The value of the 'summary' field. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.String value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + * @return The value of the 'message' field. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.String value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + * @return The value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** + * Creates a new MetricData RecordBuilder. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder() { + return new argo.avro.MetricData.Builder(); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing Builder. + * @param other The existing builder to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData.Builder other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing MetricData instance. + * @param other The existing instance to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. 
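+ *
+ * A minimal usage sketch of the generated builder (all values below are illustrative
+ * placeholders; only 'actual_data' carries a schema default, so the remaining fields are
+ * set explicitly):
+ * <pre>{@code
+ * MetricData md = MetricData.newBuilder()
+ *     .setTimestamp("2018-01-15T00:00:00Z")
+ *     .setService("CREAM-CE")
+ *     .setHostname("ce01.example.org")
+ *     .setMetric("emi.cream.CREAMCE-JobSubmit")
+ *     .setStatus("OK")
+ *     .setMonitoringHost("mon01.example.org")
+ *     .setSummary("OK")
+ *     .setMessage("OK")
+ *     .setTags(null)        // nullable map, left empty here
+ *     .build();
+ * }</pre>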
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String timestamp; + private java.lang.String service; + private java.lang.String hostname; + private java.lang.String metric; + private java.lang.String status; + private java.lang.String monitoring_host; + private java.lang.String actual_data; + private java.lang.String summary; + private java.lang.String message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(SCHEMA$); + } + + /** + * Creates a Builder by copying an existing Builder. + * @param other The existing Builder to copy. + */ + private Builder(argo.avro.MetricData.Builder other) { + super(other); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Creates a Builder by copying an existing MetricData instance + * @param other The existing instance to copy. 
+ */ + private Builder(argo.avro.MetricData other) { + super(SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value The value of 'timestamp'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTimestamp(java.lang.String value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** + * Checks whether the 'timestamp' field has been set. + * @return True if the 'timestamp' field has been set, false otherwise. + */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + + /** + * Clears the value of the 'timestamp' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** + * Gets the value of the 'service' field. + * @return The value. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value The value of 'service'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** + * Checks whether the 'service' field has been set. + * @return True if the 'service' field has been set, false otherwise. + */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + + /** + * Clears the value of the 'service' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value. 
+ */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value The value of 'hostname'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setHostname(java.lang.String value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** + * Checks whether the 'hostname' field has been set. + * @return True if the 'hostname' field has been set, false otherwise. + */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + + /** + * Clears the value of the 'hostname' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** + * Gets the value of the 'metric' field. + * @return The value. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value The value of 'metric'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMetric(java.lang.String value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** + * Checks whether the 'metric' field has been set. + * @return True if the 'metric' field has been set, false otherwise. + */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + + /** + * Clears the value of the 'metric' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** + * Gets the value of the 'status' field. + * @return The value. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value The value of 'status'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setStatus(java.lang.String value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** + * Checks whether the 'status' field has been set. + * @return True if the 'status' field has been set, false otherwise. + */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + + /** + * Clears the value of the 'status' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearStatus() { + status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value The value of 'monitoring_host'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMonitoringHost(java.lang.String value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** + * Checks whether the 'monitoring_host' field has been set. + * @return True if the 'monitoring_host' field has been set, false otherwise. + */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + + /** + * Clears the value of the 'monitoring_host' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value. 
+ */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value The value of 'actual_data'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setActualData(java.lang.String value) { + validate(fields()[6], value); + this.actual_data = value; + fieldSetFlags()[6] = true; + return this; + } + + /** + * Checks whether the 'actual_data' field has been set. + * @return True if the 'actual_data' field has been set, false otherwise. + */ + public boolean hasActualData() { + return fieldSetFlags()[6]; + } + + + /** + * Clears the value of the 'actual_data' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearActualData() { + actual_data = null; + fieldSetFlags()[6] = false; + return this; + } + + /** + * Gets the value of the 'summary' field. + * @return The value. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value The value of 'summary'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setSummary(java.lang.String value) { + validate(fields()[7], value); + this.summary = value; + fieldSetFlags()[7] = true; + return this; + } + + /** + * Checks whether the 'summary' field has been set. + * @return True if the 'summary' field has been set, false otherwise. + */ + public boolean hasSummary() { + return fieldSetFlags()[7]; + } + + + /** + * Clears the value of the 'summary' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearSummary() { + summary = null; + fieldSetFlags()[7] = false; + return this; + } + + /** + * Gets the value of the 'message' field. + * @return The value. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value The value of 'message'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMessage(java.lang.String value) { + validate(fields()[8], value); + this.message = value; + fieldSetFlags()[8] = true; + return this; + } + + /** + * Checks whether the 'message' field has been set. + * @return True if the 'message' field has been set, false otherwise. + */ + public boolean hasMessage() { + return fieldSetFlags()[8]; + } + + + /** + * Clears the value of the 'message' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMessage() { + message = null; + fieldSetFlags()[8] = false; + return this; + } + + /** + * Gets the value of the 'tags' field. + * @return The value. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value The value of 'tags'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTags(java.util.Map value) { + validate(fields()[9], value); + this.tags = value; + fieldSetFlags()[9] = true; + return this; + } + + /** + * Checks whether the 'tags' field has been set. + * @return True if the 'tags' field has been set, false otherwise. + */ + public boolean hasTags() { + return fieldSetFlags()[9]; + } + + + /** + * Clears the value of the 'tags' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTags() { + tags = null; + fieldSetFlags()[9] = false; + return this; + } + + @Override + public MetricData build() { + try { + MetricData record = new MetricData(); + record.timestamp = fieldSetFlags()[0] ? 
this.timestamp : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.String) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.String) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.String) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.String) defaultValue(fields()[5]); + record.actual_data = fieldSetFlags()[6] ? this.actual_data : (java.lang.String) defaultValue(fields()[6]); + record.summary = fieldSetFlags()[7] ? this.summary : (java.lang.String) defaultValue(fields()[7]); + record.message = fieldSetFlags()[8] ? this.message : (java.lang.String) defaultValue(fields()[8]); + record.tags = fieldSetFlags()[9] ? this.tags : (java.util.Map) defaultValue(fields()[9]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricProfile.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricProfile.java new file mode 100644 index 00000000..1fe15e09 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/MetricProfile.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricProfile extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricProfile\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"profile\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String profile; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String metric; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public MetricProfile() {} + + /** + * All-args constructor. + */ + public MetricProfile(java.lang.String profile, java.lang.String service, java.lang.String metric, java.util.Map tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return profile; + case 1: return service; + case 2: return metric; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: profile = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: metric = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'profile' field. + */ + public java.lang.String getProfile() { + return profile; + } + + /** + * Sets the value of the 'profile' field. + * @param value the value to set. + */ + public void setProfile(java.lang.String value) { + this.profile = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'metric' field. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricProfile RecordBuilder */ + public static argo.avro.MetricProfile.Builder newBuilder() { + return new argo.avro.MetricProfile.Builder(); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile.Builder other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing MetricProfile instance */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** + * RecordBuilder for MetricProfile instances. 
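+ *
+ * A minimal usage sketch (the profile, service and metric names below are illustrative,
+ * not taken from this repository):
+ * <pre>{@code
+ * MetricProfile mp = MetricProfile.newBuilder()
+ *     .setProfile("ARGO_MON_CRITICAL")
+ *     .setService("CREAM-CE")
+ *     .setMetric("emi.cream.CREAMCE-JobSubmit")
+ *     .setTags(null)        // nullable map of extra tags
+ *     .build();
+ * }</pre>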
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String profile; + private java.lang.String service; + private java.lang.String metric; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricProfile.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricProfile.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing MetricProfile instance */ + private Builder(argo.avro.MetricProfile other) { + super(argo.avro.MetricProfile.SCHEMA$); + if (isValidValue(fields()[0], other.profile)) { + this.profile = data().deepCopy(fields()[0].schema(), other.profile); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.metric)) { + this.metric = data().deepCopy(fields()[2].schema(), other.metric); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'profile' field */ + public java.lang.String getProfile() { + return profile; + } + + /** Sets the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder setProfile(java.lang.String value) { + validate(fields()[0], value); + this.profile = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'profile' field has been set */ + public boolean hasProfile() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder clearProfile() { + profile = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricProfile.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricProfile.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.String getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder setMetric(java.lang.String value) { + validate(fields()[2], value); + this.metric = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder clearMetric() { + metric = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks 
whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public MetricProfile build() { + try { + MetricProfile record = new MetricProfile(); + record.profile = fieldSetFlags()[0] ? this.profile : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.metric = fieldSetFlags()[2] ? this.metric : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Weight.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Weight.java new file mode 100644 index 00000000..0238d7cf --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/avro/Weight.java @@ -0,0 +1,236 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Weight extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Weight\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"site\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"weight\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String site; + @Deprecated public java.lang.String weight; + + /** + * Default constructor. + */ + public Weight() {} + + /** + * All-args constructor. + */ + public Weight(java.lang.String type, java.lang.String site, java.lang.String weight) { + this.type = type; + this.site = site; + this.weight = weight; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return site; + case 2: return weight; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: site = (java.lang.String)value$; break; + case 2: weight = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'site' field. + */ + public java.lang.String getSite() { + return site; + } + + /** + * Sets the value of the 'site' field. 
+ * @param value the value to set. + */ + public void setSite(java.lang.String value) { + this.site = value; + } + + /** + * Gets the value of the 'weight' field. + */ + public java.lang.String getWeight() { + return weight; + } + + /** + * Sets the value of the 'weight' field. + * @param value the value to set. + */ + public void setWeight(java.lang.String value) { + this.weight = value; + } + + /** Creates a new Weight RecordBuilder */ + public static argo.avro.Weight.Builder newBuilder() { + return new argo.avro.Weight.Builder(); + } + + /** Creates a new Weight RecordBuilder by copying an existing Builder */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight.Builder other) { + return new argo.avro.Weight.Builder(other); + } + + /** Creates a new Weight RecordBuilder by copying an existing Weight instance */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight other) { + return new argo.avro.Weight.Builder(other); + } + + /** + * RecordBuilder for Weight instances. + */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String site; + private java.lang.String weight; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Weight.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Weight.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Weight instance */ + private Builder(argo.avro.Weight other) { + super(argo.avro.Weight.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.site)) { + this.site = data().deepCopy(fields()[1].schema(), other.site); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.weight)) { + this.weight = data().deepCopy(fields()[2].schema(), other.weight); + fieldSetFlags()[2] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.Weight.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.Weight.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'site' field */ + public java.lang.String getSite() { + return site; + } + + /** Sets the value of the 'site' field */ + public argo.avro.Weight.Builder setSite(java.lang.String value) { + validate(fields()[1], value); + this.site = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'site' field has been set */ + public boolean hasSite() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'site' field */ + public argo.avro.Weight.Builder clearSite() { + site = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'weight' field */ + public java.lang.String getWeight() { + return weight; + } + + /** Sets the value of the 'weight' field */ + public argo.avro.Weight.Builder setWeight(java.lang.String value) { + validate(fields()[2], value); + this.weight = 
value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'weight' field has been set */ + public boolean hasWeight() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'weight' field */ + public argo.avro.Weight.Builder clearWeight() { + weight = null; + fieldSetFlags()[2] = false; + return this; + } + + @Override + public Weight build() { + try { + Weight record = new Weight(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.site = fieldSetFlags()[1] ? this.site : (java.lang.String) defaultValue(fields()[1]); + record.weight = fieldSetFlags()[2] ? this.weight : (java.lang.String) defaultValue(fields()[2]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ArgoArBatch.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ArgoArBatch.java new file mode 100644 index 00000000..053c06d7 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ArgoArBatch.java @@ -0,0 +1,223 @@ +package argo.batch; + +import org.slf4j.LoggerFactory; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import argo.avro.Weight; +import ops.ConfigManager; + +import org.slf4j.Logger; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; + +import org.apache.flink.api.java.operators.DataSource; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.core.fs.Path; + + +/** + * Represents an ARGO A/R Batch Job in flink + *

+ *
+ * The specific batch job calculates the availability and reliability results based on the input metric data
+ * and sync files
+ *
+ * Required arguments:
+ * <ul>
+ * <li>--pdata : file location of previous day's metric data (local or hdfs)</li>
+ * <li>--mdata : file location of target day's metric data (local or hdfs)</li>
+ * <li>--egp : file location of endpoint group topology file (local or hdfs)</li>
+ * <li>--ggp : file location of group of groups topology file (local or hdfs)</li>
+ * <li>--mps : file location of metric profile (local or hdfs)</li>
+ * <li>--aps : file location of aggregations profile (local or hdfs)</li>
+ * <li>--ops : file location of operations profile (local or hdfs)</li>
+ * <li>--rec : file location of recomputations file (local or hdfs)</li>
+ * <li>--weights : file location of weights file (local or hdfs)</li>
+ * <li>--downtimes : file location of downtimes file (local or hdfs)</li>
+ * <li>--conf : file location of report configuration json file (local or hdfs)</li>
+ * <li>--run.date : target date in DD-MM-YYYY format</li>
+ * <li>--mongo.uri : mongo uri for outputting the results</li>
+ * <li>--mongo.method : mongo method for storing the results</li>
+ * </ul>
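+ *
+ * A minimal sketch of how these arguments are parsed once the job is submitted (the paths, date
+ * and Mongo URI below are placeholders, not values from this repository; note that main() reads
+ * the aggregations profile under the parameter name "apr"):
+ * <pre>{@code
+ * ParameterTool params = ParameterTool.fromArgs(new String[]{
+ *     "--mps", "hdfs://example/sync/metric_profile.avro",
+ *     "--mdata", "hdfs://example/mdata/2018-01-15.avro",
+ *     "--run.date", "2018-01-15",
+ *     "--mongo.uri", "mongodb://localhost:27017/argo",
+ *     "--mongo.method", "insert"
+ * });
+ * String runDate = params.getRequired("run.date");   // "2018-01-15"
+ * }</pre>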
      + */ +public class ArgoArBatch { + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + public static void main(String[] args) throws Exception { + + final ParameterTool params = ParameterTool.fromArgs(args); + + // set up the execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + // make parameters available in the web interface + env.getConfig().setGlobalJobParameters(params); + env.setParallelism(1); + // sync data for input + + Path mps = new Path(params.getRequired("mps")); + Path egp = new Path(params.getRequired("egp")); + Path ggp = new Path(params.getRequired("ggp")); + Path down = new Path(params.getRequired("downtimes")); + Path weight = new Path(params.getRequired("weights")); + + + DataSource confDS = env.readTextFile(params.getRequired("conf")); + DataSource opsDS = env.readTextFile(params.getRequired("ops")); + DataSource aprDS = env.readTextFile(params.getRequired("apr")); + DataSource recDS = env.readTextFile(params.getRequired("rec")); + + // begin with empty threshold datasource + DataSource thrDS = env.fromElements(""); + // if threshold filepath has been defined in cli parameters + if (params.has("thr")){ + // read file and update threshold datasource + thrDS = env.readTextFile(params.getRequired("thr")); + } + + + + ConfigManager confMgr = new ConfigManager(); + confMgr.loadJsonString(confDS.collect()); + + // sync data input: metric profile in avro format + AvroInputFormat mpsAvro = new AvroInputFormat(mps, MetricProfile.class); + DataSet mpsDS = env.createInput(mpsAvro); + + // sync data input: endpoint group topology data in avro format + AvroInputFormat egpAvro = new AvroInputFormat(egp, GroupEndpoint.class); + DataSet egpDS = env.createInput(egpAvro); + + // sync data input: group of group topology data in avro format + AvroInputFormat ggpAvro = new AvroInputFormat(ggp, GroupGroup.class); + DataSet ggpDS = env.createInput(ggpAvro); + + // sync data input: downtime data in avro format + AvroInputFormat downAvro = new AvroInputFormat(down, Downtime.class); + DataSet downDS = env.createInput(downAvro); + + // sync data input: weight data in avro format + AvroInputFormat weightAvro = new AvroInputFormat(weight, Weight.class); + DataSet weightDS = env.createInput(weightAvro); + + // todays metric data + Path in = new Path(params.getRequired("mdata")); + AvroInputFormat mdataAvro = new AvroInputFormat(in, MetricData.class); + DataSet mdataDS = env.createInput(mdataAvro); + + // previous metric data + Path pin = new Path(params.getRequired("pdata")); + AvroInputFormat pdataAvro = new AvroInputFormat(pin, MetricData.class); + DataSet pdataDS = env.createInput(pdataAvro); + + // Find the latest day + DataSet pdataMin = pdataDS.groupBy("service", "hostname", "metric") + .sortGroup("timestamp", Order.DESCENDING).first(1); + + DataSet mdataPrevTotalDS = mdataDS.union(pdataMin); + + // Generate Full Missing dataset for the given topology + DataSet fillMissDS = mdataPrevTotalDS.reduceGroup(new FillMissing(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(aprDS, "aps").withBroadcastSet(confDS, "conf"); + + // Discard unused data and attach endpoint group as information + DataSet mdataTrimDS = mdataPrevTotalDS.flatMap(new PickEndpoints(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(aprDS, 
"apr").withBroadcastSet(recDS, "rec").withBroadcastSet(confDS, "conf") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(thrDS, "thr"); + + // Combine prev and todays metric data with the generated missing metric + // data + DataSet mdataTotalDS = mdataTrimDS.union(fillMissDS); + + // Create a dataset of metric timelines + DataSet metricTimelinesDS = mdataTotalDS.groupBy("group","service", "hostname", "metric") + .sortGroup("timestamp", Order.ASCENDING).reduceGroup(new CreateMetricTimeline(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(aprDS, "aps").withBroadcastSet(confDS, "conf"); + + // Create a dataset of endpoint timelines + DataSet endpointTimelinesDS = metricTimelinesDS.groupBy("group", "service", "hostname") + .sortGroup("metric", Order.ASCENDING).reduceGroup(new CreateEndpointTimeline(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(aprDS, "aps").withBroadcastSet(downDS, "down"); + + // Create a dataset of service timelines + DataSet serviceTimelinesDS = endpointTimelinesDS.groupBy("group", "service") + .sortGroup("hostname", Order.ASCENDING).reduceGroup(new CreateServiceTimeline(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(aprDS, "aps"); + + // Create a dataset of endpoint group timelines + DataSet groupTimelinesDS = serviceTimelinesDS.groupBy("group") + .sortGroup("service", Order.ASCENDING).reduceGroup(new CreateEndpointGroupTimeline(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(aprDS, "aps").withBroadcastSet(recDS, "rec"); + + // Calculate endpoint ar from endpoint timelines + DataSet endpointResultDS = endpointTimelinesDS.flatMap(new CalcEndpointAR(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(aprDS, "apr").withBroadcastSet(recDS, "rec").withBroadcastSet(opsDS, "ops") + .withBroadcastSet(confDS, "conf"); + + // Calculate service ar from service timelines + DataSet serviceResultDS = serviceTimelinesDS.flatMap(new CalcServiceAR(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(aprDS, "apr").withBroadcastSet(recDS, "rec").withBroadcastSet(opsDS, "ops") + .withBroadcastSet(confDS, "conf"); + + // Calculate endpoint group ar from endpoint group timelines + DataSet groupResultDS = groupTimelinesDS.flatMap(new CalcEndpointGroupAR(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(aprDS, "apr").withBroadcastSet(recDS, "rec").withBroadcastSet(opsDS, "ops") + .withBroadcastSet(weightDS, "weight").withBroadcastSet(confDS, "conf"); + + + String dbURI = params.getRequired("mongo.uri"); + String dbMethod = params.getRequired("mongo.method"); + + // Initialize endpoint ar mongo output + MongoEndpointArOutput endpointMongoOut = new MongoEndpointArOutput(dbURI,"endpoint_ar",dbMethod); + // Initialize service ar mongo output + MongoServiceArOutput serviceMongoOut = new MongoServiceArOutput(dbURI,"service_ar",dbMethod); + // Initialize endpoint group ar mongo output + MongoEndGroupArOutput egroupMongoOut = new 
MongoEndGroupArOutput(dbURI,"endpoint_group_ar",dbMethod); + + + endpointResultDS.output(endpointMongoOut); + serviceResultDS.output(serviceMongoOut); + groupResultDS.output(egroupMongoOut); + + + String runDate = params.getRequired("run.date"); + + // Create a job title message to discern job in flink dashboard/cli + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Ar Batch job for tenant:"); + jobTitleSB.append(confMgr.getTenant()); + jobTitleSB.append("on day:"); + jobTitleSB.append(runDate); + jobTitleSB.append("using report:"); + jobTitleSB.append(confMgr.getReport()); + + env.execute(jobTitleSB.toString()); + + + } + + + + + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java new file mode 100644 index 00000000..26622c0b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointAR.java @@ -0,0 +1,145 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; + +import java.util.List; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import ops.ConfigManager; +import ops.DIntegrator; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; + +import sync.RecomputationManager; + +/** + * Accepts a service monitor timeline entry and produces a ServiceAR object by + * calculating a/r over timeline data + */ +public class CalcEndpointAR extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcEndpointAR(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + + private List egp; + private List ggp; + private List apr; + private List rec; + private List ops; + private List conf; + + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private AggregationProfileManager aprMgr; + private RecomputationManager recMgr; + private OpsManager opsMgr; + private ConfigManager confMgr; + + + private String runDate; + private String report; + + /** + * Initialization method of the RichFlatMapFunction operator + *

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get data from broadcast variable + + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.apr = getRuntimeContext().getBroadcastVariable("apr"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize Aggregation Profile Manager ; + this.aprMgr = new AggregationProfileManager(); + this.aprMgr.loadJsonString(apr); + + // Initialize Recomputations Manager; + this.recMgr = new RecomputationManager(); + this.recMgr.loadJsonString(rec); + + // Initialize Operations Manager; + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize Config Manager + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + + + // Initialize rundate + this.runDate = params.getRequired("run.date"); + + // Initialize report id + this.report = this.confMgr.id; + } + + /** + * The main operator business logic of calculating a/r results from timeline + * data + *

      + * Uses a DIntegrator to scan the timeline and calculate availability and + * reliability scores + * + * @param in + * A MonTimeline Object representing a service timeline + * @param out + * An EndpointAR Object containing a/r results + */ + @Override + public void flatMap(MonTimeline mtl, Collector out) throws Exception { + + + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(mtl.getTimeline(),this.opsMgr); + + int runDateInt = Integer.parseInt(this.runDate.replace("-", "")); + + EndpointAR result = new EndpointAR(runDateInt,this.report,mtl.getHostname(),mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + + out.collect(result); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointGroupAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointGroupAR.java new file mode 100644 index 00000000..ee063a76 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcEndpointGroupAR.java @@ -0,0 +1,163 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; + +import java.util.List; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import argo.avro.Weight; +import ops.ConfigManager; +import ops.DIntegrator; + +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; +import sync.RecomputationManager; +import sync.WeightManager; + +/** + * Accepts an endpoint group monitor timeline entry and produces an + * EndpointGRoupAR object by calculating a/r over timeline data + */ +public class CalcEndpointGroupAR extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcEndpointGroupAR(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List egp; + private List ggp; + private List weight; + private List apr; + private List rec; + private List ops; + private List conf; + private MetricProfileManager mpsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private AggregationProfileManager aprMgr; + private RecomputationManager recMgr; + private OpsManager opsMgr; + private WeightManager weightMgr; + private ConfigManager confMgr; + + private String ggroupType; + private String runDate; + private String report; + + /** + * Initialization method of the RichFlatMapFunction operator + *
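The flatMap above leaves the actual availability and reliability math to ops.DIntegrator, which walks the minute-level timeline and reduces it to up/unknown/down fractions. The exact status codes and formulas come from the operations profile and DIntegrator itself; the toy sketch below only illustrates the general idea, and its status constants and formulas are assumptions, not the project's definitions:

import java.util.Arrays;

public class TimelineIntegrationSketch {

    // Assumed integer status codes; the real mapping is defined by the operations profile
    static final int OK = 0, WARNING = 1, UNKNOWN = 2, MISSING = 3, CRITICAL = 4, DOWNTIME = 5;

    public static void main(String[] args) {
        // One day of 1440 one-minute slots: mostly OK, one critical hour, one downtime hour
        int[] timeline = new int[1440];
        Arrays.fill(timeline, OK);
        Arrays.fill(timeline, 600, 660, CRITICAL);
        Arrays.fill(timeline, 1200, 1260, DOWNTIME);

        double up = 0, down = 0, unknown = 0, downtime = 0;
        for (int s : timeline) {
            if (s == OK || s == WARNING) up++;
            else if (s == UNKNOWN || s == MISSING) unknown++;
            else if (s == DOWNTIME) downtime++;
            else down++;
        }
        double total = timeline.length;
        double upF = up / total, unknownF = unknown / total, downF = down / total, downtimeF = downtime / total;

        // Illustrative convention only: availability discounts UNKNOWN time,
        // reliability additionally discounts scheduled downtime
        double availability = 100 * upF / (1.0 - unknownF);
        double reliability = 100 * upF / (1.0 - unknownF - downtimeF);

        System.out.printf("a=%.2f r=%.2f up=%.3f unknown=%.3f down=%.3f%n",
                availability, reliability, upF, unknownF, downF);
    }
}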

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get data from broadcast variable + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.apr = getRuntimeContext().getBroadcastVariable("apr"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.weight = getRuntimeContext().getBroadcastVariable("weight"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + this.weightMgr = new WeightManager(); + this.weightMgr.loadFromList(weight); + + // Initialize Aggregation Profile Manager ; + this.aprMgr = new AggregationProfileManager(); + this.aprMgr.loadJsonString(apr); + + // Initialize Recomputations Manager; + this.recMgr = new RecomputationManager(); + this.recMgr.loadJsonString(rec); + + // Initialize Operations Manager; + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize Config Manager + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + // Initialize rundate + this.runDate = params.getRequired("run.date"); + + // Initialize report id + this.report = this.confMgr.id; + + // Initialize endpoint group type + this.ggroupType = this.confMgr.ggroup; + } + + /** + * The main operator business logic of calculating a/r results from timeline + * data + *

      + * Uses a DIntegrator to scan the timeline and calculate availability and + * reliability scores + * + * @param in + * A MonTimeline Object representing an endpoint group timeline + * @param out + * An EndpointGroupAR Object containing a/r results + */ + @Override + public void flatMap(MonTimeline mtl, Collector out) throws Exception { + + + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(mtl.getTimeline(),this.opsMgr); + + int runDateInt = Integer.parseInt(this.runDate.replace("-", "")); + + int w = weightMgr.getWeight("hepspec", mtl.getGroup()); + + String supergroup = ggpMgr.getGroup(this.ggroupType, mtl.getGroup()); + + EndpointGroupAR result = new EndpointGroupAR(runDateInt,this.report,mtl.getGroup(),supergroup,w,dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + + out.collect(result); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcServiceAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcServiceAR.java new file mode 100644 index 00000000..b2c6ba7b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CalcServiceAR.java @@ -0,0 +1,145 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; + +import java.util.List; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import ops.ConfigManager; +import ops.DIntegrator; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; + +import sync.RecomputationManager; + +/** + * Accepts a service monitor timeline entry and produces a ServiceAR object by + * calculating a/r over timeline data + */ +public class CalcServiceAR extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcServiceAR(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + + private List egp; + private List ggp; + private List apr; + private List rec; + private List ops; + private List conf; + + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private AggregationProfileManager aprMgr; + private RecomputationManager recMgr; + private OpsManager opsMgr; + private ConfigManager confMgr; + + + private String runDate; + private String report; + + /** + * Initialization method of the RichFlatMapFunction operator + *

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get data from broadcast variable + + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.apr = getRuntimeContext().getBroadcastVariable("apr"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize Aggregation Profile Manager ; + this.aprMgr = new AggregationProfileManager(); + this.aprMgr.loadJsonString(apr); + + // Initialize Recomputations Manager; + this.recMgr = new RecomputationManager(); + this.recMgr.loadJsonString(rec); + + // Initialize Operations Manager; + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize Config Manager + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + + + // Initialize rundate + this.runDate = params.getRequired("run.date"); + + // Initialize report id + this.report = this.confMgr.id; + } + + /** + * The main operator business logic of calculating a/r results from timeline + * data + *

      + * Uses a DIntegrator to scan the timeline and calculate availability and + * reliability scores + * + * @param in + * A MonTimeline Object representing a service timeline + * @param out + * A ServiceAR Object containing a/r results + */ + @Override + public void flatMap(MonTimeline mtl, Collector out) throws Exception { + + + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(mtl.getTimeline(),this.opsMgr); + + int runDateInt = Integer.parseInt(this.runDate.replace("-", "")); + + ServiceAR result = new ServiceAR(runDateInt,this.report,mtl.getService(),mtl.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + + out.collect(result); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointGroupTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointGroupTimeline.java new file mode 100644 index 00000000..11b67c8b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointGroupTimeline.java @@ -0,0 +1,212 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; +import sync.RecomputationManager; + +/** + * Accepts a list of service timelines and produces an aggregated endpoint group + * timeline The class is used as a RichGroupReduce Function in flink pipeline + */ +public class CreateEndpointGroupTimeline extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CreateEndpointGroupTimeline(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List aps; + private List ops; + private List egp; + private List ggp; + private List rec; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private OpsManager opsMgr; + private RecomputationManager recMgr; + private String runDate; + + /** + * Initialization method of the RichGroupReduceFunction operator + *

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + * @throws ParseException + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException { + + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize Recomputations Manager; + this.recMgr = new RecomputationManager(); + this.recMgr.loadJsonString(rec); + + this.runDate = params.getRequired("run.date"); + + } + + /** + * The main operator business logic of transforming a collection of service + * timelines to an aggregated endpoint group timeline + *

      + * This runs for each group item (endpointGroup) and contains a list of + * service timelines sorted by the "service" field. It uses multiple + * Discrete Aggregator to aggregate the endpoint timelines according to the + * aggregation groups defined in the aggregation profile and the operations + * defined in the operations profile to produce the final endpoint group + * timeline. + * + * @param in + * An Iterable collection of MonTimeline objects + * @param out + * A Collector list of MonTimeline to acquire the produced group + * endpoint timelines. + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + // Initialize field values and aggregator + String service = ""; + String endpointGroup = ""; + + // Create a Hasmap of Discrete Aggregators for each aggregation group + // based on aggregation profile + Map groupAggr = new HashMap(); + + // Grab metric operation type from aggregation profile + String avProf = apsMgr.getAvProfiles().get(0); + // For each service timeline of the input group + for (MonTimeline item : in) { + + service = item.getService(); + endpointGroup = item.getGroup(); + + // Get the aggregation group + String group = apsMgr.getGroupByService(avProf, service); + + // if group doesn't exist yet create it + if (groupAggr.containsKey(group) == false) { + groupAggr.put(group, new DAggregator()); + } + + // Initialize a DTimelineObject + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + dtl.setStartState(dtl.samples[0]); + + // group will be present now + groupAggr.get(group).timelines.put(service, dtl); + } + + // Aggregate each group + for (String group : groupAggr.keySet()) { + // Get group Operation + + String gop = this.apsMgr.getProfileGroupOp(avProf, group); + + groupAggr.get(group).aggregate(gop, this.opsMgr); + + } + + // Combine group aggregates to a final endpoint group aggregation + // Aggregate all sites + DAggregator totalSite = new DAggregator(); + + // Aggregate each group + for (String group : groupAggr.keySet()) { + DTimeline curTimeline = groupAggr.get(group).aggregation; + for (int i = 0; i < curTimeline.samples.length; i++) { + totalSite.insertSlot(group, i, curTimeline.samples[i]); + + } + + } + + // Final site aggregate + // Get appropriate operation from availability profile + totalSite.aggregate(this.apsMgr.getTotalOp(avProf), this.opsMgr); + + // Check if endpoint group is excluded in recomputations + if (this.recMgr.isExcluded(endpointGroup)) { + + ArrayList> periods = this.recMgr.getPeriods(endpointGroup, this.runDate); + + for (Map period : periods) { + totalSite.aggregation.fill(this.opsMgr.getDefaultUnknownInt(), period.get("start"), period.get("end"), + this.runDate); + } + + } + + // Create a new MonTimeline object for endpoint group + MonTimeline mtl = new MonTimeline(endpointGroup, "", "", ""); + // Add Discrete Timeline samples int array to the MonTimeline + mtl.setTimeline(totalSite.aggregation.samples); + // Output MonTimeline object + out.collect(mtl); + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointTimeline.java new file mode 100644 index 00000000..6c3520b1 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateEndpointTimeline.java @@ -0,0 +1,171 @@ +package argo.batch; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import 
org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.DowntimeManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + +/** + * Accepts a list of monitoring timelines and produces an endpoint timeline The + * class is used as a RichGroupReduce Function in flink pipeline + */ +public class CreateEndpointTimeline extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CreateEndpointTimeline(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List aps; + private List ops; + private List egp; + private List ggp; + private List downtime; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private OpsManager opsMgr; + private DowntimeManager downtimeMgr; + private String runDate; + + /** + * Initialization method of the RichGroupReduceFunction operator + *
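The group-level reduce in CreateEndpointGroupTimeline above merges timelines in two passes: service timelines are first combined per aggregation group using that group's operation, and the per-group results are then combined with the profile's total operation. The sketch below shows slot-wise aggregation in that spirit, with min/max over an assumed severity ordering standing in for the OpsManager truth tables used by the real job:

import java.util.Arrays;
import java.util.List;
import java.util.function.IntBinaryOperator;

public class GroupAggregationSketch {

    // Slot-wise aggregation of equally sized timelines with a binary status operation
    static int[] aggregate(List<int[]> timelines, IntBinaryOperator op) {
        int[] result = timelines.get(0).clone();
        for (int i = 1; i < timelines.size(); i++) {
            int[] t = timelines.get(i);
            for (int slot = 0; slot < result.length; slot++) {
                result[slot] = op.applyAsInt(result[slot], t[slot]);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Assumed severity ordering: 0=OK ... 4=CRITICAL; the real mapping comes from the operations profile
        IntBinaryOperator and = Math::max;  // "AND": the worst status wins
        IntBinaryOperator or = Math::min;   // "OR": the best status wins

        // Two services inside one aggregation group, three slots each
        int[] serviceA = {0, 4, 0};
        int[] serviceB = {0, 0, 4};
        int[] group1 = aggregate(Arrays.asList(serviceA, serviceB), and);  // {0, 4, 4}

        // A second aggregation group with a single service
        int[] group2 = {0, 0, 0};

        // Total pass: combine per-group timelines with the profile's total operation
        int[] site = aggregate(Arrays.asList(group1, group2), or);
        System.out.println(Arrays.toString(site));  // [0, 0, 0] under these toy operations
    }
}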

      + * This runs at the initialization of the operator and receives a configuration + * parameter object. It initializes all required structures used by this + * operator such as profile managers, operations managers, topology managers + * etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException { + + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.downtime = getRuntimeContext().getBroadcastVariable("down"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize downtime manager + this.downtimeMgr = new DowntimeManager(); + this.downtimeMgr.loadFromList(downtime); + + this.runDate = params.getRequired("run.date"); + + } + + /** + * The main operator business logic of transforming a collection of + * MetricTimelines to an aggregated endpoint timeline + *

      + * This runs for each group item (endpointGroup,service,hostname) and contains a + * list of metric timelines sorted by the "metric" field. It uses a Discrete + * Aggregator to aggregate the metric timelines according to the operations + * profile defined in the Operations Manager as to produce the final Endpoint + * Timeline. The type of metric aggregation is defined in the aggregation + * profile managed by the AggregationManager + * + * @param in + * An Iterable collection of MonTimeline objects + * @param out + * A Collector list of MonTimeline to acquire the produced endpoint + * timelines. + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + // Initialize field values and aggregator + String service = ""; + String endpointGroup = ""; + String hostname = ""; + + DAggregator dAgg = new DAggregator(); + + // For each metric timeline of the input group + for (MonTimeline item : in) { + + service = item.getService(); + hostname = item.getHostname(); + endpointGroup = item.getGroup(); + // Initialize a DTimelineObject + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + dtl.setStartState(dtl.samples[0]); + // Push Discrete Timeline directly to the hashtable of the + // aggregator + dAgg.timelines.put(item.getMetric(), dtl); + + } + + // Grab metric operation type from aggregation profile + String avProf = apsMgr.getAvProfiles().get(0); + dAgg.aggregate(apsMgr.getMetricOp(avProf), opsMgr); + + // Apply Downtimes if hostname is on downtime list + ArrayList downPeriod = this.downtimeMgr.getPeriod(hostname, service); + if (downPeriod != null) { + // We have downtime declared + dAgg.aggregation.fill(this.opsMgr.getDefaultDownInt(), downPeriod.get(0), downPeriod.get(1), this.runDate); + } + + // Create a new MonTimeline object for endpoint + MonTimeline mtl = new MonTimeline(endpointGroup, service, hostname, ""); + // Add Discrete Timeline samples int array to the MonTimeline + mtl.setTimeline(dAgg.aggregation.samples); + // Output MonTimeline object + out.collect(mtl); + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateMetricTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateMetricTimeline.java new file mode 100644 index 00000000..99eb57f2 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateMetricTimeline.java @@ -0,0 +1,165 @@ +package argo.batch; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + +/** + * Accepts a list of Mon Data objects and produces a metric timeline + * The class is used as a RichGroupReduce Function in flink pipeline + */ +public class CreateMetricTimeline extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CreateMetricTimeline(ParameterTool params) { + 
this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List aps; + private List ops; + private List egp; + private List ggp; + private List conf; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private OpsManager opsMgr; + private ConfigManager confMgr; + private String runDate; + private String egroupType; + + + + /** + * Initialization method of the RichGroupReduceFunction operator + *
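CreateEndpointTimeline above also overwrites part of the aggregated endpoint timeline with the DOWNTIME status when the downtime manager reports a declared window for the hostname and service. A toy version of that range fill, assuming one-minute slots and UTC timestamps on the computed day:

import java.time.Duration;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.util.Arrays;

public class DowntimeFillSketch {

    // Overwrite the slots covered by [start, end] with the given status (one-minute slots assumed)
    static void fill(int[] timeline, int status, String start, String end, String runDate) {
        Instant dayStart = LocalDate.parse(runDate).atStartOfDay().toInstant(ZoneOffset.UTC);
        int from = Math.max(0, (int) Duration.between(dayStart, Instant.parse(start)).toMinutes());
        int to = Math.min(timeline.length - 1, (int) Duration.between(dayStart, Instant.parse(end)).toMinutes());
        for (int slot = from; slot <= to; slot++) {
            timeline[slot] = status;
        }
    }

    public static void main(String[] args) {
        int[] timeline = new int[1440];   // a day that is OK (=0) everywhere
        int downtimeStatus = 5;           // assumed integer code for DOWNTIME
        fill(timeline, downtimeStatus, "2021-01-01T10:00:00Z", "2021-01-01T11:59:00Z", "2021-01-01");
        // slots 598..601 around the window start: [0, 0, 5, 5]
        System.out.println(Arrays.toString(Arrays.copyOfRange(timeline, 598, 602)));
    }
}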

      + * This runs at the initialization of the operator and receives a configuration + * parameter object. It initializes all required structures used by this operator + * such as profile managers, operations managers, topology managers etc. + * + * @param parameters A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + this.runDate = params.getRequired("run.date"); + this.egroupType = confMgr.egroup; + + + + } + + /** + * The main operator business logic of transforming a collection of MonData to a metric timeline + *

      + * This runs for each group item (endpointGroup,service,hostname,metric) and contains a list of MonData objects sorted + * by the "timestamp" field. It uses a Discrete Timeline object to map individual status change points in time to an array of statuses. + * The Discrete timeline automatically fills the gaps between the status changes to produce a full array of status points representing + * the discrete timeline. Notice that status values are mapped from string representations to integer ids ("OK" => 0, "CRITICAL" => 4) + * for more efficient processing during aggregation comparisons. + * + * @param in An Iterable collection of MonData objects + * @param out A Collector list of MonTimeline to acquire the produced metric timelines. + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + String service = ""; + String endpointGroup = ""; + String hostname = ""; + String metric = ""; + + DTimeline dtl = new DTimeline(); + + for (MonData item : in) { + + service = item.getService(); + hostname = item.getHostname(); + metric = item.getMetric(); + + + endpointGroup = item.getGroup(); + + + String ts = item.getTimestamp(); + String status = item.getStatus(); + // insert monitoring point (ts,status) into the discrete timeline + + if (!(ts.substring(0, ts.indexOf("T")).equals(this.runDate))) { + dtl.setStartState(this.opsMgr.getIntStatus(status)); + continue; + } + + dtl.insert(ts, opsMgr.getIntStatus(status)); + + } + + dtl.settle(opsMgr.getDefaultMissingInt()); + + // Create a new MonTimeline object + MonTimeline mtl = new MonTimeline(endpointGroup, service, hostname, metric); + // Add Discrete Timeline samples int array to the MonTimeline + mtl.setTimeline(dtl.samples); + // Output MonTimeline object + out.collect(mtl); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateServiceTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateServiceTimeline.java new file mode 100644 index 00000000..00ce60a3 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/CreateServiceTimeline.java @@ -0,0 +1,146 @@ +package argo.batch; + +import java.io.IOException; +import java.util.List; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + +/** + * Accepts a list of endpoint timelines and produces a service timeline + * The class is used as a RichGroupReduce Function in flink pipeline + */ +public class CreateServiceTimeline extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CreateServiceTimeline(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List aps; + private List ops; + private List egp; + private List ggp; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + 
private OpsManager opsMgr; + + /** + * Initialization method of the RichGroupReduceFunction operator + *
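The metric-level reduce in CreateMetricTimeline above relies on DTimeline to turn sparse (timestamp, status) points into a dense array of per-minute statuses, seeding the day with the last status seen before it and carrying the last known status forward across gaps. A simplified stand-in for that behaviour (the slot size, the sentinel value and the settle rule here are assumptions):

import java.util.Arrays;

public class DiscreteTimelineSketch {

    static final int SLOTS = 1440;      // one slot per minute of the day
    int[] samples = new int[SLOTS];
    int startState = -1;                // status carried over from the previous day, if any

    DiscreteTimelineSketch() {
        Arrays.fill(samples, -1);       // -1 marks "no measurement in this slot yet"
    }

    void setStartState(int status) {
        this.startState = status;
    }

    // Record a status change at an ISO timestamp that belongs to the computed day
    void insert(String timestamp, int status) {
        int minute = Integer.parseInt(timestamp.substring(11, 13)) * 60
                + Integer.parseInt(timestamp.substring(14, 16));
        samples[minute] = status;
    }

    // Fill the gaps: start from the previous day's state (or MISSING) and carry statuses forward
    void settle(int defaultMissing) {
        int current = (startState == -1) ? defaultMissing : startState;
        for (int i = 0; i < SLOTS; i++) {
            if (samples[i] == -1) {
                samples[i] = current;
            } else {
                current = samples[i];
            }
        }
    }

    public static void main(String[] args) {
        DiscreteTimelineSketch dtl = new DiscreteTimelineSketch();
        dtl.setStartState(0);                        // assume the previous day ended OK (=0)
        dtl.insert("2021-01-01T10:00:00Z", 4);       // CRITICAL reported at 10:00
        dtl.insert("2021-01-01T12:30:00Z", 0);       // back to OK at 12:30
        dtl.settle(3);                               // 3 = assumed code for MISSING
        // prints "0 4 0": OK until 09:59, CRITICAL from 10:00, OK again from 12:30 onwards
        System.out.println(dtl.samples[599] + " " + dtl.samples[600] + " " + dtl.samples[750]);
    }
}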

      + * This runs at the initialization of the operator and receives a configuration + * parameter object. It initializes all required structures used by this operator + * such as profile managers, operations managers, topology managers etc. + * + * @param parameters A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException { + + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + + } + + /** + * The main operator business logic of transforming a collection of EndpointTimelines to an aggregated service timeline + *

      + * This runs for each group item (endpointGroup,service) and contains a list of metric timelines sorted + * by the "hostname" field. It uses a Discrete Aggregator to aggregate the endpoint timelines according to the operations + * profile defined in the Operations Manager as to produce the final Service Timeline. The type of metric aggregation is + * defined in the aggregation profile managed by the AggregationManager + * + * @param in An Iterable collection of MonTimeline objects + * @param out A Collector list of MonTimeline to acquire the produced service timelines. + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + // Initialize field values and aggregator + String service =""; + String endpointGroup =""; + + + DAggregator dAgg = new DAggregator(); + + + // For each endpoint timeline of the input group + for (MonTimeline item : in) { + + + service = item.getService(); + endpointGroup = item.getGroup(); + // Initialize a DTimelineObject + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + dtl.setStartState(dtl.samples[0]); + // Push Discrete Timeline directly to the hashtable of the aggregator + dAgg.timelines.put(item.getHostname(), dtl); + + } + + // Grab metric operation type from aggregation profile + String avProf = apsMgr.getAvProfiles().get(0); + // Get the availability Group in which this service belongs + String avGroup = this.apsMgr.getGroupByService(avProf, service); + dAgg.aggregate(apsMgr.getProfileGroupServiceOp(avProf, avGroup, service), opsMgr); + // Create a new MonTimeline object for endpoint + MonTimeline mtl = new MonTimeline(endpointGroup,service,"",""); + // Add Discrete Timeline samples int array to the MonTimeline + mtl.setTimeline(dAgg.aggregation.samples); + // Output MonTimeline object + out.collect(mtl); + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointAR.java new file mode 100644 index 00000000..928c1e31 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointAR.java @@ -0,0 +1,99 @@ +package argo.batch; + +public class EndpointAR { + + private int dateInt; + private String report; + private String name; + private String service; + private String group; + private double a; + private double r; + private double up; + private double unknown; + private double down; + + public EndpointAR(int _dateInt, String _report, String _name, String _service, String _group, double _a, double _r, double _up, double _unknown, double _down){ + this.dateInt = _dateInt; + this.report=_report; + this.name = _name; + this.service = _service; + this.group = _group; + this.a = _a; + this.r = _r; + this.up = _up; + this.unknown = _unknown; + this.down = _down; + } + + + public String getService() { + return this.service; + } + + public void setService(String service) { + this.service = service; + } + + public int getDateInt(){ + return this.dateInt; + } + + public void setDateInt(int dateInt){ + this.dateInt= dateInt; + } + + public String getReport() { + return report; + } + public void setReport(String report) { + this.report = report; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getGroup() { + return group; + } + public void setGroup(String group) { + this.group = group; + } + public double getA() { + return a; + } + public void setA(double a) { + this.a = a; + } + 
public double getR() { + return r; + } + public void setR(double r) { + this.r = r; + } + public double getUp() { + return up; + } + public void setUp(double up) { + this.up = up; + } + public double getUnknown() { + return unknown; + } + public void setUnknown(double unknown) { + this.unknown = unknown; + } + public double getDown() { + return down; + } + public void setDown(double down) { + this.down = down; + } + + public String toString() { + return "(" + this.dateInt+ "," + this.report + "," + this.name + "," + this.service + "," + this.group + "," + this.a + "," + this.r + "," + this.up + "," + this.unknown + "," + this.down + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointGroupAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointGroupAR.java new file mode 100644 index 00000000..19c9f519 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/EndpointGroupAR.java @@ -0,0 +1,100 @@ +package argo.batch; + + + +public class EndpointGroupAR { + + private int dateInt; + private String report; + private String name; + private String group; + private int weight; + private double a; + private double r; + private double up; + private double unknown; + private double down; + + public EndpointGroupAR(int _dateInt, String _report, String _name, String _group, int _weight, double _a, double _r, double _up, double _unknown, double _down){ + this.dateInt = _dateInt; + this.report=_report; + this.name = _name; + this.group = _group; + this.weight = _weight; + this.a = _a; + this.r = _r; + this.up = _up; + this.unknown = _unknown; + this.down = _down; + } + + public int getWeight(){ + return this.weight; + } + + public void setWeight(int weight){ + this.weight = weight; + } + + public int getDateInt(){ + return this.dateInt; + } + + public void setDateInt(int dateInt){ + this.dateInt= dateInt; + } + + public String getReport() { + return report; + } + public void setReport(String report) { + this.report = report; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getGroup() { + return group; + } + public void setGroup(String group) { + this.group = group; + } + public double getA() { + return a; + } + public void setA(double a) { + this.a = a; + } + public double getR() { + return r; + } + public void setR(double r) { + this.r = r; + } + public double getUp() { + return up; + } + public void setUp(double up) { + this.up = up; + } + public double getUnknown() { + return unknown; + } + public void setUnknown(double unknown) { + this.unknown = unknown; + } + public double getDown() { + return down; + } + public void setDown(double down) { + this.down = down; + } + + public String toString() { + return "(" + this.dateInt+ "," + this.report + "," + this.name + "," + this.group + "," + this.weight + "," + this.a + "," + this.r + "," + this.up + "," + this.unknown + "," + this.down + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/FillMissing.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/FillMissing.java new file mode 100644 index 00000000..8344ad2a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/FillMissing.java @@ -0,0 +1,215 @@ +package argo.batch; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; +import 
org.apache.flink.api.java.tuple.Tuple4; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + +/** + * Accepts a list of metric data objects and produces a list of missing mon data objects + */ +public class FillMissing extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public FillMissing(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List aps; + private List ops; + private List egp; + private List ggp; + private List conf; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private OpsManager opsMgr; + private ConfigManager confMgr; + private String runDate; + private String egroupType; + private Set> expected; + + /** + * Initialization method of the RichGroupReduceFunction operator + *

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + this.runDate = params.getRequired("run.date"); + this.egroupType = this.confMgr.egroup; + + + + } + + + /** + * Reads the topology in endpoint group list and the metric profile and produces a set of available service endpoint metrics + * that are expected to be found (as tuple objects (endpoint_group,service,hostname,metric) + **/ + public void initExpected() { + this.expected = new HashSet>(); + String mProfile = this.mpsMgr.getProfiles().get(0); + for (GroupEndpoint servPoint: this.egp){ + + + ArrayList metrics = this.mpsMgr.getProfileServiceMetrics(mProfile, servPoint.getService()); + + if (metrics==null) continue; + for (String metric:metrics){ + this.expected.add(new Tuple4(servPoint.getGroup(),servPoint.getService(),servPoint.getHostname(),metric)); + } + + } + + + + + + } + + /** + * Iterates over all metric data and gathers a set of encountered service endpoint metrics. Then subtracts it from + * a set of expected service endpoint metrics (based on topology) so as the missing service endpoint metrics to be identified. 
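A compact illustration of the expected-minus-found subtraction described above, using plain string keys in place of Flink Tuple4 objects; the topology and profile contents are invented for the example:

import java.util.HashSet;
import java.util.Set;

public class MissingMetricsSketch {

    public static void main(String[] args) {
        // Expected group/service/hostname/metric combinations, derived from topology and metric profile
        Set<String> expected = new HashSet<String>();
        expected.add("SITE-A/web/portal.example.org/http-check");
        expected.add("SITE-A/web/portal.example.org/cert-check");
        expected.add("SITE-A/db/db.example.org/tcp-check");

        // Combinations actually encountered in the day's metric data
        Set<String> found = new HashSet<String>();
        found.add("SITE-A/web/portal.example.org/http-check");
        found.add("SITE-A/db/db.example.org/tcp-check");

        // Clone expected and subtract what was found; whatever remains gets a synthetic MISSING record
        Set<String> missing = new HashSet<String>(expected);
        missing.removeAll(found);

        for (String key : missing) {
            System.out.println("MISSING at 2021-01-01T00:00:00Z -> " + key);
        }
    }
}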
Then based on the + * list of the missing service endpoint metrics corresponding metric data are created + * + * @param in + * An Iterable collection of MetricData objects + * @param out + * A Collector list of Missing MonData objects + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + initExpected(); + + Set> found = new HashSet>(); + + String service = ""; + String endpointGroup = ""; + String hostname = ""; + String metric = ""; + + String timestamp = this.runDate + "T00:00:00Z"; + String state = this.opsMgr.getDefaultMissing(); + + DTimeline dtl = new DTimeline(); + + for (MetricData item : in) { + + service = item.getService(); + hostname = item.getHostname(); + metric = item.getMetric(); + + // Filter By endpoint group if belongs to supergroup + ArrayList groupnames = egpMgr.getGroup(egroupType, hostname, service); + + for (String groupname : groupnames) { + if (ggpMgr.checkSubGroup(groupname) == true) { + endpointGroup = groupname; + found.add(new Tuple4(endpointGroup, service, hostname, metric)); + } + + } + + + + } + + + // Clone expected set to missing (because missing is going to be mutated after subtraction + Set> missing = new HashSet>(this.expected); + // The result of the subtraction is in missing set + missing.removeAll(found); + + + + + // For each item in missing create a missing metric data entry + for (Tuple4 item:missing){ + MonData mn = new MonData(); + mn.setGroup(item.f0); + mn.setService(item.f1); + mn.setHostname(item.f2); + mn.setMetric(item.f3); + mn.setStatus(state); + mn.setMessage(""); + mn.setSummary(""); + mn.setTimestamp(timestamp); + + out.collect(mn); + + + } + + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonData.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonData.java new file mode 100644 index 00000000..35bbcd23 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonData.java @@ -0,0 +1,120 @@ +package argo.batch; + + +/* Extends the metric data information by adding the extra group field + * + */ +public class MonData { + + private String group; + private String service; + private String hostname; + private String metric; + private String status; + private String timestamp; + private String monHost; + private String summary; + private String message; + private String actualData; + + + public MonData(){ + this.group=""; + this.service=""; + this.hostname=""; + this.metric=""; + this.status=""; + this.timestamp=""; + this.monHost=""; + this.summary=""; + this.message=""; + this.actualData=""; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + public String getMetric() { + return metric; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public String getTimestamp() { + return timestamp; + } + + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public String getMonHost() { + return monHost; + } + + public void setMonHost(String monHost) { + this.monHost = monHost; + } + + public String getSummary() { + return summary; + } + 
+ public void setSummary(String summary) { + this.summary = summary; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public String getActualData() { + return actualData; + } + + public void setActualData(String actualData) { + this.actualData = actualData; + } + + + public String toString() { + return "(" + this.group + "," + this.service + "," + this.hostname + "," + this.metric + "," + this.status + "," + + this.timestamp + "," + this.monHost + "," + this.summary + "," + this.message + "," + this.actualData + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonTimeline.java new file mode 100644 index 00000000..87ec334b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MonTimeline.java @@ -0,0 +1,82 @@ +package argo.batch; + +import java.util.Arrays; + +public class MonTimeline { + + private String group; + private String service; + private String hostname; + private String metric; + private int[] timeline; + + public MonTimeline() { + this.group = ""; + this.service = ""; + this.hostname = ""; + this.metric = ""; + this.timeline = new int[1440]; + } + + public MonTimeline(String _group, String _service, String _hostname, String _metric) { + this.group = _group; + this.service = _service; + this.hostname = _hostname; + this.metric = _metric; + this.timeline = new int[1440]; + + } + + public MonTimeline(String _group, String _service, String _hostname, String _metric, int n) { + this.group = _group; + this.service = _service; + this.hostname = _hostname; + this.metric = _metric; + this.timeline = new int[n]; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + public String getMetric() { + return metric; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + public int[] getTimeline() { + return timeline; + } + + public void setTimeline(int[] timeline) { + this.timeline = timeline; + } + + public String toString() { + return "(" + this.group + "," + this.service + "," + this.hostname + "," + this.metric + "," + Arrays.toString(this.timeline) + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndGroupArOutput.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndGroupArOutput.java new file mode 100644 index 00000000..2af4b26b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndGroupArOutput.java @@ -0,0 +1,142 @@ +package argo.batch; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; +import org.bson.conversions.Bson; + +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOptions; + +import argo.avro.MetricData; + +/** + * MongoOutputFormat for storing Endpoint Group AR data to MongoDB. 
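The Mongo output formats added below all share one write pattern: build a Document from the record, then either insertOne it or, when the job runs in upsert mode, replaceOne it against a filter on the identifying keys (report, date, name, and where relevant service or supergroup), so that re-running a day replaces earlier results instead of duplicating them. A standalone sketch of that pattern with the MongoDB Java driver, with connection details and field values as placeholders:

import org.bson.Document;
import org.bson.conversions.Bson;

import com.mongodb.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.UpdateOptions;

public class MongoUpsertSketch {

    public static void main(String[] args) {
        MongoClient client = new MongoClient("localhost", 27017);
        try {
            MongoCollection<Document> col = client.getDatabase("argo").getCollection("endpoint_group_ar");

            Document doc = new Document("report", "Critical")
                    .append("date", 20210101)
                    .append("name", "SITE-A")
                    .append("availability", 99.5)
                    .append("reliability", 99.5);

            // Identity keys, mirroring the filters used by the output formats in this diff
            Bson filter = Filters.and(
                    Filters.eq("report", "Critical"),
                    Filters.eq("date", 20210101),
                    Filters.eq("name", "SITE-A"));

            // Upsert: replace the day's existing document, or insert it if it is not there yet
            col.replaceOne(filter, doc, new UpdateOptions().upsert(true));
        } finally {
            client.close();
        }
    }
}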
+ */ +public class MongoEndGroupArOutput implements OutputFormat { + + + public enum MongoMethod { INSERT, UPSERT }; + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String colName; + private MongoMethod method; + + + + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection mCol; + + // constructor + public MongoEndGroupArOutput(String uri, String col, String method) { + + if (method.equalsIgnoreCase("upsert")){ + this.method = MongoMethod.UPSERT; + } else { + this.method = MongoMethod.INSERT; + } + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.colName = col; + } + + public MongoEndGroupArOutput(String host, int port, String db, String col, MongoMethod method) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.colName = col; + this.method = method; + } + + + private void initMongo() { + this.mClient = new MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.mCol = mDB.getCollection(colName); + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(EndpointGroupAR record) throws IOException { + + + // create document from record + Document doc = new Document("report",record.getReport()) + .append("date", record.getDateInt()) + .append("name", record.getName()) + .append("supergroup", record.getGroup()) + .append("weight",record.getWeight()) + .append("availability", record.getA()) + .append("reliability", record.getR()) + .append("up", record.getUp()) + .append("unknown", record.getUnknown()) + .append("down", record.getDown()); + + + if (this.method == MongoMethod.UPSERT) { + Bson f = Filters.and(Filters.eq("report", record.getReport()), + Filters.eq("date", record.getDateInt()), + Filters.eq("name", record.getName()), + Filters.eq("supergroup", record.getGroup())); + + UpdateOptions opts = new UpdateOptions().upsert(true); + + mCol.replaceOne(f,doc,opts); + } else { + mCol.insertOne(doc); + } + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + mCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + + + + + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java new file mode 100644 index 00000000..8c67bb1f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoEndpointArOutput.java @@ -0,0 +1,127 @@ +package argo.batch; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; +import org.bson.conversions.Bson; + +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import 
com.mongodb.client.model.UpdateOptions; + +import argo.batch.MongoEndGroupArOutput.MongoMethod; + +/** + * MongoOutputFormat for storing Service AR data to mongodb + */ +public class MongoEndpointArOutput implements OutputFormat { + + public enum MongoMethod { + INSERT, UPSERT + }; + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String colName; + private MongoMethod method; + + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection mCol; + + // constructor + public MongoEndpointArOutput(String uri, String col, String method) { + + if (method.equalsIgnoreCase("upsert")) { + this.method = MongoMethod.UPSERT; + } else { + this.method = MongoMethod.INSERT; + } + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.colName = col; + } + + // constructor + public MongoEndpointArOutput(String host, int port, String db, String col, MongoMethod method) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.colName = col; + this.method = method; + } + + private void initMongo() { + this.mClient = new MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.mCol = mDB.getCollection(colName); + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(EndpointAR record) throws IOException { + + // create document from record + Document doc = new Document("report", record.getReport()).append("date", record.getDateInt()) + .append("name", record.getName()).append("service", record.getService()).append("supergroup", record.getGroup()) + .append("availability", record.getA()).append("reliability", record.getR()).append("up", record.getUp()) + .append("unknown", record.getUnknown()).append("down", record.getDown()); + + if (this.method == MongoMethod.UPSERT) { + Bson f = Filters.and(Filters.eq("report", record.getReport()), Filters.eq("date", record.getDateInt()), + Filters.eq("name", record.getName()), Filters.eq("service", record.getService()), Filters.eq("supergroup", record.getGroup())); + + UpdateOptions opts = new UpdateOptions().upsert(true); + + mCol.replaceOne(f, doc, opts); + } else { + mCol.insertOne(doc); + } + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + mCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoServiceArOutput.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoServiceArOutput.java new file mode 100644 index 00000000..5bd3bc4b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/MongoServiceArOutput.java @@ -0,0 +1,127 @@ +package argo.batch; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; +import org.bson.conversions.Bson; + +import 
com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOptions; + +import argo.batch.MongoEndGroupArOutput.MongoMethod; + +/** + * MongoOutputFormat for storing Service AR data to mongodb + */ +public class MongoServiceArOutput implements OutputFormat { + + public enum MongoMethod { + INSERT, UPSERT + }; + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String colName; + private MongoMethod method; + + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection mCol; + + // constructor + public MongoServiceArOutput(String uri, String col, String method) { + + if (method.equalsIgnoreCase("upsert")) { + this.method = MongoMethod.UPSERT; + } else { + this.method = MongoMethod.INSERT; + } + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.colName = col; + } + + // constructor + public MongoServiceArOutput(String host, int port, String db, String col, MongoMethod method) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.colName = col; + this.method = method; + } + + private void initMongo() { + this.mClient = new MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.mCol = mDB.getCollection(colName); + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(ServiceAR record) throws IOException { + + // create document from record + Document doc = new Document("report", record.getReport()).append("date", record.getDateInt()) + .append("name", record.getName()).append("supergroup", record.getGroup()) + .append("availability", record.getA()).append("reliability", record.getR()).append("up", record.getUp()) + .append("unknown", record.getUnknown()).append("down", record.getDown()); + + if (this.method == MongoMethod.UPSERT) { + Bson f = Filters.and(Filters.eq("report", record.getReport()), Filters.eq("date", record.getDateInt()), + Filters.eq("name", record.getName()), Filters.eq("supergroup", record.getGroup())); + + UpdateOptions opts = new UpdateOptions().upsert(true); + + mCol.replaceOne(f, doc, opts); + } else { + mCol.insertOne(doc); + } + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + mCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/PickEndpoints.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/PickEndpoints.java new file mode 100644 index 00000000..60b769b9 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/PickEndpoints.java @@ -0,0 +1,218 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.List; +import 
java.util.Map; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; +import ops.OpsManager; +import ops.ThresholdManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; +import sync.RecomputationManager; + +/** + * Accepts a metric data entry and converts it to a status metric object by appending endpoint group information + * Filters out entries that do not appear in topology and metric profiles + */ +public class PickEndpoints extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + + final ParameterTool params; + + public PickEndpoints(ParameterTool params){ + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoArBatch.class); + + private List mps; + private List egp; + private List ggp; + private List apr; + private List rec; + private List conf; + private List thr; + private List ops; + private MetricProfileManager mpsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private AggregationProfileManager aprMgr; + private RecomputationManager recMgr; + private ConfigManager confMgr; + private OpsManager opsMgr; + private ThresholdManager thrMgr; + + private String egroupType; + + /** + * Initialization method of the RichFlatMapFunction operator + *

      + * This runs at the initialization of the operator and receives a configuration + * parameter object. It initializes all required structures used by this operator + * such as profile managers, operations managers, topology managers etc. + * + * @param parameters A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get data from broadcast variable + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.apr = getRuntimeContext().getBroadcastVariable("apr"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.thr = getRuntimeContext().getBroadcastVariable("thr"); + + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize Aggregation Profile Manager ; + this.aprMgr = new AggregationProfileManager(); + this.aprMgr.loadJsonString(apr); + + // Initialize Recomputations Manager; + this.recMgr = new RecomputationManager(); + this.recMgr.loadJsonString(rec); + + // Initialize Configurations Manager; + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + // Initialize endpoint group type + this.egroupType = this.confMgr.egroup; + + // Initialize Ops Manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize Threshold manager + this.thrMgr = new ThresholdManager(); + if (!this.thr.get(0).isEmpty()){ + this.thrMgr.parseJSON(this.thr.get(0)); + } + + } + + + /** + * The main operator business logic of filtering a collection of MetricData + *

      + * This runs for a dataset of Metric data items and returns a collection of MonData items after filtering out the + * unwanted ones. + * The filtering happens in 5 stages: + * 1) Filter out by checking if monitoring engine is excluded (Recomputation Manager used) + * 2) Filter out by checking if service belongs to aggregation profile (Aggregation Profile Manager used) + * 3) Filter out by checking if service and metric belongs to metric profile used (Metric Profile Manager used) + * 4) Filter out by checking if service endpoint belongs to group endpoint topology (Group Endpoint Manager used + * 5) Filter out by checking if group endpoint belongs to a valid upper group (Group of Groups Manager used) + * + * @param in An Iterable collection of MetricData objects + * @param out A Collector list of valid MonData objects after filtering + */ + @Override + public void flatMap(MetricData md, Collector out) throws Exception { + + String prof = mpsMgr.getProfiles().get(0); + String aprof = aprMgr.getAvProfiles().get(0); + String hostname = md.getHostname(); + String service = md.getService(); + String metric = md.getMetric(); + String monHost = md.getMonitoringHost(); + String ts = md.getTimestamp(); + + // Filter by monitoring engine + if (recMgr.isMonExcluded(monHost, ts) == true) return; + + // Filter By aggregation profile + if (aprMgr.checkService(aprof, service) == false) return; + + // Filter By metric profile + if (mpsMgr.checkProfileServiceMetric(prof, service, metric) == false) return; + + + // Filter by endpoint group + if (egpMgr.checkEndpoint(hostname, service) == false) return; + + // Filter By endpoint group if belongs to supergroup + ArrayList groupnames = egpMgr.getGroup(egroupType, hostname, service); + + for (String groupname : groupnames) { + + if (ggpMgr.checkSubGroup(groupname) == true){ + + String status = md.getStatus(); + String actualData = md.getActualData(); + + if (actualData != null) { + // Check for relevant rule + String rule = thrMgr.getMostRelevantRule(groupname, md.getHostname(), md.getMetric()); + // if rule is indeed found + if (rule != ""){ + // get the retrieved values from the actual data + Map values = thrMgr.getThresholdValues(actualData); + // calculate + String[] statusNext = thrMgr.getStatusByRuleAndValues(rule, this.opsMgr, "AND", values); + if (statusNext[0] == "") statusNext[0] = status; + LOG.info("{},{},{} data:({}) {} --> {}",groupname,md.getHostname(),md.getMetric(),values,status,statusNext[0]); + if (status != statusNext[0]) { + status = statusNext[0]; + } + } + + + } + + MonData mn = new MonData(); + mn.setGroup(groupname); + mn.setHostname(hostname); + mn.setService(service); + mn.setMetric(metric); + mn.setMonHost(monHost); + mn.setStatus(status); + mn.setTimestamp(ts); + mn.setMessage(md.getMessage()); + mn.setSummary(md.getSummary()); + // transfer the actual data to the enriched monitoring data object + mn.setActualData(actualData); + + out.collect(mn); + } + + + } + + } + + + + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ServiceAR.java b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ServiceAR.java new file mode 100644 index 00000000..8664d5a0 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/argo/batch/ServiceAR.java @@ -0,0 +1,88 @@ +package argo.batch; + +public class ServiceAR { + + private int dateInt; + private String report; + private String name; + private String group; + private double a; + private double r; + private double up; + private double unknown; + private double 
down; + + public ServiceAR(int _dateInt, String _report, String _name, String _group, double _a, double _r, double _up, double _unknown, double _down){ + this.dateInt = _dateInt; + this.report=_report; + this.name = _name; + this.group = _group; + this.a = _a; + this.r = _r; + this.up = _up; + this.unknown = _unknown; + this.down = _down; + } + + public int getDateInt(){ + return this.dateInt; + } + + public void setDateInt(int dateInt){ + this.dateInt= dateInt; + } + + public String getReport() { + return report; + } + public void setReport(String report) { + this.report = report; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getGroup() { + return group; + } + public void setGroup(String group) { + this.group = group; + } + public double getA() { + return a; + } + public void setA(double a) { + this.a = a; + } + public double getR() { + return r; + } + public void setR(double r) { + this.r = r; + } + public double getUp() { + return up; + } + public void setUp(double up) { + this.up = up; + } + public double getUnknown() { + return unknown; + } + public void setUnknown(double unknown) { + this.unknown = unknown; + } + public double getDown() { + return down; + } + public void setDown(double down) { + this.down = down; + } + + public String toString() { + return "(" + this.dateInt+ "," + this.report + "," + this.name + "," + this.group + "," + this.a + "," + this.r + "," + this.up + "," + this.unknown + "," + this.down + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/ops/ConfigManager.java b/flink_jobs/old-models/batch_ar/src/main/java/ops/ConfigManager.java new file mode 100644 index 00000000..bdc3069a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/ConfigManager.java @@ -0,0 +1,196 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.List; +import java.util.TreeMap; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + + +public class ConfigManager { + + private static final Logger LOG = Logger.getLogger(ConfigManager.class.getName()); + + public String id; // report uuid reference + public String report; + public String tenant; + public String egroup; // endpoint group + public String ggroup; // group of groups + public String weight; // weight factor type + public TreeMap egroupTags; + public TreeMap ggroupTags; + public TreeMap mdataTags; + + public ConfigManager() { + this.report = null; + this.id = null; + this.tenant = null; + this.egroup = null; + this.ggroup = null; + this.weight = null; + this.egroupTags = new TreeMap(); + this.ggroupTags = new TreeMap(); + this.mdataTags = new TreeMap(); + + } + + public void clear() { + this.id = null; + this.report = null; + this.tenant = null; + this.egroup = null; + this.ggroup = null; + this.weight = null; + this.egroupTags.clear(); + this.ggroupTags.clear(); + this.mdataTags.clear(); + + } + + public String getReportID() { + return id; + } + + public String getReport() { + return report; + } + + public String getTenant() { + return tenant; + } + + + public String getEgroup() { + return egroup; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data 
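+ // Illustrative sketch of the report JSON this method expects, inferred from the
+ // fields parsed below (the values are placeholders, not a real configuration):
+ //   {"id": "...", "tenant": "...", "info": {"name": "..."},
+ //    "topology_schema": {"group": {"type": "...", "group": {"type": "..."}}},
+ //    "weight": "...", "filter_tags": [{"name": "...", "value": "...", "context": "..."}]}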
+ this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(br); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + + // optional weight filtering + this.weight = ""; + if (jObj.has("weight")){ + this.weight = jObj.get("weight").getAsString(); + } + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("filter_tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value = jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + + /** + * Loads Report config information from a config json string + * + */ + public void loadJsonString(List confJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(confJson.get(0)); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + // optional weight filtering + this.weight = ""; + if (jObj.has("weight")){ + this.weight = jObj.get("weight").getAsString(); + } + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value = jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + +} diff --git 
a/flink_jobs/old-models/batch_ar/src/main/java/ops/DAggregator.java b/flink_jobs/old-models/batch_ar/src/main/java/ops/DAggregator.java new file mode 100644 index 00000000..ae3d68e4 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/DAggregator.java @@ -0,0 +1,118 @@ +package ops; + +import java.io.File; +import java.io.FileNotFoundException; +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map.Entry; + +public class DAggregator { + + public HashMap timelines; + public DTimeline aggregation; + + private int period; // used for sampling of the timelines + private int interval; // used for sampling of the timelines + + // public OpsManager opsMgr; + + public DAggregator() { + + this.period = 1440; + this.interval = 5; + + this.timelines = new HashMap(); + this.aggregation = new DTimeline(this.period, this.interval); + // this.opsMgr = new OpsManager(); + } + + public DAggregator(int period, int interval) { + + this.period = period; + this.interval = interval; + + this.timelines = new HashMap(); + this.aggregation = new DTimeline(); + // this.opsMgr = new OpsManager(); + } + + public void initTimeline(String name, int startStateInt) { + this.timelines.put(name, new DTimeline(this.period, this.interval)); + this.setStartState(name, startStateInt); + } + + public void loadOpsFile(File opsFile) throws FileNotFoundException { + // this.opsMgr.openFile(opsFile); + } + + public void insertSlot(String name, int slot, int statusInt) { + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.samples[slot] = statusInt; + timelines.put(name, tempTimeline); + } else { + timelines.get(name).samples[slot] = statusInt; + } + + } + + public void insert(String name, String timestamp, int statusInt) throws ParseException { + // Get the integer value of the specified status string + + // Check if time-line exists or else create it + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.insert(timestamp, statusInt); + timelines.put(name, tempTimeline); + } else { + timelines.get(name).insert(timestamp, statusInt); + } + } + + public void setStartState(String name, int statusInt) { + // Get the integer value of the specified status string + + // Check if time-line exists or else create it + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.setStartState(statusInt); + timelines.put(name, tempTimeline); + } else { + timelines.get(name).setStartState(statusInt); + } + } + + public void clear() { + this.timelines.clear(); + this.aggregation.clear(); + } + + public void settleAll(int missingStart) { + for (Entry item : timelines.entrySet()) { + item.getValue().settle(missingStart); + } + } + + public void aggregate(String opType, OpsManager opsMgr) { + + int opTypeInt = opsMgr.getIntOperation(opType); + + for (int i = 0; i < this.aggregation.samples.length; i++) { + + boolean firstItem = true; + + for (Entry item : timelines.entrySet()) { + + if (firstItem) { + this.aggregation.samples[i] = item.getValue().samples[i]; + firstItem = false; + } else { + int a = this.aggregation.samples[i]; + int b = item.getValue().samples[i]; + this.aggregation.samples[i] = opsMgr.opInt(opTypeInt, a, b); + } + } + } + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/ops/DIntegrator.java 
b/flink_jobs/old-models/batch_ar/src/main/java/ops/DIntegrator.java new file mode 100644 index 00000000..ef2443fe --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/DIntegrator.java @@ -0,0 +1,74 @@ +package ops; + +import java.math.BigDecimal; + +public class DIntegrator { + + public double availability; + public double reliability; + + public double up_f; + public double unknown_f; + public double down_f; + + public static double round(double input, int prec, int mode) { + try { + BigDecimal inputBD = BigDecimal.valueOf(input); + BigDecimal rounded = inputBD.setScale(prec, mode); + return rounded.doubleValue(); + + } catch (NumberFormatException e) { + return -1; + } + } + + public void clear() { + this.up_f = 0; + this.unknown_f = 0; + this.down_f = 0; + + this.availability = 0; + this.reliability = 0; + } + + public void calculateAR(int[] samples, OpsManager opsMgr) { + + clear(); + + double up = 0; + double down = 0; + double unknown = 0; + + for (int i = 0; i < samples.length; i++) { + if (samples[i] == opsMgr.getIntStatus("OK")) { + up++; + } else if (samples[i] == opsMgr.getIntStatus("WARNING")) { + up++; + } else if (samples[i] == opsMgr.getIntStatus("MISSING")) { + unknown++; + } else if (samples[i] == opsMgr.getIntStatus("UNKNOWN")) { + unknown++; + } else if (samples[i] == opsMgr.getIntStatus("DOWNTIME")) { + down++; + } else if (samples[i] == opsMgr.getIntStatus("CRITICAL")) { + + } + } + + double dt = samples.length; + + // Availability = UP period / KNOWN period = UP period / (Total period – + // UNKNOWN period) + this.availability = round(((up / dt) / (1.0 - (unknown / dt))) * 100, 5, BigDecimal.ROUND_HALF_UP); + + // Reliability = UP period / (KNOWN period – Scheduled Downtime) + // = UP period / (Total period – UNKNOWN period – ScheduledDowntime) + this.reliability = round(((up / dt) / (1.0 - (unknown / dt) - (down / dt))) * 100, 5, BigDecimal.ROUND_HALF_UP); + + this.up_f = round(up / dt, 5, BigDecimal.ROUND_HALF_UP); + this.unknown_f = round(unknown / dt, 5, BigDecimal.ROUND_HALF_UP); + this.down_f = round(down / dt, 5, BigDecimal.ROUND_HALF_UP); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/ops/DTimeline.java b/flink_jobs/old-models/batch_ar/src/main/java/ops/DTimeline.java new file mode 100644 index 00000000..a9f57b7c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/DTimeline.java @@ -0,0 +1,151 @@ +package ops; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Date; +import java.util.TreeMap; + +public class DTimeline { + + private int startState; // state to define the beginning of the timeline + private TreeMap inputStates; // input states with the + // timestamp converted to + // slots + + private int sPeriod; // sampling period measured in minutes + private int sInterval; // sampling interval measured in minutes; + + public int[] samples; // array of samples based on sampling frequency + + public DTimeline() { + this.startState = -1; + this.sPeriod = 1440; // 1 day = 24 hours = 24 * 60 minutes = 1440 + // minutes + this.sInterval = 5; // every 5 minutes; + this.samples = new int[1440 / 5]; // 288 samples; + this.inputStates = new TreeMap(); + Arrays.fill(samples, -1); + } + + public DTimeline(int period, int interval) { + this.startState = -1; + this.sPeriod = period; // given in minutes + this.sInterval = interval; // every ? minutes; + this.samples = new int[period / interval]; // ? 
samples + this.inputStates = new TreeMap(); + Arrays.fill(samples, -1); + } + + public void setSampling(int period, int interval) { + this.sPeriod = period; + this.sInterval = interval; + samples = new int[this.sPeriod / this.sInterval]; + } + + public void clear() { + clearSamples(); + clearTimestamps(); + } + + public void clearSamples() { + samples = new int[this.sPeriod / this.sInterval]; + Arrays.fill(samples, -1); + } + + public void clearTimestamps() { + startState = -1; + inputStates.clear(); + } + + public void setStartState(int state) { + this.startState = state; + } + + public int getStartState() { + return this.startState; + } + + public void fill(int stateInt, String startTs, String endTs, String targetDate) throws ParseException { + // Find begin state + int start; + int end; + + SimpleDateFormat dmy = new SimpleDateFormat("yyyy-MM-dd"); + Date startDt = dmy.parse(startTs); + Date endDt = dmy.parse(endTs); + Date targetDt = dmy.parse(targetDate); + + if (startDt.before(targetDt) && !((startTs.substring(0, startTs.indexOf("T")).equals(targetDate)))) { + start = 0; + } else { + start = tsInt(startTs); + } + + if (endDt.after(targetDt) && !((endTs.substring(0, endTs.indexOf("T")).equals(targetDate)))) { + end = this.samples.length-1; + } else { + end = tsInt(endTs); + } + + for (int i = start; i <= end; i++) { + this.samples[i] = stateInt; + } + + } + + public int tsInt(String timestamp) throws ParseException { + + SimpleDateFormat w3c_date = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + Date parsedDate = w3c_date.parse(timestamp); + Calendar cal = Calendar.getInstance(); + cal.setTime(parsedDate); + + int total_seconds = (cal.get(Calendar.HOUR_OF_DAY) * 3600) + (cal.get(Calendar.MINUTE) * 60) + + cal.get(Calendar.SECOND); + + double total_minutes = Math.round(total_seconds / 60.0); + double result = Math.round(total_minutes / this.sInterval); + + if ((int) result == samples.length) { + return (int) result - 1; + } else { + return (int) result; + } + } + + public void insert(String timestamp, int state) throws ParseException { + int slot = this.tsInt(timestamp); + this.inputStates.put(slot, state); + } + + public void settle(int missingStart) { + if (this.startState == -1) { + this.startState = missingStart; + } + int prev_state = this.startState; + int prev_slot = 0; + for (int item : this.inputStates.keySet()) { + if (item == 0) { + this.samples[item] = this.inputStates.get(item); + continue; + } + this.samples[item] = this.inputStates.get(item); + // fill previous states + for (int i = prev_slot; i < item - 1; i++) { + this.samples[i] = prev_state; + } + // set the prev_state and prev_slot + prev_state = this.inputStates.get(item); + prev_slot = item - 1; + } + + // Fill the rest of the array with the last state + for (int i = prev_slot; i < this.samples.length; i++) { + this.samples[i] = prev_state; + } + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/ops/OpsManager.java b/flink_jobs/old-models/batch_ar/src/main/java/ops/OpsManager.java new file mode 100644 index 00000000..341c9260 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/OpsManager.java @@ -0,0 +1,311 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.commons.io.IOUtils; +import 
org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class OpsManager { + + private static final Logger LOG = Logger.getLogger(OpsManager.class.getName()); + + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; + + private int[][][] truthTable; + + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; + + private boolean order; + + public OpsManager() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + + this.order = false; + + } + + public OpsManager(boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; + + this.truthTable = null; + } + + public String getDefaultDown() { + return this.defaultDownState; + } + + public String getDefaultUnknown() { + return this.defaultUnknownState; + } + + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } + + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } + + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } + + public int opInt(String op, String a, String b) { + + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } + + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } + + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } + + public String getStrStatus(int status) { + return this.revStates.get(status); + } + + public int getIntStatus(String status) { + return this.states.get(status); + } + + public String getStrOperation(int op) { + return this.revOps.get(op); + } + + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = 
j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(List opsJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson.get(0)); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + 
this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/ops/ThresholdManager.java b/flink_jobs/old-models/batch_ar/src/main/java/ops/ThresholdManager.java new file mode 100644 index 00000000..e69a0190 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/ops/ThresholdManager.java @@ -0,0 +1,754 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +/** + * @author kaggis + * + */ +public class ThresholdManager { + + private static final Logger LOG = Logger.getLogger(ThresholdManager.class.getName()); + + // Nested map that holds rule definitions: "groups/hosts/metrics" -> label -> + // threshold + // rules" + private Map> rules; + + // Reverse index checks for group, host, metrics + private HashSet metrics; + private HashSet hosts; + private HashSet groups; + private String aggregationOp = "AND"; + + public Map> getRules() { + return this.rules; + } + + /** + * Threshold class implements objects that hold threshold values as they are + * parsed by a threshold expression such as the following one: + * + * label=30s;0:50,50:100,0,100 + * + * A Threshold object can be directly constructed from a string including an + * expression as the above + * + * Each threshold object stores the threshold expression and the individual + * parsed items such as value, uom, warning range, critical range and min,max + * values + * + */ + class Threshold { + + private static final String defWarning = "WARNING"; + private static final String defCritical = "CRITICAL"; + + private String expression; + private String label; + private Float value; + private String uom; + private Range warning; + private Range critical; + private Float min; + private Float max; + + + + /** + * Constructs a threshold from a string containing a threshold expression + * + * @param expression + * A string containing a threshold exception as the following one: + * label=30s;0:50,50:100,0,100 + * + */ + public 
Threshold(String expression) { + Threshold temp = parseAndSet(expression); + this.expression = temp.expression; + this.label = temp.label; + this.value = temp.value; + this.uom = temp.uom; + this.warning = temp.warning; + this.critical = temp.critical; + this.min = temp.min; + this.max = temp.max; + + } + + /** + * Create a new threshold object by providing each parameter + * + * @param expression + * string containing the threshold expression + * @param label + * threshold label + * @param value + * threshold value + * @param uom + * unit of measurement - optional + * @param warning + * a range determining warning statuses + * @param critical + * a range determining critical statuses + * @param min + * minimum value available for this threshold + * @param max + * maximum value available for this threshold + */ + public Threshold(String expression, String label, float value, String uom, Range warning, Range critical, + float min, float max) { + + this.expression = expression; + this.label = label; + this.value = value; + this.uom = uom; + this.warning = warning; + this.critical = critical; + this.min = min; + this.max = max; + } + + public String getExpression() { + return expression; + } + + public String getLabel() { + return label; + } + + public float getValue() { + return value; + } + + public String getUom() { + return uom; + } + + public Range getWarning() { + return warning; + } + + public Range getCritical() { + return critical; + } + + public float getMin() { + return min; + } + + public float getMax() { + return max; + } + + /** + * Parses a threshold expression string and returns a Threshold object + * + * @param threshold + * string containing the threshold expression + * @return Threshold object + */ + public Threshold parseAndSet(String threshold) { + + String pThresh = threshold; + String curLabel = ""; + String curUom = ""; + Float curValue = Float.NaN; + Range curWarning = new Range(); // empty range + Range curCritical = new Range(); // emtpy range + Float curMin = Float.NaN; + Float curMax = Float.NaN; + // find label by splitting at = + String[] tokens = pThresh.split("="); + // Must have two tokens to continue, label=something + if (tokens.length == 2) { + curLabel = tokens[0]; + + // Split right value by ; to find the array of arguments + String[] subtokens = tokens[1].split(";"); + // Must have size > 0 at least a value + if (subtokens.length > 0) { + curUom = getUOM(subtokens[0]); + curValue = Float.parseFloat(subtokens[0].replaceAll(curUom, "")); + if (subtokens.length > 1) { + // iterate over rest of subtokens + for (int i = 1; i < subtokens.length; i++) { + if (i == 1) { + // parse warning range + curWarning = new Range(subtokens[i]); + continue; + } else if (i == 2) { + // parse critical + curCritical = new Range(subtokens[i]); + continue; + } else if (i == 3) { + // parse min + curMin = Float.parseFloat(subtokens[i]); + continue; + } else if (i == 4) { + // parse min + curMax = Float.parseFloat(subtokens[i]); + } + } + } + + } + + } + + return new Threshold(threshold, curLabel, curValue, curUom, curWarning, curCritical, curMin, curMax); + + } + + /** + * Reads a threshold string value and extracts the unit of measurement if + * present + * + * @param value + * String containing a representation of the value and uom + * @return String representing the uom. 
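+ * (Illustrative: "30s" yields "s", "95%" yields "%", and a purely numeric
+ * value such as "10" yields the empty string.)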
+ */ + public String getUOM(String value) { + // check if ends with digit + if (Character.isDigit(value.charAt(value.length() - 1))) { + return ""; + } + + // check if ends with seconds + if (value.endsWith("s")) + return "s"; + if (value.endsWith("us")) + return "us"; + if (value.endsWith("ms")) + return "ms"; + if (value.endsWith("%")) + return "%"; + if (value.endsWith("B")) + return "B"; + if (value.endsWith("KB")) + return "KB"; + if (value.endsWith("MB")) + return "MB"; + if (value.endsWith("TB")) + return "TB"; + if (value.endsWith("c")) + return "c"; + + // Not valid range + throw new RuntimeException("Invalid Unit of measurement: " + value); + + } + + /** + * Checks an external value against a threshold's warning,critical ranges. If a + * range contains the value (warning or critical) the corresponding status is + * returned as string "WARNING" or "CRITICAL". If the threshold doesn't provide + * the needed data to decide on status an "" is returned back. + * + * @return string with the status result "WARNING", "CRITICAL" + */ + public String calcStatusWithValue(Float value) { + + if (!Float.isFinite(this.value)) + return ""; + if (!this.warning.isUndef()) { + if (this.warning.contains(value)) + return defWarning; + } + if (!this.critical.isUndef()) { + if (this.critical.contains(value)) + return defCritical; + } + + return ""; + } + + /** + * Checks a threshold's value against warning,critical ranges. If a range + * contains the value (warning or critical) the corresponding status is returned + * as string "WARNING" or "CRITICAL". If the threshold doesn't provide the + * needed data to decide on status an "" is returned back. + * + * @return string with the status result "WARNING", "CRITICAL" + */ + public String calcStatus() { + + if (!Float.isFinite(this.value)) + return ""; + if (!this.warning.isUndef()) { + if (this.warning.contains(this.value)) + return defWarning; + } + if (!this.critical.isUndef()) { + if (this.critical.contains(this.value)) + return defCritical; + } + + return ""; + } + + public String toString() { + String strWarn = ""; + String strCrit = ""; + String strMin = ""; + String strMax = ""; + + if (this.warning != null) + strWarn = this.warning.toString(); + if (this.critical != null) + strCrit = this.critical.toString(); + if (this.min != null) + strMin = this.min.toString(); + if (this.max != null) + strMax = this.max.toString(); + + return "[expression=" + this.expression + ", label=" + this.label + ", value=" + this.value + ", uom=" + + this.uom + ", warning=" + strWarn + ", critical=" + strCrit + ", min=" + strMin + ", max=" + + strMax + ")"; + } + + } + + /** + * Range implements a simple object that holds a threshold's critical or warning + * range. It includes a floor,ceil as floats and an exclude flag when a range is + * supposed to be used for exclusion and not inclusion. The threshold spec uses + * an '@' character in front of a range to define inversion(exclusion) + * + * Inclusion assumes that floor < value < ceil and not floor <= value <= ceil + * + */ + class Range { + Float floor; + Float ceil; + Boolean exclude; + + /** + * Creates an empty range. 
Invert is false and limits are NaN + */ + public Range() { + this.floor = Float.NaN; + this.ceil = Float.NaN; + this.exclude = false; + } + + /** + * Creates a range by parameters + * + * @param floor + * Float that defines the lower limit of the range + * @param ceil + * Float that defines the upper limit of the range + * @param exclude + * boolean that defines if the range is used for inclusion (true) or + * exlusion (false) + */ + public Range(Float floor, Float ceil, Boolean exclude) { + this.floor = floor; + this.ceil = ceil; + this.exclude = exclude; + } + + /** + * Creates a range by parsing a range expression string like the following one: + * '0:10' + * + * @param range + * string including a range expression + */ + public Range(String range) { + Range tmp = parseAndSet(range); + this.floor = tmp.floor; + this.ceil = tmp.ceil; + this.exclude = tmp.exclude; + } + + /** + * Checks if a Range is undefined (float,ceiling are NaN) + * + * @return boolean + */ + public boolean isUndef() { + return this.floor == Float.NaN || this.ceil == Float.NaN; + } + + /** + * Checks if a value is included in range (or truly excluded if range is an + * exclusion) + * + * @param value + * Float + * @return boolean + */ + public boolean contains(Float value) { + boolean result = value > this.floor && value < this.ceil; + if (this.exclude) { + return !result; + } + return result; + } + + /** + * Parses a range expression string and creates a Range object Range expressions + * can be in the following forms: + *

+ * • 10 - range starting from 0 to 10
+ * • 10: - range starting from 10 to infinity
+ * • ~:20 - range starting from negative inf. up to 20
+ * • 20:30 - range between two numbers
+ * • @20:30 - inverted range, excludes between two numbers
+ *
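+ * For example (illustrative values, not taken from an actual rule): the
+ * expression "@10:20" parses to floor=10.0, ceil=20.0, exclude=true, so
+ * contains(15f) returns false while contains(25f) returns true; a bare "10"
+ * parses to the check 0 < value < 10 with exclude=false.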
      + * + * @param expression + * String containing a range expression + * @return + */ + public Range parseAndSet(String expression) { + String parsedRange = expression; + Float curFloor = 0F; + Float curCeil = 0F; + boolean curInv = false; + if (parsedRange.replaceAll(" ", "").equals("")) { + return new Range(); + } + // check if invert + if (parsedRange.startsWith("@")) { + curInv = true; + // after check remove @ from range string + parsedRange = parsedRange.replaceAll("^@", ""); + } + + // check if range string doesn't have separator : + if (!parsedRange.contains(":")) { + // then we are in the case of a single number like 10 + // which defines the rule 0 --> 10 so + curFloor = 0F; + curCeil = Float.parseFloat(parsedRange); + + return new Range(curFloor, curCeil, curInv); + } + + // check if range end with separator : + if (parsedRange.endsWith(":")) { + parsedRange = parsedRange.replaceAll(":$", ""); + // then we are in the case of a signle number like 10: + // which defines the rule 10 --> positive infinity + curFloor = Float.parseFloat(parsedRange); + curCeil = Float.POSITIVE_INFINITY; + return new Range(curFloor, curCeil, curInv); + } + + // tokenize string without prefixes + String[] tokens = parsedRange.split(":"); + if (tokens.length == 2) { + // check if token[0] is negative infinity ~ + if (tokens[0].equalsIgnoreCase("~")) { + curFloor = Float.NEGATIVE_INFINITY; + } else { + curFloor = Float.parseFloat(tokens[0]); + } + + curCeil = Float.parseFloat(tokens[1]); + return new Range(curFloor, curCeil, curInv); + } + + // Not valid range + throw new RuntimeException("Invalid threshold: " + expression); + + } + + public String toString() { + return "(floor=" + this.floor + ",ceil=" + this.ceil + ",invert=" + this.exclude.toString() + ")"; + } + + } + + /** + * Creates a Manager that parses rules files with thresholds and stores them + * internally as objects. A ThresholdManager can be used to automatically + * calculate statuses about a monitoring item (group,host,metric) based on the + * most relevant threshold rules stored in it. + */ + public ThresholdManager() { + + this.rules = new HashMap>(); + this.hosts = new HashSet(); + this.groups = new HashSet(); + this.metrics = new HashSet(); + + } + + /** + * Return the default operation when aggregating statuses generated from multiple threshold rules + * @return + */ + public String getAggregationOp() { + return this.aggregationOp; + } + + + /** + * @param op string with the name of the operation to be used in the aggregation (AND,OR,custom one) + */ + public void setAggregationOp(String op) { + this.aggregationOp = op; + } + + /** + * Returns a status calculation for a specific rule key Each rule key is defined + * as follows: 'group/host/metric' and leads to a threshold rule. 
Group and host + * parts are optional as such: 'group//metric' or '/host/metric' or '//metric' + * + * @param rule + * string containing a rule key + * @param opsMgr + * an OpsManager Object to handle status aggregations + * @param opType + * an OpsManager operation to be used (like 'OR', 'AND') + * @return string with status result + */ + public String getStatusByRule(String rule, OpsManager opsMgr, String opType) { + + if (!rules.containsKey(rule)) + return ""; + String status = ""; + Map tholds = rules.get(rule); + for (Entry thold : tholds.entrySet()) { + // first step + if (status == "") { + status = thold.getValue().calcStatus(); + continue; + } + String statusNext = thold.getValue().calcStatus(); + if (statusNext != "") { + status = opsMgr.op(opType, status, statusNext); + } + } + return status; + } + + /** + * Returns a status calculation for a specific rule key Each rule key is defined + * as follows: 'group/host/metric' and leads to a threshold rule. Group and host + * parts are optional as such: 'group//metric' or '/host/metric' or '//metric' + * + * @param rule + * string containing a rule key + * @param opsMgr + * an OpsManager Object to handle status aggregations + * @param opType + * an OpsManager operation to be used (like 'OR', 'AND') + * @return string array with two elements. First element is the status result and second one the rule applied + */ + public String[] getStatusByRuleAndValues(String rule, OpsManager opsMgr, String opType, Map values) { + + if (!rules.containsKey(rule)) + return new String[] {"",""}; + String status = ""; + String explain = ""; + Map tholds = rules.get(rule); + + for ( Entry value : values.entrySet()) { + String label = value.getKey(); + if (tholds.containsKey(label)) { + Threshold th = tholds.get(label); + // first step + if (status == "") { + + status = th.calcStatusWithValue(value.getValue()); + explain = th.getExpression(); + continue; + } + + String statusNext = th.calcStatusWithValue(value.getValue()); + + if (statusNext != "") { + status = opsMgr.op(opType, status, statusNext); + explain = explain + " " + th.getExpression(); + } + } + } + + + return new String[]{status,explain}; + + } + + /** + * Gets the most relevant rule based on a monitoring item (group,host,metric) + * using the following precedence (specific to least specific) (group, host, + * metric) #1 ( , host, metric) #2 (group, , metric) #3 ( , , metric) #4 + * + * @param group + * string with name of the monitored endpoint group + * @param host + * string with name of the monitored host + * @param metric + * string with name of the monitored metric + * @return a string with the relevant rule key + */ + public String getMostRelevantRule(String group, String host, String metric) { + if (!this.metrics.contains(metric)) { + return ""; // nothing found + } else { + + // order or precedence: more specific first + // group,host,metric #1 + // ,host,metric #2 + // group ,metric #3 + // ,metric #4 + if (this.hosts.contains(host)) { + if (this.groups.contains(group)) { + // check if combined entry indeed exists + String key = String.format("%s/%s/%s", group, host, metric); + if (this.rules.containsKey(key)) + return key; // #1 + + } else { + return String.format("/%s/%s", host, metric); // #2 + } + } + + if (this.groups.contains(group)) { + // check if combined entry indeed exists + String key = String.format("%s//%s", group, metric); // #3 + if (this.rules.containsKey(key)) + return key; + } + + return String.format("//%s", metric); + } + + } + + /** + * Parses an expression that 
might contain multiple labels=thresholds separated + * by whitespace and creates a HashMap of labels to parsed threshold objects + * + * @param thresholds + * an expression that might contain multiple thresholds + * @return a HashMap to Threshold objects + */ + public Map parseThresholds(String thresholds) { + Map subMap = new HashMap(); + // Tokenize with lookahead on the point when a new label starts + String[] tokens = thresholds.split("(;|[ ]+)(?=[a-zA-Z])"); + for (String token : tokens) { + Threshold curTh = new Threshold(token); + if (curTh != null) { + subMap.put(curTh.getLabel(), curTh); + } + } + return subMap; + } + + /** + * Parses an expression that might contain multiple labels=thresholds separated + * by whitespace and creates a HashMap of labels to parsed Float values + * + * @param thresholds + * an expression that might contain multiple thresholds + * @return a HashMap to Floats + */ + public Map getThresholdValues(String thresholds) { + Map subMap = new HashMap(); + // tokenize thresholds by whitespace + String[] tokens = thresholds.split("(;|[ ]+)(?=[a-zA-Z])"); + for (String token : tokens) { + Threshold curTh = new Threshold(token); + if (curTh != null) { + subMap.put(curTh.getLabel(), curTh.getValue()); + } + } + return subMap; + } + + /** + * Parses a JSON threshold rule file and populates the ThresholdManager + * + * @param jsonFile + * File to be parsed + * @return boolean signaling whether operation succeeded or not + */ + public boolean parseJSONFile(File jsonFile) { + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + String jsonStr = IOUtils.toString(br); + if (!parseJSON(jsonStr)) + return false; + + } catch (IOException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + return false; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + return false; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + return true; + + } + + /** + * Parses a json string with the appropriate threshold rule schema and populates + * the ThresholdManager + * + * @param jsonString + * string containing threshold rules in json format + * @return boolean signaling whether the parse information succeded or not + */ + public boolean parseJSON(String jsonString) { + + + JsonParser json_parser = new JsonParser(); + JsonObject jRoot = json_parser.parse(jsonString).getAsJsonObject(); + JsonArray jRules = jRoot.getAsJsonArray("rules"); + for (JsonElement jRule : jRules) { + JsonObject jRuleObj = jRule.getAsJsonObject(); + String ruleMetric = jRuleObj.getAsJsonPrimitive("metric").getAsString(); + String ruleHost = ""; + String ruleEgroup = ""; + + if (jRuleObj.has("host")) { + ruleHost = jRuleObj.getAsJsonPrimitive("host").getAsString(); + } + if (jRuleObj.has("endpoint_group")) { + ruleEgroup = jRuleObj.getAsJsonPrimitive("endpoint_group").getAsString(); + } + + String ruleThr = jRuleObj.getAsJsonPrimitive("thresholds").getAsString(); + this.metrics.add(ruleMetric); + if (ruleHost != "") + this.hosts.add(ruleHost); + if (ruleEgroup != "") + this.groups.add(ruleEgroup); + String full = ruleEgroup + "/" + ruleHost + "/" + ruleMetric; + Map thrMap = parseThresholds(ruleThr); + this.rules.put(full, thrMap); + } + + return true; + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/AggregationProfileManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/AggregationProfileManager.java new file mode 100644 index 
00000000..11648956 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/AggregationProfileManager.java @@ -0,0 +1,342 @@ +package sync; + + + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class AggregationProfileManager { + + private HashMap list; + private static final Logger LOG = Logger.getLogger(AggregationProfileManager.class.getName()); + + public AggregationProfileManager() { + + this.list = new HashMap(); + + } + + private class AvProfileItem { + + private String name; + private String namespace; + private String metricProfile; + private String metricOp; + private String groupType; + private String op; + + private HashMap groups; + private HashMap serviceIndex; + + AvProfileItem() { + this.groups = new HashMap(); + this.serviceIndex = new HashMap(); + } + + private class ServGroupItem { + + String op; + HashMap services; + + ServGroupItem(String op) { + this.op = op; + this.services = new HashMap(); + } + } + + // ServGroupItem Declaration Ends Here + + public void insertGroup(String group, String op) { + if (!this.groups.containsKey(group)) { + this.groups.put(group, new ServGroupItem(op)); + } + } + + public void insertService(String group, String service, String op) { + if (this.groups.containsKey(group)) { + this.groups.get(group).services.put(service, op); + this.serviceIndex.put(service, group); + } + } + } + + // AvProfileItem Declaration Ends Here + + public void clearProfiles() { + this.list.clear(); + } + + public String getTotalOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).op; + } + + return ""; + } + + public String getMetricOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricOp; + } + + return ""; + } + + // Return the available Group Names of a profile + public ArrayList getProfileGroups(String avProfile) { + + if (this.list.containsKey(avProfile)) { + ArrayList result = new ArrayList(); + Iterator groupIterator = this.list.get(avProfile).groups.keySet().iterator(); + + while (groupIterator.hasNext()) { + result.add(groupIterator.next()); + } + + return result; + } + + return null; + } + + // Return the available group operation + public String getProfileGroupOp(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + return this.list.get(avProfile).groups.get(groupName).op; + } + } + + return null; + } + + public ArrayList getProfileGroupServices(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + ArrayList result = new ArrayList(); + Iterator srvIterator = this.list.get(avProfile).groups.get(groupName).services.keySet() + .iterator(); + + while (srvIterator.hasNext()) { + result.add(srvIterator.next()); + } + + return result; + } + } + + return null; + } + + public String getProfileGroupServiceOp(String avProfile, String groupName, String service) { + + if 
(this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + if (this.list.get(avProfile).groups.get(groupName).services.containsKey(service)) { + return this.list.get(avProfile).groups.get(groupName).services.get(service); + } + } + } + + return null; + } + + public ArrayList getAvProfiles() { + + if (this.list.size() > 0) { + ArrayList result = new ArrayList(); + Iterator avpIterator = this.list.keySet().iterator(); + while (avpIterator.hasNext()) { + result.add(avpIterator.next()); + } + + return result; + + } + + return null; + } + + public String getProfileNamespace(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).namespace; + } + + return null; + } + + public String getProfileMetricProfile(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricProfile; + } + + return null; + } + + public String getProfileGroupType(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).groupType; + } + + return null; + } + + public String getGroupByService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + return this.list.get(avProfile).serviceIndex.get(service); + + } + return null; + + } + + public boolean checkService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + if (this.list.get(avProfile).serviceIndex.containsKey(service)) { + return true; + } + + } + return false; + + } + + public void loadJson(File jsonFile) throws IOException { + + BufferedReader br = null; + try { + + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(br); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(List apsJson) throws IOException { + + + try { + + + + JsonParser jsonParser = new 
JsonParser(); + JsonElement jRootElement = jsonParser.parse(apsJson.get(0)); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + + + } catch (JsonParseException ex) { + LOG.error("Contents are not valid json"); + throw ex; + } + + } + + + + +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/DowntimeManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/DowntimeManager.java new file mode 100644 index 00000000..2034458d --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/DowntimeManager.java @@ -0,0 +1,188 @@ +package sync; + +import org.apache.log4j.Logger; + +import argo.avro.Downtime; + + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; + +/** + * DowntimeManager manages supplementary downtime information that is needed in computation of a/r scores for endpoint groups + * Information can be loaded either directly from an avro file or from a list of avro objects + */ +public class DowntimeManager { + + /** + * List of Downtime information items + */ + private ArrayList list; + private static final Logger LOG = Logger.getLogger(DowntimeManager.class.getName()); + + /** + * Inner class that holds information about a downtime item which is a actually a 4-tuple (hostname,service,startTime,endTime) + */ + private class DowntimeItem { + String hostname; // name of host + String service; // name of service + String startTime; // declare start time of downtime + String endTime; // declare end time of downtime + + + + public DowntimeItem(String hostname, String service, String startTime, String endTime) { + this.hostname = hostname; + this.service = service; + this.startTime = startTime; + this.endTime = endTime; + } + + } + + public DowntimeManager() { + this.list = new ArrayList(); + } + /** + * Inserts new downtime information to Donwtime Manager (hostname,service,startTime,endTime) + */ + public int 
insert(String hostname, String service, String startTime, String endTime) { + DowntimeItem tmpItem = new DowntimeItem(hostname, service, startTime, endTime); + this.list.add(tmpItem); + return 0; // All good + } + + /** + * Returns the downtime period (if any) for a specific service endpoint: (hostname,service) + */ + public ArrayList getPeriod(String hostname, String service) { + + ArrayList period = new ArrayList(); + + for (DowntimeItem item : this.list) { + + if (item.hostname.equals(hostname)) { + if (item.service.equals(service)) { + period.add(item.startTime); + period.add(item.endTime); + return period; + } + } + } + + return null; + + } + + /** + * Loads downtime information from an avro file + *

      + * This method loads downtime information contained in an .avro file with a
      + * specific avro schema.
      + *
      + * The following fields are expected to be found in each avro row:
      + *   1. start_time: string
      + *   2. end_time: string
      + *   3. service: string
      + *   4. hostname: string
      + *   5. [optional] tags: hashmap (contains a map of arbitrary key values)
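      + *
      + * Minimal usage sketch (illustrative only; the avro path, hostname and
      + * service values below are hypothetical):
      + *
      + *   DowntimeManager dm = new DowntimeManager();
      + *   dm.loadAvro(new File("/path/to/downtimes.avro"));
      + *   // returns [start_time, end_time] for the endpoint, or null if no downtime is declared
      + *   ArrayList period = dm.getPeriod("ce01.example.org", "CREAM-CE");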
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String hostname = avroRow.get("hostname").toString(); + String service = avroRow.get("service").toString(); + String startTime = avroRow.get("start_time").toString(); + String endTime = avroRow.get("end_time").toString(); + + // Insert data to list + this.insert(hostname, service, startTime, endTime); + + } // end of avro rows + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + /** + * Loads downtime information from a list of downtime objects + * + */ + + public void loadFromList( List dnt) { + // IF no downtimes collected return + if (dnt==null) return; + + // For each downtime object in list + for (Downtime item : dnt){ + String hostname = item.getHostname(); + String service = item.getService(); + String startTime = item.getStartTime(); + String endTime = item.getEndTime(); + // Insert data to list + this.insert(hostname,service,startTime,endTime); + } + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/EndpointGroupManager.java new file mode 100644 index 00000000..87bf6b0d --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/EndpointGroupManager.java @@ -0,0 +1,255 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; + + +public class EndpointGroupManager { + + private static final Logger LOG = Logger.getLogger(EndpointGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + private class EndpointItem { + String type; // type of group + String group; // name of the group + String service; // type of the service + String hostname; // name of host + HashMap tags; // Tag list + + + + public EndpointItem(String type, String group, 
String service, String hostname, HashMap tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + + } + + } + + public EndpointGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + + } + + public int insert(String type, String group, String service, String hostname, HashMap tags) { + EndpointItem new_item = new EndpointItem(type, group, service, hostname, tags); + this.list.add(new_item); + return 0; // All good + } + + public boolean checkEndpoint(String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.hostname.equals(hostname) && item.service.equals(service)) { + return true; + } + } + + return false; + } + + public ArrayList getGroup(String type, String hostname, String service) { + + ArrayList results = new ArrayList(); + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + results.add(item.group); + } + } + + return results; + } + + public HashMap getGroupTags(String type, String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public void unfilter() { + this.fList.clear(); + for (EndpointItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (EndpointItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + // First Check binary tags as Y/N 0/1 + + if (fTagItem.getValue().equalsIgnoreCase("y") || fTagItem.getValue().equalsIgnoreCase("n")) { + String binValue = ""; + if (fTagItem.getValue().equalsIgnoreCase("y")) + binValue = "1"; + if (fTagItem.getValue().equalsIgnoreCase("n")) + binValue = "0"; + + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(binValue) == false) { + trim = true; + } + } else if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + /** + * Loads endpoint grouping information from an avro file + *

      + * This method loads endpoint grouping information contained in an .avro
      + * file with specific avro schema.
      + *
      + * The following fields are expected to be found in each avro row:
      + *   1. type: string (describes the type of grouping)
      + *   2. group: string
      + *   3. service: string
      + *   4. hostname: string
      + *   5. tags: hashmap (contains a map of arbitrary key values)
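      + *
      + * Minimal usage sketch (illustrative only; the avro path, group type,
      + * hostname and service values below are hypothetical):
      + *
      + *   EndpointGroupManager egm = new EndpointGroupManager();
      + *   egm.loadAvro(new File("/path/to/group_endpoints.avro"));
      + *   boolean known = egm.checkEndpoint("ce01.example.org", "CREAM-CE");
      + *   // groups of the given type that contain this service endpoint
      + *   ArrayList groups = egm.getGroup("SITES", "ce01.example.org", "CREAM-CE");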
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab Avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String service = avroRow.get("service").toString(); + String hostname = avroRow.get("hostname").toString(); + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + + } // end of avro rows + + this.unfilter(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + public ArrayList getList(){ + return this.list; + } + + /** + * Loads information from a list of EndpointGroup objects + * + */ + public void loadFromList( List egp) { + + // For each endpoint group record + for (GroupEndpoint item : egp){ + String type = item.getType(); + String group = item.getGroup(); + String service = item.getService(); + String hostname = item.getHostname(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + } + + this.unfilter(); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/GroupGroupManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/GroupGroupManager.java new file mode 100644 index 00000000..74bb2378 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/GroupGroupManager.java @@ -0,0 +1,227 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + + +import argo.avro.GroupGroup; + +public class GroupGroupManager { + + static Logger log = Logger.getLogger(GroupGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + private class GroupItem { + String type; // type of group + String group; // name of the group + String 
subgroup; // name of sub-group + HashMap tags; // Tag list + + + + public GroupItem(String type, String group, String subgroup, HashMap tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + + } + + } + + public GroupGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + } + + public int insert(String type, String group, String subgroup, HashMap tags) { + GroupItem new_item = new GroupItem(type, group, subgroup, tags); + this.list.add(new_item); + return 0; // All good + } + + public HashMap getGroupTags(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public String getGroup(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.group; + } + } + + return null; + } + + public void unfilter() { + this.fList.clear(); + for (GroupItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (GroupItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + public boolean checkSubGroup(String subgroup) { + for (GroupItem item : fList) { + if (item.subgroup.equals(subgroup)) { + return true; + } + } + + return false; + } + + /** + * Loads groups of groups information from an avro file + *

      + * This method loads groups of groups information contained in an .avro
      + * file with specific avro schema.
      + *
      + * The following fields are expected to be found in each avro row:
      + *   1. type: string (describes the type of grouping)
      + *   2. group: string
      + *   3. subgroup: string
      + *   4. tags: hashmap (contains a map of arbitrary key values)
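      + *
      + * Minimal usage sketch (illustrative only; the avro path and the
      + * type/subgroup values below are hypothetical):
      + *
      + *   GroupGroupManager ggm = new GroupGroupManager();
      + *   ggm.loadAvro(new File("/path/to/group_groups.avro"));
      + *   // returns the parent group of the given subgroup, or null if not found
      + *   String parent = ggm.getGroup("SOME_GROUP_TYPE", "SOME-SUBGROUP");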
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + HashMap tags = (HashMap) avroRow.get("tags"); + if (tags != null) { + for (Object item : tags.keySet()) { + tagMap.put(item.toString(), tags.get(item).toString()); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String subgroup = avroRow.get("subgroup").toString(); + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + + } // end of avro rows + + this.unfilter(); + + dataFileReader.close(); + + } catch (IOException ex) { + log.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + /** + * Loads group of group information from a list of GroupGroup objects + * + */ + public void loadFromList( List ggp) { + + // For each group of groups record + for (GroupGroup item : ggp){ + String type = item.getType(); + String group = item.getGroup(); + String subgroup = item.getSubgroup(); + + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + } + + this.unfilter(); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/MetricProfileManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/MetricProfileManager.java new file mode 100644 index 00000000..8da8ca79 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/MetricProfileManager.java @@ -0,0 +1,243 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.MetricProfile; + +public class MetricProfileManager { + + private static final Logger LOG = Logger.getLogger(MetricProfileManager.class.getName()); + + + private Map>> index; + + + + public MetricProfileManager() { + + this.index = new HashMap>>(); + } + + // Clear all profile data (both list and indexes) + public void clear() { + + this.index = new HashMap>>(); + } + + // Indexed List Functions + public int indexInsertProfile(String profile) { + 
if (!index.containsKey(profile)) { + index.put(profile, new HashMap>()); + return 0; + } + return -1; + } + + public void insert(String profile, String service, String metric, HashMap tags) { + + this.indexInsertMetric(profile, service, metric); + } + + public int indexInsertService(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return -1; + } else { + index.get(profile).put(service, new ArrayList()); + return 0; + } + + } + + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + return 0; + + } + + public int indexInsertMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) { + // Metric exists so no insertion + return -1; + } + // Metric doesn't exist and must be added + index.get(profile).get(service).add(metric); + return 0; + } else { + // Create the service and the metric + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + } + + } + // No profile - service - metric so add them all + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + + } + + // Getter Functions + + public ArrayList getProfileServices(String profile) { + if (index.containsKey(profile)) { + ArrayList ans = new ArrayList(); + ans.addAll(index.get(profile).keySet()); + return ans; + } + return null; + + } + + public ArrayList getProfiles() { + if (index.size() > 0) { + ArrayList ans = new ArrayList(); + ans.addAll(index.keySet()); + return ans; + } + return null; + } + + public ArrayList getProfileServiceMetrics(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return index.get(profile).get(service); + } + } + return null; + } + + public boolean checkProfileServiceMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) + return true; + } + } + + return false; + } + + /** + * Loads metric profile information from an avro file + *

      + * This method loads metric profile information contained in an .avro
      + * file with specific avro schema.
      + *
      + * The following fields are expected to be found in each avro row:
      + *   1. profile: string
      + *   2. service: string
      + *   3. metric: string
      + *   4. [optional] tags: hashmap (contains a map of arbitrary key values)
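      + *
      + * Minimal usage sketch (illustrative only; the avro path and the profile
      + * name below are hypothetical):
      + *
      + *   MetricProfileManager mpm = new MetricProfileManager();
      + *   mpm.loadAvro(new File("/path/to/metric_profiles.avro"));
      + *   // true if the profile defines this metric for the given service type
      + *   boolean defined = mpm.checkProfileServiceMetric("SOME_PROFILE", "CREAM-CE", "emi.cream.CREAMCE-JobSubmit");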
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String profile = avroRow.get("profile").toString(); + String service = avroRow.get("service").toString(); + String metric = avroRow.get("metric").toString(); + + // Insert data to list + this.insert(profile, service, metric, tagMap); + + } // end of avro rows + + dataFileReader.close(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + /** + * Loads metric profile information from a list of MetricProfile objects + * + */ + public void loadFromList( List mps) { + + // For each metric profile object in list + for (MetricProfile item : mps){ + String profile = item.getProfile(); + String service = item.getService(); + String metric = item.getMetric(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(profile, service, metric, tagMap); + } + + + } + + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/RecomputationManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/RecomputationManager.java new file mode 100644 index 00000000..e79c53a3 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/RecomputationManager.java @@ -0,0 +1,256 @@ +package sync; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; + +import java.util.List; +import java.util.Map; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; + +import com.google.gson.JsonParser; + +public class RecomputationManager { + + private static final Logger LOG = Logger.getLogger(RecomputationManager.class.getName()); + + public Map>> groups; + // Recomputations for filtering monitoring engine results + public Map>> monEngines; + + public RecomputationManager() { + this.groups = new HashMap>>(); + this.monEngines = new HashMap>>(); + } + + // Clear all the recomputation data + public void clear() { + this.groups = new HashMap>>(); + 
this.monEngines = new HashMap>>(); + } + + // Insert new recomputation data for a specific endpoint group + public void insert(String group, String start, String end) { + + Maptemp = new HashMap(); + temp.put("start", start); + temp.put("end",end); + + if (this.groups.containsKey(group) == false){ + this.groups.put(group, new ArrayList>()); + } + + this.groups.get(group).add(temp); + + } + + // Insert new recomputation data for a specific monitoring engine + public void insertMon(String monHost, String start, String end) throws ParseException { + + Maptemp = new HashMap(); + SimpleDateFormat tsW3C = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + temp.put("s", tsW3C.parse(start)); + temp.put("e",tsW3C.parse(end)); + + if (this.monEngines.containsKey(monHost) == false){ + this.monEngines.put(monHost, new ArrayList>()); + } + + this.monEngines.get(monHost).add(temp); + + } + + // Check if group is excluded in recomputations + public boolean isExcluded (String group){ + return this.groups.containsKey(group); + } + + + + // Check if a recomputation period is valid for target date + public boolean validPeriod(String target, String start, String end) throws ParseException { + + SimpleDateFormat dmy = new SimpleDateFormat("yyyy-MM-dd"); + Date tDate = dmy.parse(target); + Date sDate = dmy.parse(start); + Date eDate = dmy.parse(end); + + return (tDate.compareTo(sDate) >= 0 && tDate.compareTo(eDate) <= 0); + + } + + public ArrayList> getPeriods(String group,String targetDate) throws ParseException { + ArrayList> periods = new ArrayList>(); + + if (this.groups.containsKey(group)){ + for (Map period : this.groups.get(group)){ + if (this.validPeriod(targetDate, period.get("start"), period.get("end"))){ + periods.add(period); + } + } + + } + + return periods; + } + + // + public boolean isMonExcluded(String monHost, String inputTs) throws ParseException{ + + if (this.monEngines.containsKey(monHost) == false) + { + return false; + } + SimpleDateFormat tsW3C = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + Date targetDate = tsW3C.parse(inputTs); + for (Map item : this.monEngines.get(monHost)) + { + + if (!(targetDate.before(item.get("s")) || targetDate.after(item.get("e")))) { + return true; + } + } + + return false; + } + + + + public void loadJson(File jsonFile) throws IOException, ParseException { + + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(br); + JsonArray jRootObj = jRootElement.getAsJsonArray(); + + for (JsonElement item : jRootObj) { + + // Get the excluded sites + if (item.getAsJsonObject().get("start_time") != null + && item.getAsJsonObject().get("end_time") != null + && item.getAsJsonObject().get("exclude") != null ) { + + String start = item.getAsJsonObject().get("start_time").getAsString(); + String end = item.getAsJsonObject().get("end_time").getAsString(); + + // Get the excluded + JsonArray jExclude = item.getAsJsonObject().get("exclude").getAsJsonArray(); + for (JsonElement subitem : jExclude) { + this.insert(subitem.getAsString(),start,end); + } + } + + // Get the excluded Monitoring sources + if (item.getAsJsonObject().get("exclude_monitoring_source") != null) { + JsonArray jMon = item.getAsJsonObject().get("exclude_monitoring_source").getAsJsonArray(); + for (JsonElement subitem: jMon){ + + String monHost = subitem.getAsJsonObject().get("host").getAsString(); + String monStart = 
subitem.getAsJsonObject().get("start_time").getAsString(); + String monEnd = subitem.getAsJsonObject().get("end_time").getAsString(); + this.insertMon(monHost, monStart, monEnd); + } + } + + } + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (ParseException pex) { + LOG.error("Parsing date error"); + throw pex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + /** + * Load Recompuatation information from a JSON string instead of a File source. + * This method is used in execution enviroments where the required data is provided by broadcast variables + */ + public void loadJsonString(List recJson) throws IOException, ParseException { + + this.clear(); + + + try { + + // If no recomputations collected return + if (recJson == null) return; + + JsonParser jsonParser = new JsonParser(); + + + + if (recJson.get(0).equalsIgnoreCase("{}")) return; + + + JsonElement jRootElement = jsonParser.parse(recJson.get(0)); + + + + JsonArray jRootObj = jRootElement.getAsJsonArray(); + + for (JsonElement item : jRootObj) { + + // Get the excluded sites + if (item.getAsJsonObject().get("start_time") != null + && item.getAsJsonObject().get("end_time") != null + && item.getAsJsonObject().get("exclude") != null ) { + + String start = item.getAsJsonObject().get("start_time").getAsString(); + String end = item.getAsJsonObject().get("end_time").getAsString(); + + // Get the excluded + JsonArray jExclude = item.getAsJsonObject().get("exclude").getAsJsonArray(); + for (JsonElement subitem : jExclude) { + this.insert(subitem.getAsString(),start,end); + } + } + + // Get the excluded Monitoring sources + if (item.getAsJsonObject().get("exclude_monitoring_source") != null) { + JsonArray jMon = item.getAsJsonObject().get("exclude_monitoring_source").getAsJsonArray(); + for (JsonElement subitem: jMon){ + + String monHost = subitem.getAsJsonObject().get("host").getAsString(); + String monStart = subitem.getAsJsonObject().get("start_time").getAsString(); + String monEnd = subitem.getAsJsonObject().get("end_time").getAsString(); + this.insertMon(monHost, monStart, monEnd); + } + } + + } + + + + } catch (ParseException pex) { + LOG.error("Parsing date error"); + throw pex; + } + } + + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/java/sync/WeightManager.java b/flink_jobs/old-models/batch_ar/src/main/java/sync/WeightManager.java new file mode 100644 index 00000000..c8cb72c7 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/java/sync/WeightManager.java @@ -0,0 +1,180 @@ +package sync; + +import org.apache.log4j.Logger; + +import argo.avro.Weight; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; + +/** + * WeightManager manages supplementary weight information that is needed in computation of a/r scores for endpoint groups + * Information can be loaded either directly from an avro file or from a list of avro objects + */ +public class WeightManager { + + + /** + * Hashmap that holds information to weight lists by type. 
+ */ + private HashMap> list; + + private static final Logger LOG = Logger.getLogger(WeightManager.class.getName()); + + /** + * Inner class that holds information about a weight item which is a actually a tuple (group_name,weight_value) + */ + private class WeightItem { + String group; // name of the group + String weight; // weight value + + + public WeightItem(String group, String weight) { + this.group = group; + this.weight = weight; + } + } + + public WeightManager() { + list = new HashMap>(); + } + + /** + * Inserts new weight information (type,group_name,weight_value) to the Weight manager + */ + public int insert(String type, String group, String weight) { + WeightItem tmpItem = new WeightItem(group, weight); + if (this.list.containsKey(type)) { + this.list.get(type).add(tmpItem); + } else { + this.list.put(type, new ArrayList()); + this.list.get(type).add(tmpItem); + } + + return 0; // All good + } + + /** + * Returns weight information by (type,group_name) + */ + public int getWeight(String type, String group) { + if (list.containsKey(type)) { + for (WeightItem item : list.get(type)) { + if (item.group.equals(group)) { + return Integer.parseInt(item.weight); + } + } + } + + return 0; + + } + + /** + * Loads weight information from an avro file + *

      + * This method loads weight information contained in an .avro file with
      + * specific avro schema.
      + *
      + * The following fields are expected to be found in each avro row:
      + *   1. type: string
      + *   2. site: string
      + *   3. weight: string
      + *   4. [optional] tags: hashmap (contains a map of arbitrary key values)
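      + *
      + * Minimal usage sketch (illustrative only; the avro path and the
      + * type/site values below are hypothetical):
      + *
      + *   WeightManager wm = new WeightManager();
      + *   wm.loadAvro(new File("/path/to/weights.avro"));
      + *   // returns the numeric weight for the (type, group) pair, or 0 if absent
      + *   int w = wm.getWeight("SOME_TYPE", "SOME-SITE");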
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public int loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("site").toString(); + String weight = avroRow.get("weight").toString(); + + // Insert data to list + this.insert(type, group, weight); + + } // end of avro rows + + dataFileReader.close(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + return 0; // allgood + } + + /** + * Loads Weight information from a list of Weight objects + * + */ + + public void loadFromList( List wg) { + + // For each weight in list + for (Weight item : wg){ + String type = item.getType(); + String group = item.getSite(); + String weight = item.getWeight(); + // Insert data to list + this.insert(type, group, weight); + } + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/cream-ce-timeline.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/cream-ce-timeline.json new file mode 100644 index 00000000..03880319 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/cream-ce-timeline.json @@ -0,0 +1 @@ 
+{"service":"CREAM-CE","hostname":"cce.ihep.ac.cn","bag_0":[{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T21:51:32Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T12:31:40Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T21:51:32Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T18:31:40Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T21:51:32Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T15:51:36Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T20:33:16Z","status":"UNKNOWN"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T18:33:10Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T23:58:20Z","status":"UNKNOWN"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T16:33:16Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T15:51:36Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T15:53:16Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T19:28:20Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T14:34:14Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T18:31:40Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T20:56:36Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T17:43:24Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T20:56:36Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T20:58:16Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T18:08:10Z","status":"CRITICAL"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T15:13:06Z","status":"CRITICAL"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T15:14:16Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T18:09:00Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T18:31:40Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T22:53:16Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T12:33:10Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T15:51:36Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T13:31:35Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T22:41:56Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T17:33:14Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T20:56:36Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T22:51:36Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T20:13:06Z","status":"CRITICAL"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T16:31:36Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T20:13:16Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T13:31:35Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T16:31:36Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T21:53:12Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T13:31:35Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T16:31:36Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T20:5
6:36Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T13:31:35Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T12:31:40Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T15:51:36Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T12:31:40Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T20:33:06Z","status":"CRITICAL"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T13:33:15Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T16:31:36Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T13:33:15Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T22:51:36Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T22:51:36Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T14:54:09Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T14:26:54Z","status":"UNKNOWN"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T14:53:09Z","status":"CRITICAL"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T21:33:12Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T12:31:40Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T18:31:40Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T21:51:32Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T22:51:36Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T10:31:33Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T10:31:33Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T10:31:33Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T04:33:07Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T03:33:10Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T11:31:40Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T11:31:40Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T11:31:40Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T11:31:40Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T08:33:14Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T01:06:33Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T01:06:33Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T10:33:13Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T01:06:33Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T01:06:33Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T04:31:37Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T04:31:37Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T00:08:12Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T02:41:3
9Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T02:41:39Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T05:33:11Z","status":"UNKNOWN"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T05:33:11Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T06:33:12Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T11:33:10Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T04:31:37Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T04:31:37Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T09:31:33Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T00:06:32Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T09:33:13Z","status":"UNKNOWN"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T00:06:32Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T00:06:32Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T00:06:32Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T09:33:13Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T01:08:13Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T02:12:49Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T02:13:19Z","status":"UNKNOWN"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-06T01:33:13Z","status":"UNKNOWN"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T09:31:33Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T08:31:34Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T08:31:34Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T08:31:34Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T09:31:33Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T08:31:34Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T09:31:33Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T05:31:31Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T05:31:31Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T05:31:31Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T05:31:31Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T06:31:32Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T06:31:32Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T06:31:32Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T06:31:32Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-06T07:31:35Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-06T07:31:35Z","status":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-06T07:33:15Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-06T07:31:35Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T07:31:35Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-06T10:31:33Z","status":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-02-05T23:06:35Z","status":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-02-05T23:06:35Z","status":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-02-05T23:06:35Z","status":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-02-05T23:06:35Z","status":"OK
"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-02-05T23:08:15Z","status":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-02-05T21:33:14Z","status":"UNKNOWN"}]} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/endpoint_timeline.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/endpoint_timeline.json new file mode 100644 index 00000000..ce2b0441 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/endpoint_timeline.json @@ -0,0 +1 @@ +{"service":"unicore6.TargetSystemFactory","hostname":"unicore-ui.reef.man.poznan.pl","bag_0":[{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T10:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T00:19:12Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T05:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T00:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T07:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T06:33:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T02:33:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T03:18:54Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T11:53:34Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T08:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T03:13:34Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T00:53:41Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T04:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T05:53:38Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T12:33:39Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T10:33:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T07:33:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T01:53:39Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T01:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T02:53:34Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T06:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T11:33:44Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T08:53:38Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T09:53:35Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T06:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T05:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T00:33:41Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T10:53:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T01:13:33Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T02:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T08:33:38Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","times
tamp":"2015-01-24T07:53:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T05:33:38Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T11:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T04:13:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T11:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T04:53:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T03:33:34Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T01:33:33Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T12:13:39Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T02:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T04:33:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T03:53:34Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T08:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T06:53:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T10:13:43Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T12:19:49Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T07:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T12:53:39Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T23:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T16:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T14:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T18:53:36Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T20:13:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T20:33:02Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T14:53:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T18:13:33Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T22:53:41Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T23:33:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T20:33:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T21:13:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T17:33:02Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T19:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T16:53:43Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T23:33:02Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T15:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T20:53:39Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T15:33:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T21:18:56Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp"
:"2015-01-24T14:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T19:33:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T21:53:36Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T13:33:43Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T15:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T22:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T16:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T18:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T19:53:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T17:13:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T22:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T14:33:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T23:38:02Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T15:53:37Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T23:53:42Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T19:18:52Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T17:53:38Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T21:33:36Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T13:13:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T17:33:42Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T13:53:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T22:33:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T18:33:43Z","status":"OK"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-24T16:33:43Z","status":"OK"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-24T13:18:53Z","status":"CRITICAL"},{"metric":"emi.unicore.UNICORE-Job","timestamp":"2015-01-23T23:19:12Z","status":"CRITICAL"},{"metric":"emi.unicore.TargetSystemFactory","timestamp":"2015-01-23T23:53:38Z","status":"OK"}]} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/metric_timeline.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/metric_timeline.json new file mode 100644 index 00000000..4dff249f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/metric_timeline.json @@ -0,0 +1 @@ 
+{"service":"unicore6.TargetSystemFactory","hostname":"unicore.grid.task.gda.pl","metric":"emi.unicore.UNICORE-Job","bag_0":[{"timestamp":"2015-01-23T23:35:22Z","status":"OK"},{"timestamp":"2015-01-24T00:35:21Z","status":"OK"},{"timestamp":"2015-01-24T01:35:23Z","status":"OK"},{"timestamp":"2015-01-24T02:35:22Z","status":"OK"},{"timestamp":"2015-01-24T03:35:24Z","status":"OK"},{"timestamp":"2015-01-24T04:35:22Z","status":"OK"},{"timestamp":"2015-01-24T05:35:18Z","status":"OK"},{"timestamp":"2015-01-24T06:35:23Z","status":"OK"},{"timestamp":"2015-01-24T07:35:22Z","status":"OK"},{"timestamp":"2015-01-24T08:35:18Z","status":"OK"},{"timestamp":"2015-01-24T10:35:17Z","status":"OK"},{"timestamp":"2015-01-24T11:35:24Z","status":"OK"},{"timestamp":"2015-01-24T12:35:19Z","status":"OK"},{"timestamp":"2015-01-24T13:35:23Z","status":"OK"},{"timestamp":"2015-01-24T14:35:22Z","status":"OK"},{"timestamp":"2015-01-24T15:35:22Z","status":"OK"},{"timestamp":"2015-01-24T16:35:23Z","status":"OK"},{"timestamp":"2015-01-24T17:35:22Z","status":"OK"},{"timestamp":"2015-01-24T18:35:26Z","status":"OK"},{"timestamp":"2015-01-24T19:35:27Z","status":"OK"},{"timestamp":"2015-01-24T20:35:22Z","status":"OK"},{"timestamp":"2015-01-24T21:35:26Z","status":"OK"},{"timestamp":"2015-01-24T22:35:21Z","status":"OK"},{"timestamp":"2015-01-24T22:45:51Z","status":"OK"},{"timestamp":"2015-01-24T23:45:52Z","status":"OK"}]} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint.json new file mode 100644 index 00000000..42f413fe --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint.json @@ -0,0 +1,30 @@ +{ + "service": "SRMv2", + "hostname": "se01.afroditi.hellasgrid.gr", + "metrics": [ + { + "metric": "hr.srce.SRM2-CertLifetime" + }, + { + "metric": "org.sam.SRM-Del" + }, + { + "metric": "org.sam.SRM-Get" + }, + { + "metric": "org.sam.SRM-GetSURLs" + }, + { + "metric": "org.sam.SRM-GetTURLs" + }, + { + "metric": "org.sam.SRM-Ls" + }, + { + "metric": "org.sam.SRM-LsDir" + }, + { + "metric": "org.sam.SRM-Put" + } + ] +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint_full.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint_full.json new file mode 100644 index 00000000..6db42031 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/missing_endpoint_full.json @@ -0,0 +1,11 @@ +{ + "report":"c800846f-8478-4af8-85d1-a3f12fe4c18f", + "monitoring_host":"none", + "service": "SRMv2", + "hostname": "se01.afroditi.hellasgrid.gr", + "metric": "org.sam.SRM-Put", + "timestamp":"2015-01-01T00:00:00Z", + "status":"MISSING", + "summary":"missing", + "message":"missing" +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/service_timeline.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/service_timeline.json new file mode 100644 index 00000000..0e2203da --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/service_timeline.json @@ -0,0 +1,38 @@ +{"groupname":"CA-VICTORIA-WESTGRID-T2","service":"CREAM-CE","bag_0":[{"hostname":"gorgon03.westgrid.ca","timeline":[{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status +":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{" 
+status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status" +:0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"s +tatus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status": +0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"st +atus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0 +},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"sta +tus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0} +,{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"stat +us":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0}, +{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"statu +s":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{ +"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status +":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{" +status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status" +:0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"s +tatus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status": +0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"st 
+atus":0},{"status":0}]},{"hostname":"gorgon02.westgrid.ca","timeline":[{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{ +"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status +":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{" +status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status" +:0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"s +tatus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status": +0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"st +atus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0 +},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"sta +tus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0} +,{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"stat +us":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0}, +{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"statu +s":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{ +"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status +":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{" +status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status" 
+:0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"s +tatus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0}]}]} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ar/site_timeline.json b/flink_jobs/old-models/batch_ar/src/main/resources/ar/site_timeline.json new file mode 100644 index 00000000..972c2c73 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ar/site_timeline.json @@ -0,0 +1 @@ +{"group":"UKI-NORTHGRID-LANCS-HEP","bag_0":[{"service":"CREAM-CE","timeline":[{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status
":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0}]},{"service":"Site-BDII","timeline":[{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"s
tatus":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0},{"status":0}]}]} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/downtimes_v2.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/downtimes_v2.avro new file mode 100644 index 00000000..b31d809c Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/downtimes_v2.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_endpoints_v2.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_endpoints_v2.avro new file mode 100644 index 00000000..68b4dbf9 Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_endpoints_v2.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_example.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_example.avro new file mode 100644 index 00000000..e71785ad Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_example.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_groups_v2.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_groups_v2.avro new file mode 100644 index 00000000..d4e82bfe Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/group_groups_v2.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/poem_sync_v2.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/poem_sync_v2.avro new file mode 100644 index 00000000..fac926ca Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/poem_sync_v2.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/avro/weights_v2.avro b/flink_jobs/old-models/batch_ar/src/main/resources/avro/weights_v2.avro new file mode 100644 index 00000000..7b45958b Binary files /dev/null and b/flink_jobs/old-models/batch_ar/src/main/resources/avro/weights_v2.avro differ diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_input_data.json b/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_input_data.json new file mode 100644 index 00000000..6acfc657 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_input_data.json @@ -0,0 +1,947 @@ +{ + "timeline_s": [ + { + "metric": "hr.srce.CREAMCE-CertLifetime", + 
"status": "OK", + "time_stamp": "2014-02-20T00:27:37Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T00:50:48Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T00:50:49Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T00:50:49Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T00:50:49Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T00:50:50Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T00:53:57Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T01:50:50Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T01:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T01:50:51Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T01:50:51Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T01:50:51Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T01:54:00Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T02:49:05Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T02:50:46Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T02:50:46Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T02:50:46Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T02:50:46Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T02:50:47Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T03:50:48Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T03:50:49Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T03:50:49Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T03:50:49Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T03:50:50Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T03:53:58Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-20T04:27:38Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T04:50:50Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T04:50:50Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T04:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T04:50:50Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T04:50:51Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T04:53:59Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T05:50:49Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T05:50:50Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T05:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T05:50:50Z" + }, + { + "metric": 
"org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T05:50:51Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T05:53:59Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T06:48:56Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T06:50:46Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T06:50:46Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T06:50:47Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T06:50:47Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T06:50:47Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T07:48:56Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T07:50:47Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T07:50:47Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T07:50:47Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T07:50:47Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T07:50:48Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-20T08:27:36Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T08:50:47Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T08:50:47Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T08:50:47Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T08:50:47Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T08:50:48Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T08:53:56Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T09:48:59Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T09:50:49Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T09:50:49Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T09:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T09:50:50Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T09:50:51Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T10:50:50Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T10:50:50Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T10:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T10:50:51Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T10:50:51Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T10:53:59Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T11:50:51Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T11:50:52Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": 
"2014-02-20T11:50:52Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T11:50:52Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T11:54:01Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-20T12:27:31Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T12:50:51Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T12:50:51Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T12:50:52Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T12:50:52Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "CRITICAL", + "time_stamp": "2014-02-20T12:50:58Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T12:53:56Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T14:04:00Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T14:50:50Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T14:50:50Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T14:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T14:50:50Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T14:50:52Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T14:53:58Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T15:49:02Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T15:50:43Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T15:50:43Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T15:50:43Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T15:50:44Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T15:50:44Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-20T16:27:38Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T16:50:48Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T16:50:49Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T16:50:49Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T16:50:49Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T16:50:50Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T16:54:08Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T17:50:46Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T17:50:46Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T17:50:46Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T17:50:46Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T17:50:47Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T17:54:05Z" + }, + { + 
"metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T18:50:43Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T18:50:43Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T18:50:43Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T18:50:43Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T18:50:44Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T18:54:33Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T19:49:35Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T19:50:46Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T19:50:46Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T19:50:46Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T19:50:46Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T19:50:47Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-20T20:27:35Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T20:50:47Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T20:50:47Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T20:50:47Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T20:50:47Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T20:50:49Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T20:54:35Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T21:50:50Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T21:50:50Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T21:50:51Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T21:50:51Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T21:50:52Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T21:54:28Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-20T22:55:49Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-20T22:55:49Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-20T22:55:49Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-20T22:55:49Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-20T22:55:50Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-20T22:59:38Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T00:25:05Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T00:27:35Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-21T01:10:51Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-21T01:10:51Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + 
"time_stamp": "2014-02-21T01:10:51Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-21T01:10:51Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-21T01:10:51Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-21T01:14:59Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T02:25:00Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T03:25:01Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-21T03:30:52Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-21T03:30:52Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-21T03:30:52Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-21T03:30:52Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-21T03:30:53Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-21T03:35:01Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T04:25:06Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T04:27:36Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T05:25:06Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T06:25:05Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T07:25:03Z" + }, + { + "metric": "emi.wn.WN-SoftVer", + "status": "OK", + "time_stamp": "2014-02-21T08:00:46Z" + }, + { + "metric": "emi.wn.WN-Csh", + "status": "OK", + "time_stamp": "2014-02-21T08:00:46Z" + }, + { + "metric": "emi.wn.WN-Bi", + "status": "OK", + "time_stamp": "2014-02-21T08:00:46Z" + }, + { + "metric": "hr.srce.CADist-Check", + "status": "OK", + "time_stamp": "2014-02-21T08:00:47Z" + }, + { + "metric": "org.sam.WN-Rep", + "status": "OK", + "time_stamp": "2014-02-21T08:00:48Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "OK", + "time_stamp": "2014-02-21T08:04:58Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T08:27:38Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T09:24:58Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T10:47:39Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T11:52:34Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T12:27:28Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T12:52:38Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T13:52:39Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T14:17:39Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T14:52:38Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T15:49:39Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T16:27:32Z" + }, + { + "metric": 
"hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T16:49:41Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T17:49:37Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T18:49:46Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T19:49:44Z" + }, + { + "metric": "emi.cream.CREAMCE-JobSubmit", + "status": "CRITICAL", + "time_stamp": "2014-02-21T20:07:24Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T20:07:36Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T20:27:35Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T20:49:44Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T21:49:45Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T22:49:47Z" + }, + { + "metric": "hr.srce.CREAMCE-CertLifetime", + "status": "OK", + "time_stamp": "2014-02-21T23:49:47Z" + } + ], + "profile": "ch.cern.sam.ROC_CRITICAL", + "val_0": "2014-02-20", + "hostname": "ce01.up.pt", + "service_flavour": "CREAM-CE", + "val_1": "20140221", + "val_2": "H4sIAGACflMAA62Wb2+CMBDG4wfqhRbQzXfOscUEEiMuS/YOamENAoY/Rhc//KZzWRXUtpjwqsn9cnfPc3fQZV1WrMAWgSSrIaCQFD1/5q1JjxjYQgZBBM+xPTSMn+9DfCTm0H7cP+5KBjSogIarCMKW8EPseTi2hrZ1DDdIR0CYo8pEBgZaAM2CCHgWZcArSZRQSpUXKYoYommpCcP94+MujUuGYfW5LXkCdcZRGvDsCxZMOS36K5NtCCqNpo6rINIRYQqEd89XB2BTtIqb02CJ3JexDggLoHm+Qk/Pk4k6xxYxPq+YJqcvYLzttMg3W41kRIXGM2fkobGj0ZvzeVRHWAIgdvdt0TPMSUWx62wY1agHP3R2LsaD+wh9moy+SKRzi5uLs+PaVAinf0uTNPbceRW3dmZ8IDXXZZtEcqy2rDrhWi6DBu5Kz9psJEtrJqdBu34E75NWc/xlWU13sJTDaPb65qvUeOM6K93mcMF5+3HWcEYa0wunXkPKq/8NKu1aFwbpQ7wMoKR5FRd8sd9WdSI34Xjw16zLIOnqZGCS9pJBsRpYzMGb3hbxH/cNiV8POQ0LAAA=", + "val_3": "H4sIAGACflMAA62UUW+DIBDH0w8ED022d4utcbHthnZ7XCxelUzQAK5psg8/ajeTJkbF7fHu+P25/LmDFZiBklinAtM9eSc0TELiRQuPEkTWi0rlWFYqa3LFM3xLIo+G8aKq9RebQ4dBsplPlyc2H1agm9LM57USf4DPiOlimCd07W2vCiA4ZgpsvU1Z/Kk6xs1R8JH27xTOEr/t0Io7I8Sp0RsTVyfzCmoiVyisFQNMPJ9rg0gB7MMV/XGGgDIRP4HhAiZKXB/oWrVtU6iHoYB624fu0muEvMaM+NMDTW+zD66ESGU2zL2QANuDdWO4zDveZlGXdejiXq0ucV2243ynNywRcwNo5Ydha/gx4xyvpVEctDO3sbtbSNBOZK5NanCcSm4uEyYsptvPZeebjZYOdt3g38GyEfKhdEUCGFnvXiQ+0GjMlz4umcNF2ucjS94HuRLPzYgTjeSsUvCIk1Tl1oOLNiA2KTOVurS/0s+B3vr/KB92IdnT9mtuFb8BKxCAvU0HAAA=" +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_output.txt b/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_output.txt new file mode 100644 index 00000000..d7cd6aa4 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/hst_data/hst_output.txt @@ -0,0 +1 @@ +(20140221,[OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, 
OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL]) \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/log4j.properties b/flink_jobs/old-models/batch_ar/src/main/resources/log4j.properties new file mode 100644 index 00000000..65bd0b8c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.cloud_monitor.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.cloud_monitor.json new file mode 100644 index 00000000..7eb200fc --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.cloud_monitor.json @@ -0,0 +1,22 @@ +{ + 'name' : 'fedcloud', + 'namespace' : 'egi', + 'groups' : [ + [ + 'eu.egi.cloud.accounting' + ], + [ + 'eu.egi.cloud.information.bdii' + ], + [ + 'eu.egi.cloud.storage-management.cdmi' + ], + [ + 'eu.egi.cloud.vm-management.occi' + ] + ], + 'poems' : [ + 'ch.cern.sam.CLOUD-MON' + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.roc_critical.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.roc_critical.json new file mode 100644 index 00000000..e745581b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/aps.roc_critical.json @@ -0,0 +1,24 @@ +{ + 'name' : 'ap1', + 'namespace' : 'test', + 'groups' : [ + [ + 'CREAM-CE', + 'ARC-CE', + 'GRAM5', + 'unicore6.TargetSystemFactory', + 'QCG.Computing' + ], + [ + 'SRM', + 'SRMv2' + ], + [ + 'Site-BDII' + ] + ], + 'poems' : [ + 'ch.cern.sam.ROC_CRITICAL' + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_aps.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_aps.json new file mode 100644 index 00000000..be83b618 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_aps.json @@ -0,0 +1,19 @@ +{ + "test-ap1": { + "ARC-CE": 0, + "GRAM5": 0, + "QCG.Computing": 0, + "SRMv2": 1, + "Site-BDII": 2, + "unicore6.TargetSystemFactory": 0, + "CREAM-CE": 0, + "SRM": 1 + }, + "egi-fedcloud": { + "eu.egi.cloud.vm-management.occi": 3, + "eu.egi.cloud.storage-management.cdmi": 2, + "eu.egi.cloud.information.bdii": 1, + "eu.egi.cloud.accounting": 0 + } +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_recalc_requests.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_recalc_requests.json new file mode 100644 index 00000000..3bd522df --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_recalc_requests.json @@ -0,0 +1,13 @@ +{ + "NGI_GRNET": { + "exclude": [ + "GR-01-AUTH", + "HG-03-AUTH" + ], + "data": { + "key": 0, + "value": 287 + } + } +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.clean.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.clean.json new file mode 100644 index 00000000..43507dc7 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.clean.json @@ -0,0 +1,19 @@ +{ + "ch.cern.sam.ROC_CRITICAL": { + "ARC-CE": "test-ap1", + "GRAM5": "test-ap1", + "QCG.Computing": "test-ap1", + "SRMv2": "test-ap1", + "Site-BDII": "test-ap1", + "unicore6.TargetSystemFactory": "test-ap1", + "CREAM-CE": "test-ap1", + "SRM": "test-ap1" + }, + "ch.cern.sam.CLOUD-MON": { + "eu.egi.cloud.vm-management.occi": "egi-fedcloud", + "eu.egi.cloud.storage-management.cdmi": "egi-fedcloud", + "eu.egi.cloud.information.bdii": "egi-fedcloud", + 
"eu.egi.cloud.accounting": "egi-fedcloud" + } +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.json new file mode 100644 index 00000000..d085d07c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/get_sf_to_aps.json @@ -0,0 +1,175 @@ +{ + "ch.cern.sam.ROC_CRITICAL": { + "ARC-CE": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "GRAM5": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "QCG.Computing": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "SRMv2": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "Site-BDII": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "unicore6.TargetSystemFactory": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "CREAM-CE": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + }, + "SRM": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "test-ap1" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 152, + "spillableRegistered": false + } + }, + "ch.cern.sam.CLOUD-MON": { + "eu.egi.cloud.vm-management.occi": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "egi-fedcloud" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 160, + "spillableRegistered": false + }, + "eu.egi.cloud.storage-management.cdmi": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "egi-fedcloud" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 160, + "spillableRegistered": false + }, + "eu.egi.cloud.information.bdii": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "egi-fedcloud" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 160, + "spillableRegistered": false + }, + "eu.egi.cloud.accounting": { + "mContents": [ + { + "isNull": false, + "mFields": [ + "egi-fedcloud" + ] + } + ], + "mSize": 1, + "mLastContentsSize": 1, + "avgTupleSize": 160, + "spillableRegistered": false + } + } +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/recalculations.json b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/recalculations.json new file mode 100644 index 00000000..f7fd1504 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/mongodata/recalculations.json @@ -0,0 +1,14 @@ +{ + "es" : [ + "GR-01-AUTH", + "HG-03-AUTH" + ], + "et" : "2013-12-10T12:03:44Z", + "n" : "NGI_GRNET", + "r" : "testing_compute_engine", + "s" : "pending", + "st" : "2013-12-08T12:03:44Z", + "t" : "2014-03-07 12:03:44" +} + + diff --git 
a/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-algorithm.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-algorithm.json new file mode 100644 index 00000000..b88d8c99 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-algorithm.json @@ -0,0 +1,239 @@ +{ + "id": "1b0318f0-429d-44fc-8bba-07184354c73b", + "name": "egi_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-ar.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-ar.json new file mode 100644 index 00000000..5d59b900 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-ar.json @@ -0,0 +1,11 @@ +{ + "counters":{ + 
"up":["OK","WARNING"], + "unknown":["UNKNOWN","MISSING"], + "downtime":["DOWNTIME"] + }, + "computations":{ + "availability":"(up/t) - (1.0 - (unknown / t))", + "reliability":"(up/t) - (1.0 - (unknown / t) - (downtime/t))" + } +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-rules.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-rules.json new file mode 100644 index 00000000..2a52c99a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/EGI-rules.json @@ -0,0 +1,33 @@ +{ + "rules": [ + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30" + }, + { + "metric": "org.bdii.Entries", + "thresholds": "time=-35s;~:10;15:;-100;300 entries=55;20;50:60;50;30" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s; entries=29;;30:50", + "host" : "bdii.host3.example.foo" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=29;0:10;20:30;0;30", + "host" : "bdii.host1.example.foo" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", + "host" : "bdii.host1.example.foo", + "endpoint_group": "SITE-101" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", + "endpoint_group": "SITE-101" + } + ] +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap1.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap1.json new file mode 100644 index 00000000..d754320c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap1.json @@ -0,0 +1,64 @@ +{ + "id": "297c368a-524f-4144-9eb6-924fae5f08fa", + "name": "ap1", + "namespace": "test", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "CH.CERN.SAM.ARGO_MON_CRITICAL", + "id": "c81fdb7b-d8f8-4ff9-96c5-6a0c336e2b25" + }, + "groups": [ + { + "name": "compute", + "operation": "OR", + "services": [ + { + "name": "CREAM-CE", + "operation": "OR" + }, + { + "name": "ARC-CE", + "operation": "OR" + }, + { + "name": "GRAM5", + "operation": "OR" + }, + { + "name": "unicore6.TargetSystemFactory", + "operation": "OR" + }, + { + "name": "QCG.Computing", + "operation": "OR" + } + ] + }, + { + "name": "storage", + "operation": "OR", + "services": [ + { + "name": "SRMv2", + "operation": "OR" + }, + { + "name": "SRM", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "Site-BDII", + "operation": "OR" + } + ] + } + ] + } diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap2.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap2.json new file mode 100644 index 00000000..fda7868f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/ap2.json @@ -0,0 +1,54 @@ +{ + "id": "337c368a-524f-4144-9eb6-924fae5f08fa", + "name": "fedcloud", + "namespace": "egi", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "ch.cern.sam.CLOUD-MON", + "id": "c88fdb7b-d8f8-4ff9-96c5-6a0c336e2b25" + }, + "groups": [ + { + "name": "accounting", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.accounting", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.information.bdii", + "operation": "OR" + } + ] + }, + 
{ + "name": "storage-management", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.storage-management.cdmi", + "operation": "OR" + } + ] + }, + { + "name": "vm-management", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.vm-management.occi", + "operation": "OR" + } + ] + } + ] +} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/config.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/config.json new file mode 100644 index 00000000..c2c550e5 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/config.json @@ -0,0 +1,83 @@ +{ + "id": "c800846f-8478-4af8-85d1-a3f12fe4c18f", + "info": { + "name": "Critical", + "description": "EGI report for Roc critical", + "created": "2015-10-19 10:35:49", + "updated": "2015-10-19 10:35:49" + }, + "tenant": "EGI", + "topology_schema": { + "group": { + "type": "NGI", + "group": { + "type": "SITES" + } + } + }, + "weight": "hepspec", + "profiles": [ + { + "id": "433beb2c-45cc-49d4-a8e0-b132bb30327e", + "name": "ch.cern.sam.ROC_CRITICAL", + "type": "metric" + }, + { + "id": "17d1462f-8f91-4728-a253-1a6e8e2e848d", + "name": "ops1", + "type": "operations" + }, + { + "id": "1ef8c0c9-f9ef-4ca1-9ee7-bb8b36332036", + "name": "critical", + "type": "aggregation" + } + ], + "filter_tags": [ + { + "name": "production", + "value": "1", + "context": "endpoint_groups" + }, + { + "name": "monitored", + "value": "1", + "context": "endpoint_groups" + }, + { + "name": "scope", + "value": "EGI", + "context": "endpoint_groups" + }, + { + "name": "scope", + "value": "EGI", + "context": "group_of_groups" + }, + { + "name": "infrastructure", + "value": "Production", + "context": "group_of_groups" + }, + { + "name": "certification", + "value": "Certified", + "context": "group_of_groups" + }, + { + "name": "vo", + "value": "ops", + "context": "metric_data" + }, + { + "name": "vo_fqan", + "value": "ops", + "context": "metric_data" + }, + { + "name": "roc", + "value": "any", + "context": "metric_data" + } + ] + } diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/ops/recomp.json b/flink_jobs/old-models/batch_ar/src/main/resources/ops/recomp.json new file mode 100644 index 00000000..d7371592 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/ops/recomp.json @@ -0,0 +1,43 @@ +[{ + "reason": "testing_compute_engine", + "start_time": "2013-12-08T12:03:44Z", + "end_time": "2013-12-10T12:03:44Z", + "exclude": [ + "GR-01-AUTH", + "HG-03-AUTH" + ], + "status": "running", + "timestamp": "2015-02-01 14:58:40" +}, +{ + "reason": "testing_compute_engine", + "start_time": "2013-12-08T12:03:44Z", + "end_time": "2013-12-08T13:03:44Z", + "exclude": [ + "SITE-A", + "SITE-B" + ], + "status": "running", + "timestamp": "2015-02-01 14:58:40", + "exclude_monitoring_source": [ + { "host": "monA", + "start_time": "2013-12-08T12:03:44Z", + "end_time": "2013-12-08T15:03:44Z" + }, + { "host": "monA", + "start_time": "2013-12-08T18:03:44Z", + "end_time": "2013-12-08T19:03:44Z" + } + ] +}, +{ + "reason": "testing_compute_engine", + "start_time": "2013-12-08T16:03:44Z", + "end_time": "2013-12-08T18:03:44Z", + "exclude": [ + "SITE-A", + "SITE-c" + ], + "status": "running", + "timestamp": "2015-02-01 14:58:40" +}] diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_input.json b/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_input.json new file mode 100644 index 00000000..ed4d374c --- /dev/null +++ 
b/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_input.json @@ -0,0 +1,105 @@ +{ + + "t": [ + { + "date": 20140221, + "profile": "ch.cern.sam.ROC_CRITICAL", + "timeline": "[OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK]", + "hostname": "grid01.elfak.ni.ac.rs", + "service_flavour": "CREAM-CE", + "vos": [ + { + "vo": "ops" + } + ], + "topology::production": "Y", + "topology::monitored": "Y", + "topology::scope": "EGI", + "topology::site": "AEGIS03-ELEF-LEDA", + "topology::ngi": "NGI_AEGIS", + "topology::infrastructure": "Production", + "topology::certification_status": "Certified", + "topology::site_scope": "EGI", + "topology::availability_profiles": [ + { + "availability_profile": "test-ap1" + } + ] + }, + { + "date": 20140221, + "profile": "ch.cern.sam.ROC_CRITICAL", + "timeline": "[OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK]", + "hostname": "grid02.elfak.ni.ac.rs", + "service_flavour": "SRMv2", + "vos": [ + { + "vo": "ops" + } + ], + "topology::production": "Y", + "topology::monitored": "Y", + "topology::scope": "EGI", + "topology::site": "AEGIS03-ELEF-LEDA", + "topology::ngi": "NGI_AEGIS", + "topology::infrastructure": "Production", + "topology::c ertification_status": "Certified", + "topology::site_scope": "EGI", + "topology::availability_profiles": [ + { + 
"availability_profile": "test-ap1" + } + ] + }, + { + "date": 20140221, + "profile": "ch.cern.sam.ROC_CRITICAL ", + "timeline": "[OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL, CRITICAL]", + "hostname": "grid01.elfak.ni.ac.rs", + "service_flavour": "Site-BDII", + "vos": [ + { + "vo": "ops" + } + ], + "topology::production": "Y", + "topology::monitored": "Y", + "topology::scope": "EGI", + "topology::site": "AEGIS03-ELEF-LEDA", + "topology::ngi": "NGI_AEGIS", + "topology::infrastructure": "Production", + "topology::certification_status": "Certified", + "topology::site _scope": "EGI", + "topology::availability_profiles": [ + { + "availability_profile": "test-ap1" + } + ] + }, + { + "date": 20140221, + "profile": "ch.cern.sam.ROC_CRITICAL ", + "timeline": "[OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, OK, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, 
WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING, WARNING]", + "hostname": "grid02.elfak.ni.ac.rs", + "service_flavour": "Site-BDII", + "vos": [ + { + "vo": "ops" + } + ], + "topology::production": "Y", + "topology::monitored": "Y", + "topology::scope": "EGI", + "topology::site": "AEGIS03-ELEF-LEDA", + "topology::ngi": "NGI_AEGIS", + "topology::infrastructure": "Production", + "topology::certification_status": "Certified", + "topology::site _scope": "EGI", + "topology::availability_profiles": [ + { + "availability_profile": "test-ap1" + } + ] + } + ] +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_output.txt b/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_output.txt new file mode 100644 index 00000000..ed00d0f5 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sfa_data/sfa_output.txt @@ -0,0 +1 @@ +{(100.0,100.0,1.0,0.0,0.0,CREAM-CE),(97.917,97.917,0.97917,0.0,0.0,SRMv2),(100.0,100.0,1.0,0.0,0.0,Site-BDII)} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/status/endpoint.json b/flink_jobs/old-models/batch_ar/src/main/resources/status/endpoint.json new file mode 100644 index 00000000..b7ea4c9a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/status/endpoint.json @@ -0,0 +1 @@ +{"report":"Critical","date_integer":20150602,"endpoint_group":"GRIF","service":"SRMv2","bag_0":[{"host":"ipnsedpm.in2p3.fr","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T15:19:01Z","status":"WARNING"},{"timestamp":"2015-06-02T15:19:12Z","status":"OK"}]},{"host":"lpnse1.in2p3.fr","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T15:38:58Z","status":"WARNING"},{"timestamp":"2015-06-02T15:39:09Z","status":"OK"}]},{"host":"node12.datagrid.cea.fr","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T15:18:31Z","status":"CRITICAL"},{"timestamp":"2015-06-02T17:18:35Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T20:06:19Z","status":"OK"}]},{"host":"polgrid4.in2p3.fr","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T15:48:58Z","status":"WARNING"},{"timestamp":"2015-06-02T15:49:18Z","status":"OK"}]},{"host":"grid05.lal.in2p3.fr","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"UNKNOWN"},{"timestamp":"2015-06-02T15:42:58Z","status":"WARNING"},{"timestamp":"2015-06-02T15:43:09Z","status":"OK"}]}]} diff --git 
a/flink_jobs/old-models/batch_ar/src/main/resources/status/metric.json b/flink_jobs/old-models/batch_ar/src/main/resources/status/metric.json new file mode 100644 index 00000000..33ae3102 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/status/metric.json @@ -0,0 +1 @@ +{"report":"Critical","endpoint_group":"AEGIS01-IPB-SCL","service":"CREAM-CE","host":"ce64.ipb.ac.rs","bag_0":[{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T00:31:42Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T01:31:41Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T02:31:46Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T03:31:41Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T04:31:39Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T05:31:37Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T06:31:38Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T07:31:44Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T08:31:45Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T09:31:45Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T10:31:44Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T11:31:40Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T12:31:41Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T13:31:41Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T14:33:38Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T15:33:37Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T16:33:43Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T17:33:43Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T18:33:37Z","status":"CRITICAL","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T19:33:37Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T20:33:38Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T21:33:43Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T22:33:42Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-DirectJobSubmit","timestamp":"2015-06-02T23:33:42Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T00:24:42Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T01:19:40Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T02:19:46Z","status":"OK","p
revious_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T03:24:39Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T04:19:40Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T05:19:37Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T06:19:48Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T07:19:43Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T08:19:45Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T09:19:45Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T10:19:44Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T11:19:53Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T12:19:53Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T13:24:41Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T14:19:48Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T15:19:47Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T16:19:44Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T17:19:44Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T18:19:51Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T19:19:47Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T20:24:48Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T21:19:53Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T22:19:53Z","status":"OK","previous_state":"OK"},{"metric":"emi.cream.CREAMCE-JobSubmit","timestamp":"2015-06-02T23:24:52Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T00:20:22Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T01:20:30Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T02:20:26Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T03:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T04:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T05:20:27Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T06:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T07:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T08:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T09:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T10:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T11:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.
WN-Bi","timestamp":"2015-06-02T12:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T14:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T15:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T17:20:23Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T18:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T20:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T21:20:23Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Bi","timestamp":"2015-06-02T22:20:23Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T00:20:22Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T01:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T02:20:26Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T03:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T04:20:30Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T05:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T06:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T07:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T08:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T09:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T10:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T11:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T12:20:32Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T14:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T15:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T17:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T18:20:32Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T20:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T21:20:23Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-Csh","timestamp":"2015-06-02T22:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T00:20:22Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T01:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T02:20:26Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T03:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestam
p":"2015-06-02T04:20:30Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T05:20:28Z","status":"CRITICAL","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T06:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T07:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T08:20:26Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T09:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T10:20:25Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T11:20:32Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T12:20:32Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T13:20:31Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T14:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T15:20:28Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T17:20:24Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T18:20:32Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T20:20:29Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T21:20:23Z","status":"OK","previous_state":"OK"},{"metric":"emi.wn.WN-SoftVer","timestamp":"2015-06-02T22:20:24Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T00:20:22Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T01:20:31Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T02:20:27Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T03:20:30Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T04:20:30Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T05:20:28Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T06:20:29Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T07:20:24Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T08:20:26Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T09:20:25Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T10:20:25Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T11:20:32Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T12:20:32Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T13:20:32Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T13:20:32Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T14:20:29Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T15:20:28Z","sta
tus":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T17:20:24Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T18:20:32Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T20:20:29Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T21:20:23Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CADist-Check","timestamp":"2015-06-02T22:20:24Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T00:29:02Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T04:29:11Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T08:29:06Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T12:29:11Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T16:29:03Z","status":"OK","previous_state":"OK"},{"metric":"hr.srce.CREAMCE-CertLifetime","timestamp":"2015-06-02T20:29:08Z","status":"OK","previous_state":"OK"}]} diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/status/service.json b/flink_jobs/old-models/batch_ar/src/main/resources/status/service.json new file mode 100644 index 00000000..b8d7cc02 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/status/service.json @@ -0,0 +1,2 @@ +{"report":"Critical","date_integer":20150602,"endpoint_group":"AEGIS01-IPB-SCL","bag_0":[{"service":"SRMv2","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"OK"}]},{"service":"CREAM-CE","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"OK"}]},{"service":"Site-BDII","timeline":[{"timestamp":"2015-06-02T00:00:00Z","status":"OK"}]}]} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.in.gz.base64 b/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.in.gz.base64 new file mode 100644 index 00000000..beb27df0 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.in.gz.base64 @@ -0,0 +1 @@ +H4sIAHfsQ1MAA62Z227iMBCGxQPZcsKhpXe0iyokaCvYg7R3XscEq7ET2QndVvvwm6QtDeAE7OE2MF/G/4ztmUmabXgiKH4TG5wXKNLcRFzhiPdWy8U27IUkGCASouD6OxndDMgNIb+bD4OwfFI9/Gc4NizNOC6UUBSL3EIgH39uPgz7N8NxTYi1MAUcozmjRlFkOAn8KSajTNQUqD95iKJMotKZyl6tlZc7eaolWnPEpMFMY6bo2h/GMRMGx+kWZ4mztYwND3C2eTXiuVIFSSrUmz1luklZmpQxjwblQsKsj9famZCweHiNjVAZlqbAujiTQK4/05ZpTiWWAhuqCkwZ1qY3eZrO9/49tiCCQRdiJXKObr/NZkDO3XI6WaC7KRDDC8xjgRdPLv68SNMPrjDjWmG26f1arE4bh3vGI4jxAGBMQG8mkDcHEOMhxDiErBn0ZkiSkD7EbZDakAwLIG4TSIYFkDiTMeTNX3Ge3/qfBe62IcCW+Ns2tqO77QDgM8Q2APjcB9iOAbaAvCKQvILEF5JXgHwmgPgGnvFlrK4pWNkplFtiV6ydX5tYSVcXIzV8iufTv5zBPXLmRFXjJFNla6Wsy+vuptKuvsyBw47al6PitrvC3qbSzO5n6GHyhfj5eHB1nGg3ylr4TyTEUSNlrZG7WVwKxKrG7oBllbgbdSyNB8TaHoKcaCnV/WTxglUhL4+oA5Rj0OtmHpx97yOBC+ZPi1sekW+fVsDd8Qrb59yD7c09HAXfDU8+NQepXdNqfxqjDw+t7UsDgI5mQy5b/mMyBE7u3Yjpgvnd7pzP4dY1A3Pg0DyhBtX37H4qOAr2zqmlao7SPGRq9chDpZpVZubRienBapsVXmJplpLmHBLVcWGanPKuwZPl/Q+X2yDjem3K4kg3QYrnuPph9fgwWeJbqqIXEeUbH2zYzp3TnCv26qlgeInksLE8o2EhuZ46JybJDqh6JzYnyT4H1uEs2q14atj6lYANgOtplPGkaR7Pq+U7QmRci2idpnuoKWPWMpv3UKdz0O/ASRItM/E+lu5u+U5y4BDrNweQE8472aqHV8lntpqEIxwntLp78mph1cC/eHY+FFpBPmVDK8xZqnaUl14bnlWIYZnUOBFbF62q9n/UhATkEpQDV85W+6Q7/qQDl86M2UmHnDnWb3un7Xdf9v4DZN1R4tseAAA= \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.out.json 
b/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.out.json new file mode 100644 index 00000000..f0ee2cb5 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/downtimes.out.json @@ -0,0 +1,462 @@ +{ + "desdemona.zih.tu-dresden.de CREAM-CE": { + "key": 80, + "value": 144 + }, + "ophelia.zih.tu-dresden.de SRM": { + "key": 80, + "value": 144 + }, + "wms312.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "cream.mi.sanu.ac.rs APEL": { + "key": 108, + "value": 168 + }, + "wms305.cern.ch LB": { + "key": 108, + "value": 144 + }, + "mgse1.physik.uni-mainz.de SRM": { + "key": 0, + "value": 287 + }, + "wms316.cern.ch LB": { + "key": 108, + "value": 144 + }, + "llrmpicream.in2p3.fr CREAM-CE": { + "key": 0, + "value": 287 + }, + "atlas-cream01.na.infn.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "llrcream.in2p3.fr gLExec": { + "key": 0, + "value": 287 + }, + "wms317.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms313.cern.ch LB": { + "key": 108, + "value": 144 + }, + "cream.mi.sanu.ac.rs CREAM-CE": { + "key": 108, + "value": 168 + }, + "wms315.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "perfsonar2.na.infn.it net.perfSONAR.Latency": { + "key": 0, + "value": 287 + }, + "grisuce.scope.unina.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "apel.cis.gov.pl gLite-APEL": { + "key": 0, + "value": 287 + }, + "wms310.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "storm-fe-cms.cr.cnaf.infn.it SRM": { + "key": 0, + "value": 287 + }, + "wms303.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "grisuce.scope.unina.it APEL": { + "key": 0, + "value": 287 + }, + "cream.mi.sanu.ac.rs eu.egi.MPI": { + "key": 108, + "value": 168 + }, + "grisusitebdii.scope.unina.it Site-BDII": { + "key": 0, + "value": 287 + }, + "recasna-sitebdii.unina.it Site-BDII": { + "key": 0, + "value": 287 + }, + "wms302.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "vomsIGI-NA.unina.it VOMS": { + "key": 0, + "value": 287 + }, + "wms316.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "hepgrid10.ph.liv.ac.uk gLExec": { + "key": 0, + "value": 109 + }, + "cccreamceli05.in2p3.fr gLExec": { + "key": 108, + "value": 144 + }, + "atlas-cream02.na.infn.it APEL": { + "key": 0, + "value": 287 + }, + "hepgrid10.ph.liv.ac.uk CREAM-CE": { + "key": 0, + "value": 109 + }, + "wms303.cern.ch LB": { + "key": 108, + "value": 144 + }, + "wms307.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "mgbdii.physik.uni-mainz.de Site-BDII": { + "key": 0, + "value": 287 + }, + "wms317.cern.ch LB": { + "key": 108, + "value": 144 + }, + "llrcream.in2p3.fr CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms309.cern.ch LB": { + "key": 108, + "value": 144 + }, + "spacina-se.scope.unina.it SRMv2": { + "key": 0, + "value": 287 + }, + "svr026.gla.scotgrid.ac.uk CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms302.cern.ch LB": { + "key": 108, + "value": 144 + }, + "spacina-se.scope.unina.it SRM": { + "key": 0, + "value": 287 + }, + "hepgrid5.ph.liv.ac.uk APEL": { + "key": 0, + "value": 109 + }, + "wms304.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms312.cern.ch LB": { + "key": 108, + "value": 144 + }, + "spacina-sitebdii.scope.unina.it Site-BDII": { + "key": 0, + "value": 287 + }, + "grisuce.scope.unina.it eu.egi.MPI": { + "key": 0, + "value": 287 + }, + "recasna-se01.unina.it SRM": { + "key": 0, + "value": 287 + }, + "storm-fe-cms.cr.cnaf.infn.it SRMv2": { + "key": 0, + "value": 287 + }, + "recasna-ce01.unina.it APEL": { + "key": 0, + "value": 287 + }, + "wms300.cern.ch LB": { + "key": 108, 
+ "value": 144 + }, + "emi-ce01.scope.unina.it eu.egi.MPI": { + "key": 0, + "value": 287 + }, + "wms300.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms301.cern.ch LB": { + "key": 108, + "value": 144 + }, + "se.cis.gov.pl SRMv2": { + "key": 0, + "value": 287 + }, + "wms304.cern.ch LB": { + "key": 108, + "value": 144 + }, + "grisuse.scope.unina.it SRMv2": { + "key": 0, + "value": 287 + }, + "t2-dpm-01.na.infn.it SRM": { + "key": 0, + "value": 287 + }, + "ce.cis.gov.pl CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms311.cern.ch LB": { + "key": 108, + "value": 144 + }, + "polgrid4.in2p3.fr SRM": { + "key": 0, + "value": 287 + }, + "atlas-cream02.na.infn.it gLExec": { + "key": 0, + "value": 287 + }, + "svr026.gla.scotgrid.ac.uk eu.egi.MPI": { + "key": 0, + "value": 287 + }, + "mgse1.physik.uni-mainz.de SRMv2": { + "key": 0, + "value": 287 + }, + "atlas-cream01.na.infn.it APEL": { + "key": 0, + "value": 287 + }, + "atlas-cream01.na.infn.it gLExec": { + "key": 0, + "value": 287 + }, + "atlas-argus.na.infn.it emi.ARGUS": { + "key": 0, + "value": 287 + }, + "polgrid4.in2p3.fr SRMv2": { + "key": 0, + "value": 287 + }, + "wms311.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms305.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "ophelia.zih.tu-dresden.de SRMv2": { + "key": 80, + "value": 144 + }, + "voms02.scope.unina.it VOMS": { + "key": 0, + "value": 287 + }, + "wms310.cern.ch LB": { + "key": 108, + "value": 144 + }, + "cccreamceli07.in2p3.fr CREAM-CE": { + "key": 108, + "value": 144 + }, + "bdii.cis.gov.pl Site-BDII": { + "key": 0, + "value": 287 + }, + "ce.cis.gov.pl APEL": { + "key": 0, + "value": 287 + }, + "mgce1.physik.uni-mainz.de CREAM-CE": { + "key": 0, + "value": 287 + }, + "ce.scope.unina.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "perfsonar.na.infn.it net.perfSONAR.Bandwidth": { + "key": 0, + "value": 287 + }, + "hepgrid10.ph.liv.ac.uk APEL": { + "key": 0, + "value": 109 + }, + "atlas-bdii.na.infn.it Site-BDII": { + "key": 0, + "value": 287 + }, + "lcg58.sinp.msu.ru SRMv2": { + "key": 0, + "value": 96 + }, + "cream.mi.sanu.ac.rs Site-BDII": { + "key": 108, + "value": 168 + }, + "wms309.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms301.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "grisuse.scope.unina.it SRM": { + "key": 0, + "value": 287 + }, + "svr026.gla.scotgrid.ac.uk gLExec": { + "key": 0, + "value": 287 + }, + "emi-ce01.scope.unina.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms315.cern.ch LB": { + "key": 108, + "value": 144 + }, + "recasna-se01.unina.it SRMv2": { + "key": 0, + "value": 287 + }, + "wms307.cern.ch LB": { + "key": 108, + "value": 144 + }, + "se.scope.unina.it SRM": { + "key": 0, + "value": 287 + }, + "hepgrid5.ph.liv.ac.uk CREAM-CE": { + "key": 0, + "value": 109 + }, + "se.cis.gov.pl SRM": { + "key": 0, + "value": 287 + }, + "ce.scope.unina.it eu.egi.MPI": { + "key": 0, + "value": 287 + }, + "ce.scope.unina.it APEL": { + "key": 0, + "value": 287 + }, + "atlas-cream02.na.infn.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "lcg58.sinp.msu.ru SRM": { + "key": 0, + "value": 96 + }, + "hepgrid5.ph.liv.ac.uk gLExec": { + "key": 0, + "value": 109 + }, + "wms306.cern.ch LB": { + "key": 108, + "value": 144 + }, + "cccreamceli05.in2p3.fr CREAM-CE": { + "key": 108, + "value": 144 + }, + "se.scope.unina.it SRMv2": { + "key": 0, + "value": 287 + }, + "svr026.gla.scotgrid.ac.uk APEL": { + "key": 0, + "value": 287 + }, + "spacina-ce.scope.unina.it APEL": { + "key": 0, + "value": 287 + }, + "atlasce02.scope.unina.it CREAM-CE": 
{ + "key": 0, + "value": 287 + }, + "llrmpicream.in2p3.fr eu.egi.MPI": { + "key": 0, + "value": 287 + }, + "recasna-ce01.unina.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms314.cern.ch LB": { + "key": 108, + "value": 144 + }, + "recasce01.na.infn.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "t2-dpm-01.na.infn.it SRMv2": { + "key": 0, + "value": 287 + }, + "sitebdii.scope.unina.it Site-BDII": { + "key": 0, + "value": 287 + }, + "wms314.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "wms313.cern.ch WMS": { + "key": 108, + "value": 144 + }, + "cccreamceli07.in2p3.fr gLExec": { + "key": 108, + "value": 144 + }, + "spacina-ce.scope.unina.it CREAM-CE": { + "key": 0, + "value": 287 + }, + "wms306.cern.ch WMS": { + "key": 108, + "value": 144 + } +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.in.gz.base64 b/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.in.gz.base64 new file mode 100644 index 00000000..3d943883 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.in.gz.base64 @@ -0,0 +1 @@ +H4sIAHfsQ1MAA62UUW+DIBDH0w8ED022d4utcbHthnZ7XCxelUzQAK5psg8/ajeTJkbF7fHu+P25/LmDFZiBklinAtM9eSc0TELiRQuPEkTWi0rlWFYqa3LFM3xLIo+G8aKq9RebQ4dBsplPlyc2H1agm9LM57USf4DPiOlimCd07W2vCiA4ZgpsvU1Z/Kk6xs1R8JH27xTOEr/t0Io7I8Sp0RsTVyfzCmoiVyisFQNMPJ9rg0gB7MMV/XGGgDIRP4HhAiZKXB/oWrVtU6iHoYB624fu0muEvMaM+NMDTW+zD66ESGU2zL2QANuDdWO4zDveZlGXdejiXq0ucV2243ynNywRcwNo5Ydha/gx4xyvpVEctDO3sbtbSNBOZK5NanCcSm4uEyYsptvPZeebjZYOdt3g38GyEfKhdEUCGFnvXiQ+0GjMlz4umcNF2ucjS94HuRLPzYgTjeSsUvCIk1Tl1oOLNiA2KTOVurS/0s+B3vr/KB92IdnT9mtuFb8BKxCAvU0HAAA= \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.out.json b/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.out.json new file mode 100644 index 00000000..362e8d61 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/poems.out.json @@ -0,0 +1,48 @@ +{ + "ch.cern.sam.ROC_CRITICAL ARC-CE": [ + "org.nordugrid.ARC-CE-ARIS", + "org.nordugrid.ARC-CE-IGTF", + "org.nordugrid.ARC-CE-lfc", + "org.nordugrid.ARC-CE-result", + "org.nordugrid.ARC-CE-srm", + "org.nordugrid.ARC-CE-sw-csh" + ], + "ch.cern.sam.ROC_CRITICAL CREAM-CE": [ + "emi.cream.CREAMCE-JobSubmit", + "emi.wn.WN-Bi", + "emi.wn.WN-Csh", + "emi.wn.WN-SoftVer", + "hr.srce.CADist-Check", + "hr.srce.CREAMCE-CertLifetime", + "org.sam.WN-Rep" + ], + "ch.cern.sam.ROC_CRITICAL SRMv2": [ + "hr.srce.SRM2-CertLifetime", + "org.sam.SRM-Del", + "org.sam.SRM-Get", + "org.sam.SRM-GetSURLs", + "org.sam.SRM-GetTURLs", + "org.sam.SRM-LsDir", + "org.sam.SRM-Ls", + "org.sam.SRM-Put" + ], + "ch.cern.sam.ROC_CRITICAL Site-BDII": [ + "org.bdii.Entries", + "org.bdii.Freshness", + "org.gstat.SanityCheck" + ], + "ch.cern.sam.ROC_CRITICAL QCG.Computing": [ + "hr.srce.QCG-Computing-CertLifetime", + "pl.plgrid.QCG-Computing" + ], + "ch.cern.sam.ROC_CRITICAL GRAM5": [ + "hr.srce.GRAM-Auth", + "hr.srce.GRAM-CertLifetime", + "hr.srce.GRAM-Command" + ], + "ch.cern.sam.ROC_CRITICAL unicore6.TargetSystemFactory": [ + "emi.unicore.TargetSystemFactory", + "emi.unicore.UNICORE-Job" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.in.gz.base64 b/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.in.gz.base64 new file mode 100644 index 00000000..a44285f6 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.in.gz.base64 @@ -0,0 +1 @@ 
+H4sIAM9tS1MAA11Y23LbuBIsfhBO8S7yEYRAChZvCxD22i8qxdYmqjhSSrKT2ip+/OkBKG+SBz/IJIHBTE93DxqtjGVi6OTEmerrnokpiMoonaeYTRsmbG+mNQ/C+dPl/PPEnr9d2enwE7+VHA03bHu+Hp8PQZak8Xw3so3Sg9mojrOHVjRBUaTZ3Kh2fVucT7xXPIiKZJay5cz27RikaTK7xyNfD/d4mmXZLDibtLJdzaZ4i/1a07Ct7YNVEc2bw/cv/7L74+F02gervAhn0WumWsFGZXgQp/P4fnl5PzBxPr0d8DFWarRa/xFEFIezfsAp+f75y+EUJHEUr2ahpt1GjvjqwU5Bvopvi2nBBXKTZvnM7b3UtGIQraLVfPxnfwjSfJUUsxk1FxIfd8koRZCVCLczdcOGUfZBHMbx/P2y//x+2D0frqfD2+71+XOMb6NyHnBCTclMU5zI9uqetaptJfKRFrPdKtYPetq4g7S8F4ZRmHGR5eVc2TUfpUHtorJMZ960U7yb6B9YyQiGYsTIep6mLpGyw0miVYHEsDBhqL7FSdLIPZzs2mUZFegEitcy0dpO/oVC4AAFYtMDixKmjAniOJ/1O5su5+Pb9Svyq/1WaYKD/hEyzkMBR1GSFfPlnY39qIIkzdK5asKIqZGehUU8N5pFIbODCkosj0Twfi1NgOJgzcpvkGEHD5pqaIemB1qSICnyaNZSAJc9H4dWARx/hmE2svZxhAW23jQsLNjw9bA/na9BGpbYnVoCu/YcC6bAFvWGK3aWhjnwobuO8l7MXDbKIH+yxZqtRJ/k6Jz7/enlcPl0fH0j5CW7jd2tD58uh2dALEchx3us9q/gWIMKrYDcsfMhxYWrAEd5qAKTFJteDb17WJSrlT+xHjoe0ctZ9N8/cPoyi6mglLuWbyvVBVGOLIlqrIMkjyNXt5SpCU+SMsfWfTwmwBJlPs2K1Yxe6KXAtqw1GtE/AQJS8y0RgOU7JR6rHdcCv6pOP7Fa835bWz1huXJu1cja4/XT+RTEWRm73TLWq7GXlOzQYVrwHl+Lx1oPvZwWXK6SCIX20YhxRNxZjNpYzra9pW9TH3t0Ww1V4Zq7egole6G4CQDfaDad3W1GCnCoDC2Uh9lSphgNbCsALv8DER33CY6TCDEioh3tugqRj15teBfE+Qot1oEXRGeI+hQxX3XZvx2vr/sf+6CI0KBKSN7ID3aJI78PGpH91dk2SKI8z+dKqjvVN8vBo6IMfQk9XhnXzUDHK+YekNlfv+6D318YeQfeBFBXGSG1DqIkWSWhw3HGajpUUOT5b99Q1AmwUT9t/b7hfPh8OPzvePn0vy8XpB+onCbLeD9xaemEFLgZ7K1lrPkbnQ/s/3z//v1wedu/fr+cX4gv0w+q3exPVwAcsCJ+EDuxP+1f9jsj9T0yQ4tWfbubDte3XYzkzkKbNCizef3+9YDWDYlu8OY0aAXxAHO5rUE5SRhjl/tpB36kZLQ7ZYXYKfoxPYDKkJSR2IEgyio1jUGeIghV3yFb4IcmSFGLuw9uitMVVIkSljO5JYbk79e3y/71uGd8agGlKEwTCIHUkCQ9rPF9UqAkqmdrLtm9FIKFRKeA7VwdXpvL8YVZ0ULu0G5c9zgumCOjCrEw9VVhShgstICR6K5iBt+ADRHdVhPUicMyAL9Wu40adzg82hLQI+znzHIlsC7eVrUCpztKSuO09NX853B15cyQCGEEIJIgb78rHjK5wAmta/BvUOwqTFwyYoYu4g484xZtpSavkBNHT1WaPw3mhsWFcnFeCN6xQSMHaZkkUPEaVfTQTktfkN6IrtLQTfx8/375RhYC69CBlZkUa4SeWAg6C6OEDhqWzE6iD4oVZPx3HFZoRk+FGeTSCs28Tm+2bLOFixHLOpSfm+Sl2SohzMDijAuwqSPx3LX8KknKuRcNlMttMULxc5TaPEA/xVjhk34grrih0ZEYG6W2DSU0Q/a5lsDdAKsA16Jkx2+0lpdoGeLzB6s9n3tD45+GaRYufdr2LQMuy3IRTYpQb8AZ4BVATFtmxk/GEqeEoctqt4G2lXArBjwIBsPKfxPHo7EIcytSTzphyyvgdEWayg3ICX8dbxQRJL2HXJ85ehaiJHsiZnyyeWRWVVtsBk6vkErPrBVHdpmc6qrFe7WyY+Ob1ghFTI7cgMxC5yiiDwGK8xLcEu/k9e18Ou6DtChh/XwPlKyeerbFS8UiumbkQt2kO0/n/kGBjfu1p13BO0QVA8FWrG9QdFroGA5SX873lnXqluFkBXzqySJoyiQCsTe/6jjPCw4UBYkOS0QBOunxDiEjIR4jEJg2Jze3pkbWsoccIIDBgo3ADzvPF4hoPF/ezmgYmEPDuxFOII/T2w4ahIDaJRHAw++5QlVa8ojUdgnjduFs1BWvBnkCBmhJ+2m92P27A4sl+H77ZX/5ev6B3hkXlKVkfyuBFgNDZ7N5mvhWkTkIKUwcZeFNOKhH9tSTyiX5XPe83dVSd2oxNumstWDuyyxdBEAMnoSlWMNAZGW5mjukhPmU/N6bYmnNuEAVui2dq1Y91kMg+c3ee3MegwmadngInJEEVm+ivkphcCbXligWJSfFOAL9kgixgLmGHbF36KsiW6Ah95e3L86NSCJhlHGxSFOEhJPf9efebIIyTWDufjtXg9o1iCNJiiKfH4QxXklQia5iVRXAl5BR7c7X5/NPnAdq4pEVRzQAwRcGK7DN9NAF5ULintPM2nMRWjwmd5uzZkTH/sc/1eX9dHgle4JjUVJJhYClfk2cMtfd+EWRB62wQ5hn8/Px9APSuf+B90xtfRg5XFLobJc4H7/BLqA7AIFf+uJekZ9I4gxW5tHQoUGE0Ke23eF3RAFJfUckJ6o7cD5gSobJ40ZNRH/V/t/X84VNx8OFVHrsoeLQvNeDovNo3vpYyjxNsj8wMfy9uNoU3vNBqic4rSWDK1KpXmrI2Lg2tdGda/B6lJA13nNYabgx15H1NJFkwyp0sIaWTH6x6FqHTgI9uybEQBMmqLesMWzeQZGiJHaGdsMRaUrtcBsKAZrDZWmRXoxLMhOkeUsjbztYohcCMYhsq7odFoiIAnXTsUG3kvegm/JjLjxfr9/2J8yPWQIS6R8tu9FC/XTnnHN3m4pu5bnZPW83oPGAUScMKB89BgKeumXiSEgi1oqD4dikao1iEsnujz/3p2VRgAyCJbhyLYTXOWZm4kOk7/2ft6snqd2tgr7PXYfEWGz+jinaGTAiNUGFNx6PVpi1B6aj65RtGyQN7f3+7WVR8WWAgOFGS6BZIC8GHt8LJWp3MM8Y0olvyxBM5sZSP0qQT3OjRBxlYQo3KGFGDGTYwEA8Dj3Oknx47p2Lz8/Nbl6mTIc3b9O0imtFlI4kNurDnUQpYrXj3cclRQTcYB4Tm91oCLzitnrs5RDzhR0hpSgJvEy/bmrKUpImlNZKfnY2r63YPcYIkCiJNTpUstahvEwwn9kazXJ82TM/hJBXMEo20k8YpA6enrSCw5VoAxDWaHoRZDlxsJuxunGZsQgImAwrpWkd8E
mcY3gBnWSw5J7vUppmxBmb7l/Oy7XFcpWiO3AH8EhrttMo3aIrGtRUyybuZQ6OAVyqtjQUy9aZ3TKmSPztzFQbyHMZFUvYMOc95pMS53Ce83p5PpDnjD+mYeSwA81jgMpA+ySowjrLhrneGkIxSQ97ENFyp0MaQd7Iu1KpNQwNvk3n1gp3++E7MSbrQU6FlBj+rVPBMmD94g9hItypsiLPbmm4h01Dh9ymS4dNmAg03G9asLaaRr0QB+8PNLhjlEP7NyT5NE/MdVWDtQ/XZ0wJsMY5XVuAR5Fm2B/fyMzBFCpgB8OMEYIgGH/sDNuaINsfF2lE5b3xqpigqReEYVUy1B48tZaqsroBAyYYHsgKmI64MU8L5+Dy/1x7w7XzXmulA3Ji/t7tdqeGjGBBnGo99mLxHnS+Za9FlzDFeBmnaQ+7jaqTkGZ/H8XWGGQ6SxrlfCipd7yoN9iaUm4sFgX2lDNs0jRIHiyfqzQss7900ocX5d/Y2F/IKf6FUEDnGfzbPA3bx+FmdD7ap6J+xwGRLotha9wJ2aNt2xv4W0xoMoDqYS/bQNcmeNPCR9Ha5XKF9D5PYfRAHAuVDEb2T9wfnnCWMAv8E9XAgo+X89uP4+nMlEtQHmdgl4nf2LL68yi2O16vviO/H58RPl3MwS/ztpJ6+mPGJVfbnA++qeMCfNPqJ9Z25PmLD6KBIKA50dMGu934HLOeuYlqmHpfH3lTCTtU3JSyctcu9AiKBn7Nk1VIRXSPQVAOJHT/5W6+GDhL+qFN1CNAN9FgBeoQHVJJdxuY5j9krkOX0MpkIJw+fVgQHG3lHFDC+gYtEIeAEuQQavIoNXkimkjvRraVmDm1m6shZ6vlbk+QlrUbUWEsjjJap8D8siaKcDO+G+rB0QAo3PZk12jQl8OJPakNEBnf8GRsxZ2fho2IHQrurcPA7/wBBI2gxBLm7v/PKfnOGBcAAA== \ No newline at end of file diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.out.json b/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.out.json new file mode 100644 index 00000000..5fe61b54 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/sync/weights.out.json @@ -0,0 +1,375 @@ +{ + "wuppertalprod": 32144, + "MREN-01-CIS": 3200, + "RWTH-Aachen": 32127, + "UKI-SOUTHGRID-BRIS-HEP": 5865, + "TOKYO-LCG2": 46156, + "ARNES": 38455, + "LSG-AMC": 781, + "UKI-SOUTHGRID-CAM-HEP": 2829, + "umd-cms": 0, + "NIKHEF-ELPROD": 92544, + "IL_COMP": 0, + "TW-NTU-HEP": 3600, + "MWT2-SL6": 0, + "MSFG-OPEN": 2022, + "VU-MIF-LCG2": 13788, + "UA-KNU": 10841, + "PK-CIIT": 1120, + "IFISC-GRID": 4249, + "NWICG_Earth": 0, + "BG06-GPHI": 0, + "UNIBE-ID": 25428, + "AEGIS03-ELEF-LEDA": 614, + "INFN-MIB": 0, + "INFN-FRASCATI": 9703, + "NCP-LCG2": 6365, + "RO-02-NIPNE": 3105, + "OU_OSCER_ATLAS": 0, + "NDGF-T1": 343474, + "TR-10-ULAKBIM": 1632, + "ifae": 46738, + "INFN-LECCE": 2578, + "WT2_SL6": 0, + "MD-03-USM": 320, + "IFCA-LCG2": 14920, + "IFIC-LCG2": 14573, + "NCBJ-CIS": 2605, + "UKI-LT2-Brunel": 16038, + "RU-Protvino-IHEP": 26259, + "INDIACMS-TIFR": 3200, + "IN2P3-LPC": 12131, + "TR-03-METU": 1741, + "MIT_CMS": 0, + "UKI-SCOTGRID-DURHAM": 0, + "UKI-SCOTGRID-GLASGOW": 33886, + "DESY-HH": 94309, + "CYFRONET-LCG2": 173126, + "UFlorida-HPC": 0, + "AEGIS04-KG": 1229, + "FI_HIP_T2": 14569, + "AUVERGRID": 1717, + "FNAL_FERMIGRID": 544, + "GR-04-FORTH-ICS": 41, + "TR-01-ULAKBIM": 2698, + "FMPhI-UNIBA": 7065, + "ICEAGE-CATANIA": 216, + "BCBR": 2005, + "UA-BITP": 6400, + "INFN-BOLOGNA-T3": 3861, + "UColorado_HEP": 0, + "uprm-cms": 0, + "INFN-ROMA3": 3952, + "T2_Estonia": 48983, + "INFN-ROMA1": 12851, + "Tufts_ATLAS_Tier3": 0, + "INFN-ROMA2": 372, + "BG05-SUGrid": 1446, + "BG01-IPP": 11082, + "UKI-LT2-QMUL": 31666, + "egee.fesb.hr": 520, + "SCAI": 1474, + "HU_ATLAS_Tier2": 0, + "CAMK": 2240, + "BU_ATLAS_Tier2": 0, + "INFN-PARMA": 728, + "KR-KISTI-GSDC-01": 14502, + "LRZ-LMU": 21822, + "TAMU_BRAZOS": 0, + "UB-LCG2": 5432, + "praguelcg2": 35320, + "UKI-NORTHGRID-SHEF-HEP": 10854, + "VT_OSG": 0, + "LSG-VU": 781, + "RO-07-NIPNE": 14747, + "AGLT2": 0, + "ICM": 89216, + "CA-SCINET-T2": 8900, + "LIP-Coimbra": 2205, + "UA_ILTPE_ARC": 7399, + "UCR-HEP": 0, + "AEGIS09-FTN-KM": 28, + "KR-KISTI-GCRT-01": 1013, + "GARR-01-DIR": 900, + "CIT_HEP": 0, + "LSG-WUR": 781, + "FBF-Brescia-IT": 654, + "GRISU-COMETA-INFN-CT": 1914, + "HEPHY-UIBK": 2199, + "OU_OCHEP_SWT2": 0, + "LCG_KNU": 7087, + "SPRACE": 0, + "INFN-NAPOLI-PAMELA": 4075, + 
"IN-DAE-VECC-02": 18312, + "GR-10-UOI": 926, + "CBPF": 3621, + "NYSGRID_CORNELL_NYS1": 0, + "MY-UTM-GRID": 536, + "prague_cesnet_lcg2": 4619, + "GR-09-UoA": 96, + "GRISU-UNINA": 3840, + "AGLT2_TEST": 0, + "FIUPG": 0, + "TH-HAII": 4800, + "BG03-NGCC": 2080, + "HG-06-EKT": 0, + "UPENN": 0, + "UNAVAILABLE": 0, + "EELA-UNLP": 443, + "Purdue-Rossmann": 35328, + "INFN-PADOVA": 1555, + "CAFPE-GRANADA": 3108, + "GILDA-PADOVA": 165, + "INSU01-PARIS": 0, + "T2-TH-CUNSTDA": 0, + "TW-EMI-PPS": 120, + "UKI-SOUTHGRID-RALPP": 6910, + "OBSPM": 1605, + "UNIGE-DPNC": 0, + "Hephy-Vienna": 7680, + "CETA-GRID": 5406, + "BNL_Test_2": 70, + "CA-VICTORIA-WESTGRID-T2": 30244, + "RO-13-ISS": 226, + "RU-SPbSU": 216000, + "TTU-ANTAEUS": 0, + "USC-LCG2": 18644, + "RAL-LCG2": 96435, + "NIHAM": 26769, + "CERN-PROD": 413808, + "RO-09-UTCN": 8724, + "GridUNESP_CENTRAL": 0, + "JINR-T1": 18024, + "CA-MCGILL-CLUMEQ-T2": 20880, + "MIT_CMS_T3": 0, + "MK-03-FINKI": 28063, + "TUDresden-ZIH": 3120, + "ARAGRID-CIENCIAS": 7411, + "IL_IUCC_IG": 0, + "UKI-LT2-UCL-HEP": 2006, + "BNL-ATLAS": 0, + "CNR-ILC-PISA": 24, + "IMCSUL": 7, + "SiGNET": 49333, + "ZA-MERAKA": 0, + "UA-NSCMBR": 1920, + "BEgrid-BELNET": 0, + "TW-FTT": 24795, + "egee.srce.hr": 2288, + "INFN-COSENZA": 2006, + "INFN-ROMA1-CMS": 7629, + "CA-ALBERTA-WESTGRID-T2": 3240, + "IN2P3-LAPP": 14587, + "Nebraska": 0, + "UA_BITP_ARC": 21504, + "HG-02-IASA": 866, + "IEPSAS-Kosice": 5342, + "UMissHEP": 0, + "CFP-IST": 1019, + "AEGIS01-IPB-SCL": 13516, + "PSNC": 56544, + "IN2P3-SUBATECH": 4792, + "IGI-BOLOGNA": 1429, + "UKI-SCOTGRID-ECDF": 35997, + "IN2P3-CC-T2": 22491, + "HG-04-CTI-CEID": 838, + "JP-HIROSHIMA-WLCG": 8845, + "INFN-NAPOLI-ATLAS": 15691, + "MD-04-RENAM": 40, + "UA_ICMP_ARC": 0, + "USCMS-FNAL-WC1": 0, + "CIEMAT-LCG2": 16906, + "SARA-MATRIX": 36270, + "CRS4": 95, + "UERJ": 0, + "RTUETF": 1488, + "INFN-TRIESTE": 7507, + "KR-KNU-T3": 554, + "UA-IMBG": 331, + "IEETA": 71, + "UA_ICYB_ARC": 0, + "egee.irb.hr": 1768, + "UPJS-Kosice": 184, + "INFN-CAGLIARI": 1761, + "AM-04-YERPHI": 440, + "CIRMMP": 288, + "MK-02-ETF": 96, + "UKI-NORTHGRID-MAN-HEP": 23100, + "OUHEP_OSG": 0, + "HG-08-Okeanos": 409, + "Purdue-Conte": 0, + "RECAS-NAPOLI": 762, + "ru-PNPI": 3454, + "UNI-SIEGEN-HEP": 156, + "RUG-CIT": 8988, + "INFN-BARI": 0, + "LSG-EMC": 1178, + "MPPMU": 38684, + "BEIJING-LCG2": 11890, + "KR-UOS-SSCC": 1422, + "CESGA": 6898, + "UPorto": 1156, + "INFN-ROMA1-VIRGO": 3257, + "UCSDT2": 0, + "UA-PIMEE": 180, + "GR-06-IASA": 866, + "INFN-NAPOLI-ARGO": 1408, + "CIT_CMS_T2": 0, + "WCSS-PPS": 16, + "TW-eScience": 19052, + "CSCS-LCG2": 75040, + "UMB-BB": 731, + "Vanderbilt": 0, + "UA-ISMA": 31648, + "CREATIS-INSA-LYON": 1439, + "INFN-NAPOLI-CMS": 352, + "UKI-SOUTHGRID-SUSX": 2006, + "HG-01-GRNET": 0, + "UPV-GRyCAP": 2480, + "ATLAND": 154, + "EENet": 0, + "UKI-LT2-IC-HEP": 27339, + "ITEP": 2788, + "SNS-PISA": 300, + "T3_HU_Debrecen": 68, + "WUT": 672, + "BELLARMINE-ATLAS-T3": 0, + "ru-Moscow-FIAN-LCG2": 1210, + "AEGIS11-MISANU": 563, + "BMRZ-FRANKFURT": 399, + "BG08-MADARA": 6400, + "SAMPA": 6624, + "SE-SNIC-T2": 0, + "EELA-UTFSM": 2918, + "UNI-FREIBURG": 24341, + "BEgrid-ULB-VUB": 13622, + "SBU_Tier3": 0, + "HG-05-FORTH": 866, + "INFN-FERRARA": 564, + "OSG-Rice": 4480, + "JP-KEK-CRC-02": 50872, + "HK-HKU-CC-01": 101, + "LSG-RUG": 1178, + "Kharkov-KIPT-LCG2": 4680, + "INFN-GENOVA": 1409, + "EFDA-JET": 1327, + "GR-01-AUTH": 1078, + "GE-01-GRENA": 640, + "BIFI": 7411, + "INAF-TS": 768, + "GRISU-SPACI-NAPOLI": 64, + "TW-NCUHEP": 882, + "RO-11-NIPNE": 140, + "ROC_Canada_SERVICES": 0, + 
"INFN-CATANIA": 12440, + "JINR-LCG2": 24745, + "ITWM": 916, + "BUDAPEST": 11994, + "HG-03-AUTH": 866, + "CA-TRIUMF-T2K": 0, + "GRIF": 133730, + "INFN-TORINO": 1956, + "Taiwan-LCG2": 44508, + "IN2P3-LPSC": 5137, + "UNI-PERUGIA": 1549, + "BMEGrid": 107, + "NERSC-PDSFSRM": 0, + "IISAS-Bratislava": 8148, + "OUHEP_ITB": 0, + "PNNL_BelleII": 0, + "Baylor-Tier3": 0, + "pennT3": 0, + "GR-11-UPATRAS": 33, + "TU-Kosice": 1768, + "UCD": 0, + "UNI-DORTMUND": 16000, + "AEGIS02-RCUB": 346, + "NO-NORGRID-T2": 0, + "MY-UPM-BIRUNI-01": 2646, + "brown-cms-new": 0, + "FZK-LCG2": 0, + "LSG-NKI": 781, + "NCG-INGRID-PT": 6608, + "UNIBE-LHEP": 19377, + "KTH-CLOUD": 0, + "pic": 46994, + "BelGrid-UCL": 8832, + "LSG-KUN": 781, + "LUCILLE": 0, + "SFU-LCG2": 65250, + "Purdue-RCAC": 19456, + "T3_CH_PSI": 0, + "IL-TAU-HEP": 6275, + "FLTECH": 1152, + "GILDA-INFN-CATANIA": 183, + "UNICAN": 0, + "INFN-MILANO-ATLASC": 10364, + "TH-NECTEC-LSR": 0, + "WCSS64": 36176, + "IN2P3-IRES": 21631, + "LSG-LUMC": 781, + "BA-01-ETFBL": 0, + "Australia-ATLAS": 10437, + "RO-16-UAIC": 3816, + "UKI-SOUTHGRID-BHAM-HEP": 8519, + "GLOW": 0, + "MWT2": 0, + "cinvestav": 0, + "WEIZMANN-LCG2": 7520, + "INFN-LNL-2": 29922, + "UKI-NORTHGRID-LIV-HEP": 11358, + "Purdue-Carter": 12800, + "brown-cms": 0, + "UAM-LCG2": 4305, + "GoeGrid": 12874, + "RRC-KI-T1": 22320, + "RedIRIS": 0, + "CY-01-KIMON": 6370000, + "MK-01-UKIM_II": 111, + "DESY-ZN": 23136, + "AREA-BO": 675, + "NWICG_NDCMS": 0, + "INFN-T1": 211864, + "UTA_SWT2": 0, + "UNIV-LILLE": 1548, + "DukeT3": 0, + "INFN-CNAF-LHCB": 5515, + "RO-14-ITIM": 3960, + "Purdue-Hansen": 9600, + "LSG-TUD": 781, + "BRGM-ORLEANS": 79, + "MD-02-IMI": 16, + "TECHNION-HEP": 8977, + "UA-IRE": 634, + "SZTAKI": 12800, + "IN2P3-CC": 85375, + "NYU-ATLAS": 0, + "UNINA-EGEE": 1920, + "CSC": 40344, + "INFN-BOLOGNA": 888, + "SMU_HPC": 0, + "UKI-NORTHGRID-LANCS-HEP": 28569, + "IN2P3-CPPM": 15254, + "IN2P3-IPNL": 10928, + "UNIANDES": 1272, + "TRIUMF-LCG2": 104506, + "IR-IPM-HEP": 128, + "UKI-LT2-RHUL": 14812, + "IFJ-PAN-BG": 416, + "ZA-UJ": 1685, + "INFN-PISA": 24220, + "INFN-PAVIA": 450, + "LIP-Lisbon": 2592, + "UKI-SOUTHGRID-OX-HEP": 12421, + "TRIGRID-INFN-CATANIA": 1120, + "Ru-Troitsk-INR-LCG2": 4322, + "FZJ": 0, + "UA-MHI": 7927, + "RRC-KI": 12540, + "GR-07-UOI-HEPLAB": 1872, + "SWT2_CPB": 0, + "M3PEC": 5981, + "RO-15-NIPNE": 10800, + "GRASE-CSE-MAGIC": 0 +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1.json new file mode 100644 index 00000000..0e35bc92 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1.json @@ -0,0 +1,77 @@ +{ + "t1": [ + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + 
"MISSING", + "CRITICAL", + "DOWNTIME" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_and_t2.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_and_t2.json new file mode 100644 index 00000000..b33ceb7c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_and_t2.json @@ -0,0 +1,77 @@ +{ + "t1_and_t2": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "WARNING", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "CRITICAL", + "DOWNTIME", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "WARNING", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "CRITICAL", + "DOWNTIME", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_miss.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_miss.json new file mode 100644 index 00000000..10dc3278 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_miss.json @@ -0,0 +1,77 @@ +{ + "tmiss": [ + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_or_t2.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_or_t2.json new file mode 100644 index 00000000..5bf283d8 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_or_t2.json @@ -0,0 +1,77 @@ +{ + "t1_or_t2": [ + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "OK", + "WARNING", + "UNKNOWN", + "UNKNOWN", + "CRITICAL", + "UNKNOWN", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "MISSING", + "OK", + "WARNING", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "OK", + "WARNING", + 
"UNKNOWN", + "UNKNOWN", + "CRITICAL", + "UNKNOWN", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "MISSING", + "OK", + "WARNING", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_recalc.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_recalc.json new file mode 100644 index 00000000..2e07aa74 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t1_recalc.json @@ -0,0 +1,77 @@ +{ + "t1_recalc(36-56)": [ + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "MISSING", + "MISSING", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t2.json b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t2.json new file mode 100644 index 00000000..a8cefb26 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/main/resources/timetables/t2.json @@ -0,0 +1,77 @@ +{ + "t2": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME", + "OK", + "OK", + "OK", + "OK", + "OK", + "OK", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "WARNING", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "UNKNOWN", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "MISSING", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "CRITICAL", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME", + "DOWNTIME" + ] +} + diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointArTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointArTest.java new file mode 100644 index 00000000..25d00ecf --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointArTest.java @@ -0,0 +1,104 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; + + +import org.junit.BeforeClass; +import org.junit.Test; + + +import ops.DIntegrator; +import ops.OpsManager; + +public class EndpointArTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // 
Assert that files are present + assertNotNull("Test file missing", ServiceArTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = ServiceArTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // Add endpoint Timelines + int[] endpoint01s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] endpoint02s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + MonTimeline endp01 = new MonTimeline("SITEA", "CREAM-CE", "cream01.endpointA.foo", ""); + endp01.setTimeline(endpoint01s); + + MonTimeline endp02 = new MonTimeline("SITEA", "SRM", "srm01.endpointB.foo", ""); + endp02.setTimeline(endpoint02s); + + ArrayList endpDS = new ArrayList(); + endpDS.add(endp01); + endpDS.add(endp02); + + ArrayList resultDS = new ArrayList(); + + String runDate = "2017-07-01"; + String report = "reportFOO"; + + + + for (MonTimeline item : endpDS) { + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(item.getTimeline(),opsMgr); + + int runDateInt = Integer.parseInt(runDate.replace("-", "")); + + EndpointAR result = new EndpointAR(runDateInt,report,item.getHostname(),item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + resultDS.add(result); + } + + String expEndpoint01 = 
"(20170701,reportFOO,cream01.endpointA.foo,CREAM-CE,SITEA,87.5,87.5,0.875,0.0,0.0)"; + String expEndpoint02 = "(20170701,reportFOO,srm01.endpointB.foo,SRM,SITEA,100.0,100.0,1.0,0.0,0.0)"; + + assertEquals("check cream-ce service results",expEndpoint01,resultDS.get(0).toString()); + assertEquals("check srm service results",expEndpoint02,resultDS.get(1).toString()); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupArTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupArTest.java new file mode 100644 index 00000000..16b71836 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupArTest.java @@ -0,0 +1,128 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; +import org.junit.BeforeClass; +import org.junit.Test; + + +import ops.DIntegrator; +import ops.OpsManager; + +public class EndpointGroupArTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupArTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = EndpointGroupArTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // Add Service Timelines + int[] g01s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] g02s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] g03s = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 }; + + MonTimeline g01 = new MonTimeline("SITEA", "", "", ""); + g01.setTimeline(g01s); + + MonTimeline g02 = new MonTimeline("SITEB", "", "", ""); + g02.setTimeline(g02s); + + MonTimeline g03 = new MonTimeline("SITEC", "", "", ""); + g03.setTimeline(g03s); + + ArrayList gDS = new ArrayList(); + gDS.add(g01); + gDS.add(g02); + gDS.add(g03); + + + ArrayList resultDS = new ArrayList(); + + String runDate = "2017-07-01"; + String report = "reportFOO"; + String supergroup = "SUPERGROUP"; + + int weights[] = { 166, 150, 200 }; + + + int i=0; + + for (MonTimeline item : gDS) { + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(item.getTimeline(),opsMgr); + + int runDateInt = Integer.parseInt(runDate.replace("-", "")); + + int w = weights[i]; + i++; + + EndpointGroupAR result = new EndpointGroupAR(runDateInt,report,item.getGroup(),supergroup,w,dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + resultDS.add(result); + } + + + + String expA = "(20170701,reportFOO,SITEA,SUPERGROUP,166,87.5,87.5,0.875,0.0,0.0)"; + String expB = "(20170701,reportFOO,SITEB,SUPERGROUP,150,100.0,100.0,1.0,0.0,0.0)"; + String expC = "(20170701,reportFOO,SITEC,SUPERGROUP,200,99.65278,99.65278,0.99653,0.0,0.0)"; + + assertEquals("check site a results",expA,resultDS.get(0).toString()); + assertEquals("check site b results",expB,resultDS.get(1).toString()); + assertEquals("check site c results",expC,resultDS.get(2).toString()); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupTimelineTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupTimelineTest.java new file mode 100644 index 00000000..1f7bdb36 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointGroupTimelineTest.java @@ -0,0 +1,187 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.BeforeClass; +import org.junit.Test; + +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; +import sync.AggregationProfileManager; + + +public class EndpointGroupTimelineTest { + + @BeforeClass + public static void setUpBeforeClass() throws 
Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupTimelineTest.class.getResource("/ops/EGI-algorithm.json")); + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupTimelineTest.class.getResource("/ops/ap1.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = EndpointGroupTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(jsonFile); + + // Prepare Resource File + URL resJsonFile2 = EndpointGroupTimelineTest.class.getResource("/ops/ap1.json"); + File jsonFile2 = new File(resJsonFile2.toURI()); + // Instatiate class + AggregationProfileManager apsMgr = new AggregationProfileManager(); + apsMgr.clearProfiles(); + apsMgr.loadJson(jsonFile2); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // Add Service Timelines + int[] cream01s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] cream02s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] srm01s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] srm02s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + MonTimeline cream01 = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", ""); + cream01.setTimeline(cream01s); + + MonTimeline cream02 = new MonTimeline("SITEA", "CREAM-CE", "cream02.foo", ""); + cream02.setTimeline(cream02s); + + MonTimeline srm01 = new MonTimeline("SITEA", "SRM", "srm01.foo", ""); + srm01.setTimeline(srm01s); + + MonTimeline srm02 = new MonTimeline("SITEA", "SRM", "srm02.foo", ""); + srm02.setTimeline(srm02s); + + ArrayList groupDS = new ArrayList(); + groupDS.add(cream01); + groupDS.add(cream02); + groupDS.add(srm01); + + + Map groupAggr = new HashMap(); + + String avProf = apsMgr.getAvProfiles().get(0); + for (MonTimeline item : groupDS) { + + String service = item.getService(); + + + // Get the aggregation group + String group = apsMgr.getGroupByService(avProf, service); + + // if group doesn't exist yet create it + if (groupAggr.containsKey(group) == false) { + groupAggr.put(group, new DAggregator()); + } + + // Initialize a DTimelineObject + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + dtl.setStartState(dtl.samples[0]); + + // group will be present now + groupAggr.get(group).timelines.put(service, dtl); + + } + + // Aggregate each group + for (String group : groupAggr.keySet()) { + // Get group Operation + + String gop = apsMgr.getProfileGroupOp(avProf, group); + + groupAggr.get(group).aggregate(gop, opsMgr); + + } + + // Combine group aggregates to a final endpoint group aggregation + // Aggregate all sites + DAggregator totalSite = new DAggregator(); + + // Aggregate each group + for (String group : groupAggr.keySet()) { + DTimeline curTimeline = groupAggr.get(group).aggregation; + for (int i = 0; i < curTimeline.samples.length; i++) { + totalSite.insertSlot(group, i, curTimeline.samples[i]); + + } + + } + + // Final site aggregate + // Get appropriate operation from availability profile + totalSite.aggregate(apsMgr.getTotalOp(avProf), opsMgr); + + // Expected result + int[] expResult = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + assertArrayEquals("check service aggregation", expResult, totalSite.aggregation.samples); + + } +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointTimelineTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointTimelineTest.java new file mode 100644 index 00000000..e51124ee --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/EndpointTimelineTest.java @@ -0,0 +1,243 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; + + +import org.junit.BeforeClass; +import org.junit.Test; + +import argo.avro.MetricData; +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; + +public class EndpointTimelineTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointTimelineTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = EndpointTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // List of MetricData + ArrayList mdata1 = new ArrayList(); + ArrayList mdata2 = new ArrayList(); + ArrayList mdata3 = new ArrayList(); + mdata1.add(new MetricData("2017-07-01T23:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata1.add(new MetricData("2017-07-02T05:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "WARNING", + "mon01.foo", "summary", "ok", null, null)); + mdata1.add(new MetricData("2017-07-02T00:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata1.add(new MetricData("2017-07-02T12:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "CRITICAL", + "mon01.foo", "summary", "ok", null, null)); + mdata1.add(new MetricData("2017-07-02T14:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata2.add(new MetricData("2017-07-01T23:00:00Z", "CREAM-CE", "cream01.foo", "job_cancel", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata2.add(new MetricData("2017-07-02T16:00:00Z", "CREAM-CE", 
"cream01.foo", "job_cancel", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata2.add(new MetricData("2017-07-02T19:00:00Z", "CREAM-CE", "cream01.foo", "job_cancel", "CRITICAL", + "mon01.foo", "summary", "ok", null, null)); + mdata2.add(new MetricData("2017-07-02T20:00:00Z", "CREAM-CE", "cream01.foo", "job_cancel", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata3.add(new MetricData("2017-07-01T21:00:00Z", "CREAM-CE", "cream01.foo", "cert", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata3.add(new MetricData("2017-07-02T21:00:00Z", "CREAM-CE", "cream01.foo", "cert", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata3.add(new MetricData("2017-07-02T22:00:00Z", "CREAM-CE", "cream01.foo", "cert", "WARNING", "mon01.foo", + "summary", "ok", null, null)); + mdata3.add(new MetricData("2017-07-02T23:00:00Z", "CREAM-CE", "cream01.foo", "cert", "OK", "mon01.foo", + "summary", "ok", null, null)); + + // Create Frist Metric Timeline + DTimeline dtl1 = new DTimeline(); + + boolean first = true; + for (MetricData item : mdata1) { + if (first) { + dtl1.setStartState(opsMgr.getIntStatus(item.getStatus())); + first = false; + } + + dtl1.insert(item.getTimestamp(), opsMgr.getIntStatus(item.getStatus())); + + } + + dtl1.settle(opsMgr.getDefaultMissingInt()); + MonTimeline mtl1 = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", "job_submit"); + mtl1.setTimeline(dtl1.samples); + + DTimeline dtl2 = new DTimeline(); + + first = true; + for (MetricData item : mdata2) { + if (first) { + dtl2.setStartState(opsMgr.getIntStatus(item.getStatus())); + first = false; + } + + dtl2.insert(item.getTimestamp(), opsMgr.getIntStatus(item.getStatus())); + + } + + dtl2.settle(opsMgr.getDefaultMissingInt()); + MonTimeline mtl2 = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", "job_cancel"); + mtl2.setTimeline(dtl2.samples); + + DTimeline dtl3 = new DTimeline(); + + first = true; + for (MetricData item : mdata3) { + if (first) { + dtl3.setStartState(opsMgr.getIntStatus(item.getStatus())); + first = false; + } + + dtl3.insert(item.getTimestamp(), opsMgr.getIntStatus(item.getStatus())); + + } + + dtl3.settle(opsMgr.getDefaultMissingInt()); + MonTimeline mtl3 = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", "cert"); + mtl3.setTimeline(dtl3.samples); + + // Create 'job_submit' expected metric timeline + DTimeline etl1 = new DTimeline(); + + for (int i = 0; i < 59; i++) { + etl1.samples[i] = 0; + } + + for (int i = 59; i < 143; i++) { + etl1.samples[i] = 1; + } + + for (int i = 143; i < 167; i++) { + etl1.samples[i] = 4; + } + + for (int i = 167; i < etl1.samples.length; i++) { + etl1.samples[i] = 0; + } + + // Create 'job_cancel' expected metric timeline + DTimeline etl2 = new DTimeline(); + + for (int i = 0; i < 227; i++) { + etl2.samples[i] = 0; + } + + for (int i = 227; i < 239; i++) { + etl2.samples[i] = 4; + } + + for (int i = 239; i < etl2.samples.length; i++) { + etl2.samples[i] = 0; + } + + // Create 'cert' expected metric timeline + DTimeline etl3 = new DTimeline(); + + for (int i = 0; i < 267; i++) { + etl3.samples[i] = 0; + } + + for (int i = 263; i < 275; i++) { + etl3.samples[i] = 1; + } + + for (int i = 275; i < etl3.samples.length; i++) { + etl3.samples[i] = 0; + } + + // Assert that MonTimelines are as expected + assertEquals("mon timeline 1: endpoint group check", "SITEA", mtl1.getGroup()); + assertEquals("mon timeline 1: service check", "CREAM-CE", mtl1.getService()); + assertEquals("mon timeline 1: hostname check", "cream01.foo", mtl1.getHostname()); + 
assertEquals("mon timeline 1: metric check", "job_submit", mtl1.getMetric()); + assertArrayEquals("mon timeline 1 samples check", etl1.samples, mtl1.getTimeline()); + + assertEquals("mon timeline 2: endpoint group check", "SITEA", mtl2.getGroup()); + assertEquals("mon timeline 2: service check", "CREAM-CE", mtl2.getService()); + assertEquals("mon timeline 2: hostname check", "cream01.foo", mtl2.getHostname()); + assertEquals("mon timeline 2: metric check", "job_cancel", mtl2.getMetric()); + assertArrayEquals("mon timeline 2 samples check", etl2.samples, mtl2.getTimeline()); + + assertEquals("mon timeline 3: endpoint group check", "SITEA", mtl3.getGroup()); + assertEquals("mon timeline 3: service check", "CREAM-CE", mtl3.getService()); + assertEquals("mon timeline 3: hostname check", "cream01.foo", mtl3.getHostname()); + assertEquals("mon timeline 3: metric check", "cert", mtl3.getMetric()); + assertArrayEquals("mon timeline 3 samples check", etl3.samples, mtl3.getTimeline()); + + // Endpoint Aggregation + ArrayList monDS = new ArrayList(); + monDS.add(mtl1); + monDS.add(mtl2); + monDS.add(mtl3); + + DAggregator dAgg = new DAggregator(); + for (MonTimeline item : monDS) { + + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + + dtl.setStartState(dtl.samples[0]); + // Insert timeline directly into aggragator's hashtable + + dAgg.timelines.put(item.getMetric(), dtl); + + + } + // dont use settleAll because timelines in the aggregator are already + // directly settled from mon timeline objects + dAgg.aggregate("AND", opsMgr); + + int[] expResult = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + MonTimeline endTl = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", ""); + endTl.setTimeline(dAgg.aggregation.samples); + + assertEquals("endpoint timeline: endpoint group check", "SITEA", endTl.getGroup()); + assertEquals("endpoint timeline: service check", "CREAM-CE", endTl.getService()); + assertEquals("endpoint timeline: hostname check", "cream01.foo", endTl.getHostname()); + assertEquals("endpoint timeline: metric check", "", endTl.getMetric()); + assertArrayEquals("Assert aggregated endpoint timeline",expResult,endTl.getTimeline()); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/MetricTimelineTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/MetricTimelineTest.java new file mode 100644 index 00000000..0c91625c --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/MetricTimelineTest.java @@ -0,0 +1,97 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; 
+import java.text.ParseException; +import java.util.ArrayList; + + +import org.junit.BeforeClass; +import org.junit.Test; + +import ops.DTimeline; +import ops.OpsManager; +import argo.avro.MetricData; + +public class MetricTimelineTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", MetricTimelineTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = MetricTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 etc) + assertEquals(0,opsMgr.getIntStatus("OK")); + assertEquals(1,opsMgr.getIntStatus("WARNING")); + assertEquals(3,opsMgr.getIntStatus("MISSING")); + assertEquals(4,opsMgr.getIntStatus("CRITICAL")); + + + // List of MetricData + ArrayList mdata = new ArrayList(); + mdata.add(new MetricData("2017-07-01T23:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata.add(new MetricData("2017-07-02T05:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "WARNING", + "mon01.foo", "summary", "ok", null, null)); + mdata.add(new MetricData("2017-07-02T00:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + mdata.add(new MetricData("2017-07-02T12:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "CRITICAL", + "mon01.foo", "summary", "ok", null, null)); + mdata.add(new MetricData("2017-07-02T14:00:00Z", "CREAM-CE", "cream01.foo", "job_submit", "OK", "mon01.foo", + "summary", "ok", null, null)); + + DTimeline dtl = new DTimeline(); + boolean first = true; + for (MetricData item : mdata) { + if (first) { + dtl.setStartState(opsMgr.getIntStatus(item.getStatus())); + first =false; + } + + dtl.insert(item.getTimestamp(), opsMgr.getIntStatus(item.getStatus())); + + } + + dtl.settle(opsMgr.getDefaultMissingInt()); + + // Create expected metric timeline + DTimeline etl = new DTimeline(); + + for (int i=0;i<59;i++){ + etl.samples[i]=0; + } + + for (int i=59;i<143;i++){ + etl.samples[i]=1; + } + + for (int i=143;i<167;i++){ + etl.samples[i]=4; + } + + for (int i=167;i groups = ge.getGroup("GROUPA", md1.getHostname(), md1.getService()); + + ArrayList monList = new ArrayList(); + + for (String group : groups) { + MonData mn = new MonData(); + mn.setGroup(group); + mn.setHostname(md1.getHostname()); + mn.setService(md1.getService()); + monList.add(mn); + } + + String expOut = "[(SH,service.typeA,hostA,,,,,,,), " + "(SH_N,service.typeA,hostA,,,,,,,), " + + "(PROV,service.typeA,hostA,,,,,,,), " + "(D,service.typeA,hostA,,,,,,,), " + + "(SC,service.typeA,hostA,,,,,,,), " + "(PR,service.typeA,hostA,,,,,,,), " + + "(SH_R,service.typeA,hostA,,,,,,,), " + "(OP,service.typeA,hostA,,,,,,,), " + + "(SH_L,service.typeA,hostA,,,,,,,), " + "(GT,service.typeA,hostA,,,,,,,), " + + "(ORG_C,service.typeA,hostA,,,,,,,)]"; + + + + assertEquals("multiple group test",expOut,Arrays.toString(monList.toArray())); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceArTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceArTest.java new file mode 100644 index 00000000..ff960c20 
--- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceArTest.java @@ -0,0 +1,104 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; + + +import org.junit.BeforeClass; +import org.junit.Test; + + +import ops.DIntegrator; +import ops.OpsManager; + +public class ServiceArTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ServiceArTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = ServiceArTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // Add Service Timelines + int[] serv01s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] serv02s = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + MonTimeline serv01 = new MonTimeline("SITEA", "CREAM-CE", "", ""); + serv01.setTimeline(serv01s); + + MonTimeline serv02 = new MonTimeline("SITEA", "SRM", "", ""); + serv02.setTimeline(serv02s); + + ArrayList servDS = new ArrayList(); + servDS.add(serv01); + servDS.add(serv02); + + ArrayList resultDS = new ArrayList(); + + String runDate 
= "2017-07-01"; + String report = "reportFOO"; + + + + for (MonTimeline item : servDS) { + + DIntegrator dAR = new DIntegrator(); + dAR.calculateAR(item.getTimeline(),opsMgr); + + int runDateInt = Integer.parseInt(runDate.replace("-", "")); + + ServiceAR result = new ServiceAR(runDateInt,report,item.getService(),item.getGroup(),dAR.availability,dAR.reliability,dAR.up_f,dAR.unknown_f,dAR.down_f); + resultDS.add(result); + } + + String expResultCREAM = "(20170701,reportFOO,CREAM-CE,SITEA,87.5,87.5,0.875,0.0,0.0)"; + String expResultSRM = "(20170701,reportFOO,SRM,SITEA,100.0,100.0,1.0,0.0,0.0)"; + + assertEquals("check cream-ce service results",expResultCREAM,resultDS.get(0).toString()); + assertEquals("check srm service results",expResultSRM,resultDS.get(1).toString()); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceTimelineTest.java b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceTimelineTest.java new file mode 100644 index 00000000..6ca856db --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/argo/batch/ServiceTimelineTest.java @@ -0,0 +1,111 @@ +package argo.batch; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; + + +import org.junit.BeforeClass; +import org.junit.Test; + + +import ops.DAggregator; +import ops.DTimeline; +import ops.OpsManager; + +public class ServiceTimelineTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ServiceTimelineTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // Load operations manager + // Prepare Resource File + URL resJsonFile = ServiceTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Assert the state strings to integer mappings ("OK"=>0, "MISSING"=>3 + // etc) + assertEquals(0, opsMgr.getIntStatus("OK")); + assertEquals(1, opsMgr.getIntStatus("WARNING")); + assertEquals(3, opsMgr.getIntStatus("MISSING")); + assertEquals(4, opsMgr.getIntStatus("CRITICAL")); + + // Add Endpoint Timelines + int[] endp01 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + int[] endp02 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + MonTimeline endtl01 = new MonTimeline("SITEA", "CREAM-CE", "cream01.foo", ""); + endtl01.setTimeline(endp01); + + MonTimeline endtl02 = new MonTimeline("SITEA", "CREAM-CE", "cream02.foo", ""); + endtl02.setTimeline(endp02); + + ArrayList endpDS = new ArrayList(); + endpDS.add(endtl01); + endpDS.add(endtl02); + + DAggregator dAgg = new DAggregator(); + for (MonTimeline item : endpDS) { + + DTimeline dtl = new DTimeline(); + dtl.samples = item.getTimeline(); + + dtl.setStartState(dtl.samples[0]); + // Insert timeline directly into aggragator's hashtable + + dAgg.timelines.put(item.getMetric(), dtl); + + } + // dont use settleAll because timelines in the aggregator are already + // directly settled from mon timeline objects + dAgg.aggregate("OR", opsMgr); + + + + // Expected result + int[] expResult = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + assertArrayEquals("check service aggregation",expResult,dAgg.aggregation.samples); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/ConfigManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/ops/ConfigManagerTest.java new file mode 100644 index 00000000..acae33c8 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/ConfigManagerTest.java @@ -0,0 +1,58 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class ConfigManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ConfigManagerTest.class.getResource("/ops/config.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Load the resource file + URL resJsonFile = OpsManagerTest.class.getResource("/ops/config.json"); + File jsonFile = new File(resJsonFile.toURI()); + + // Instantiate a new 
ConfigManager and load the test file + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(jsonFile); + + // Assert that the simple fields are loaded correctly + assertEquals("EGI",cfgMgr.getTenant()); + assertEquals("Critical", cfgMgr.report); + assertEquals("SITES", cfgMgr.egroup); + assertEquals("NGI", cfgMgr.ggroup); + assertEquals("hepspec", cfgMgr.weight); + assertEquals("c800846f-8478-4af8-85d1-a3f12fe4c18f",cfgMgr.id); + + // Assert compound fields + assertEquals("Production", cfgMgr.ggroupTags.get("infrastructure")); + assertEquals("Certified", cfgMgr.ggroupTags.get("certification")); + assertEquals("EGI", cfgMgr.ggroupTags.get("scope")); + + // Assert compound fields + assertEquals("1", cfgMgr.egroupTags.get("production")); + assertEquals("1", cfgMgr.egroupTags.get("monitored")); + assertEquals("EGI", cfgMgr.egroupTags.get("scope")); + + // Assert compound fields + assertEquals("ops", cfgMgr.mdataTags.get("vo")); + assertEquals("ops", cfgMgr.mdataTags.get("vo_fqan")); + assertEquals("any", cfgMgr.mdataTags.get("roc")); + + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/DAggregatorTest.java b/flink_jobs/old-models/batch_ar/src/test/java/ops/DAggregatorTest.java new file mode 100644 index 00000000..6374c17f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/DAggregatorTest.java @@ -0,0 +1,133 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.Arrays; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class DAggregatorTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", DAggregatorTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, ParseException, IOException { + + URL resJsonFile = DAggregatorTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonFile = new File(resJsonFile.toURI()); + + DAggregator dAgg = new DAggregator(); + OpsManager opsMgr = new OpsManager(); + + opsMgr.loadJson(jsonFile); + + // Create 3 Timelines + DTimeline t1 = new DTimeline(); + DTimeline t2 = new DTimeline(); + DTimeline t3 = new DTimeline(); + + t1.setSampling(1440, 120); + t2.setSampling(1440, 120); + t3.setSampling(1440, 120); + + dAgg.aggregation.setSampling(1440, 120); + + // Set First States + t1.setStartState(opsMgr.getIntStatus("OK")); + t2.setStartState(opsMgr.getIntStatus("UNKNOWN")); + t3.setStartState(opsMgr.getIntStatus("OK")); + + // Add some timestamps int timeline 1 + t1.insert("2014-01-15T01:33:44Z", opsMgr.getIntStatus("CRITICAL")); + t1.insert("2014-01-15T05:33:01Z", opsMgr.getIntStatus("OK")); + t1.insert("2014-01-15T12:50:42Z", opsMgr.getIntStatus("WARNING")); + t1.insert("2014-01-15T15:33:44Z", opsMgr.getIntStatus("OK")); + + // Add some timestamps int timeline 2 + t2.insert("2014-01-15T05:33:44Z", opsMgr.getIntStatus("OK")); + t2.insert("2014-01-15T08:33:01Z", opsMgr.getIntStatus("MISSING")); + t2.insert("2014-01-15T12:50:42Z", opsMgr.getIntStatus("CRITICAL")); + t2.insert("2014-01-15T19:33:44Z", opsMgr.getIntStatus("UNKNOWN")); + + // Add some timestamps int timeline 2 + t3.insert("2014-01-15T04:00:44Z", opsMgr.getIntStatus("WARNING")); + t3.insert("2014-01-15T09:33:01Z", opsMgr.getIntStatus("CRITICAL")); + t3.insert("2014-01-15T12:50:42Z", opsMgr.getIntStatus("OK")); + 
t3.insert("2014-01-15T16:33:44Z", opsMgr.getIntStatus("WARNING")); + + t1.settle(opsMgr.getIntStatus("MISSING")); + t2.settle(opsMgr.getIntStatus("MISSING")); + t3.settle(opsMgr.getIntStatus("MISSING")); + + dAgg.timelines.put("timeline1", t1); + dAgg.timelines.put("timeline2", t2); + dAgg.timelines.put("timeline3", t3); + + dAgg.aggregate("OR", opsMgr); + + int[] expected = { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + // Check the arrays + assertArrayEquals("Aggregation check", expected, dAgg.aggregation.samples); + + } + + @Test + public void test2() throws URISyntaxException, ParseException, IOException { + + URL resJsonFile = DAggregatorTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonFile = new File(resJsonFile.toURI()); + + DAggregator dAgg = new DAggregator(); + dAgg.loadOpsFile(jsonFile); + + OpsManager opsMgr = new OpsManager(); + opsMgr.loadJson(jsonFile); + + dAgg.setStartState("m1", opsMgr.getIntStatus("OK")); + dAgg.setStartState("m2", opsMgr.getIntStatus("OK")); + dAgg.setStartState("m3", opsMgr.getIntStatus("OK")); + dAgg.setStartState("m4", opsMgr.getIntStatus("OK")); + dAgg.insert("m1", "2014-01-15T00:00:00Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m1", "2014-01-15T04:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m1", "2014-01-15T06:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m1", "2014-01-15T12:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m1", "2014-01-15T22:11:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m2", "2014-01-15T01:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m2", "2014-01-15T05:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m2", "2014-01-15T06:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m2", "2014-01-15T22:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m3", "2014-01-15T01:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m3", "2014-01-15T05:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m3", "2014-01-15T11:33:44Z", opsMgr.getIntStatus("CRITICAL")); + dAgg.insert("m4", "2014-01-15T01:33:44Z", opsMgr.getIntStatus("WARNING")); + dAgg.insert("m4", "2014-01-15T02:33:44Z", opsMgr.getIntStatus("OK")); + dAgg.insert("m4", "2014-01-15T24:59:59Z", opsMgr.getIntStatus("CRITICAL")); + + int[] expected = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; + + dAgg.settleAll(opsMgr.getIntStatus("MISSING")); + dAgg.aggregate("AND", opsMgr); + System.out.println(Arrays.toString(dAgg.aggregation.samples)); + assertArrayEquals("Aggregation test 3", expected, dAgg.aggregation.samples); + + } +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/DIntergatorTest.java 
b/flink_jobs/old-models/batch_ar/src/test/java/ops/DIntergatorTest.java new file mode 100644 index 00000000..64ea691f --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/DIntergatorTest.java @@ -0,0 +1,46 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; + +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; + + +import org.junit.Test; + +public class DIntergatorTest { + + @Test + public void test() throws URISyntaxException, ParseException, IOException { + + URL resJsonFile = DIntergatorTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + DTimeline dtl = new DTimeline(); + + DIntegrator inter = new DIntegrator(); + + dtl.setStartState(opsMgr.getIntStatus("OK")); + dtl.insert("2015-01-24T20:21:01Z", opsMgr.getIntStatus("DOWNTIME")); + dtl.insert("2015-01-24T20:39:21Z", opsMgr.getIntStatus("OK")); + dtl.insert("2015-01-24T22:00:21Z", opsMgr.getIntStatus("CRITICAL")); + dtl.insert("2015-01-24T22:42:21Z", opsMgr.getIntStatus("OK")); + + dtl.settle(opsMgr.getIntStatus("MISSING")); + inter.calculateAR(dtl.samples, opsMgr); + + assertEquals(inter.availability, 95.83, 0.01); + assertEquals(inter.reliability, 97.18, 0.01); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/DTimelineTest.java b/flink_jobs/old-models/batch_ar/src/test/java/ops/DTimelineTest.java new file mode 100644 index 00000000..fc96df63 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/DTimelineTest.java @@ -0,0 +1,131 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; + + +import org.junit.BeforeClass; +import org.junit.Test; + +public class DTimelineTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", DTimelineTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, ParseException, IOException { + + // Use Operations Manager + // Prepare Resource File + URL resJsonFile = DTimelineTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Initialize Discrete Timeline for testing + DTimeline dtl = new DTimeline(); + // Clear Samples + dtl.clearSamples(); + // Assert default array size = 288 + // assertEquals("Default sample array size must be + // 288",dtl.samples.length,288); + // Assert each initialized element that is = -1 + for (int i = 0; i < dtl.samples.length; i++) { + assertEquals("Init element must be -1", dtl.samples[i], -1); + } + // Assert Start state + assertEquals("Start State", dtl.getStartState(), -1); + // Set Start state + dtl.setStartState(opsMgr.getIntStatus("CRITICAL")); + // Assert Start state + assertEquals("Start State", dtl.getStartState(), 4); + // Create a sample timeline + dtl.insert("2014-01-15T01:33:44Z", opsMgr.getIntStatus("OK")); + dtl.insert("2014-01-15T05:53:40Z", opsMgr.getIntStatus("WARNING")); + dtl.insert("2014-01-15T12:33:22Z", opsMgr.getIntStatus("UNKNOWN")); + dtl.settle(opsMgr.getIntStatus("MISSING")); + + // Create expected 
state array + int[] expected = new int[288]; + // start state = "CRITICAL" + int expectedSS = opsMgr.getIntStatus("CRITICAL"); + // First state has timestamp 01:33:44 + // In seconds = 5624 + // In minutes = 5624 / 60 = 94 + // In slots = 94 / 5 = 19 + // So for i=0;i<19-1 fill with expectedSS + for (int i = 0; i < 19 - 1; i++) + expected[i] = expectedSS; + + // Second state has timestamp 05:53:40 + // In seconds = 21220 + // In minutes = 21220 / 60 = 354 + // In slots = 354 / 5 = 71 + // So for i=18;i<71-1 fill with first timestamp + for (int i = 18; i < 71 - 1; i++) + expected[i] = opsMgr.getIntStatus("OK"); + + // Second state has timestamp 12:33:22 + // In seconds = 45202 + // In minutes = 45202 / 60 = 753 + // In slots = 753 / 5 = 151 + // So for i=71;i<151-1 fill with first timestamp + for (int i = 70; i < 151 - 1; i++) + expected[i] = opsMgr.getIntStatus("WARNING"); + + for (int i = 150; i < expected.length; i++) + expected[i] = opsMgr.getIntStatus("UNKNOWN"); + + assertArrayEquals("Aggregation check", expected, dtl.samples); + + // New Timeline + DTimeline dt2 = new DTimeline(); + dt2.setStartState(opsMgr.getIntStatus("OK")); + + dt2.insert("2015-01-24T00:35:21Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T01:35:23Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T02:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T03:35:24Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T04:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T05:35:18Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T06:35:23Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T07:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T08:35:18Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T10:35:17Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T11:35:24Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T12:35:19Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T13:35:23Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T14:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T15:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T16:35:23Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T17:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T18:35:26Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T19:35:27Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T20:35:22Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T21:35:26Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T22:35:21Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T22:45:51Z", opsMgr.getIntStatus("OK")); + dt2.insert("2015-01-24T23:45:52Z", opsMgr.getIntStatus("OK")); + + dt2.settle(opsMgr.getIntStatus("MISSING")); + + int[] expected2 = new int[288]; + for (int i = 0; i < 288; i++) + expected2[i] = 0; + + assertArrayEquals("Aggregation check", expected2, dt2.samples); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/OpsManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/ops/OpsManagerTest.java new file mode 100644 index 00000000..776a794b --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/OpsManagerTest.java @@ -0,0 +1,65 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + + + +public class OpsManagerTest { + + @BeforeClass + public static 
void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", OpsManagerTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resJsonFile = OpsManagerTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Test the available states + ArrayList avStates = new ArrayList(); + avStates.add("OK"); + avStates.add("WARNING"); + avStates.add("UNKNOWN"); + avStates.add("MISSING"); + avStates.add("CRITICAL"); + avStates.add("DOWNTIME"); + + assertEquals("Retrieve Available States", opsMgr.availableStates(), avStates); + + // Test the available operations + ArrayList avOps = new ArrayList(); + avOps.add("AND"); + avOps.add("OR"); + assertEquals("Retrieve Available Operations", opsMgr.availableOps(), avOps); + + // Test the available operations on a variety of states + assertEquals("OK (OR) OK = OK", opsMgr.op("OR", "OK", "OK"), "OK"); + assertEquals("OK (OR) CRITICAL = OK", opsMgr.op("OR", "CRITICAL", "OK"), "OK"); + assertEquals("CRITICAL (OR) MISSING = CRITICAL", opsMgr.op("OR", "CRITICAL", "MISSING"), "CRITICAL"); + assertEquals("WARNING (OR) MISSING = WARNING", opsMgr.op("OR", "WARNING", "MISSING"), "WARNING"); + assertEquals("WARNING (AND) MISSING = MISSING", opsMgr.op("AND", "WARNING", "MISSING"), "MISSING"); + assertEquals("OK (AND) CRITICAL = CRITICAL", opsMgr.op("AND", "OK", "CRITICAL"), "CRITICAL"); + assertEquals("DOWNTIME (AND) UNKNOWN = DOWNTIME", opsMgr.op("AND", "DOWNTIME", "UNKNOWN"), "DOWNTIME"); + + assertEquals("Default Downtime Status = DOWNTIME", opsMgr.getDefaultDown(), "DOWNTIME"); + System.out.println(opsMgr.getDefaultMissingInt()); + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/ops/ThresholdManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/ops/ThresholdManagerTest.java new file mode 100644 index 00000000..ec7b1f84 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/ops/ThresholdManagerTest.java @@ -0,0 +1,76 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class ThresholdManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ThresholdManagerTest.class.getResource("/ops/EGI-algorithm.json")); + assertNotNull("Test file missing", ThresholdManagerTest.class.getResource("/ops/EGI-rules.json")); + } + + @Test + public void test() throws IOException, URISyntaxException { + + // Prepare Resource File + URL opsJsonFile = ThresholdManagerTest.class.getResource("/ops/EGI-algorithm.json"); + File opsFile = new File(opsJsonFile.toURI()); + // Instantiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(opsFile); + + // Prepare Resource File + URL thrJsonFile = ThresholdManagerTest.class.getResource("/ops/EGI-rules.json"); + File thrFile = new File(thrJsonFile.toURI()); + // Instantiate class + ThresholdManager t = new ThresholdManager(); + t.parseJSONFile(thrFile); + + String[] expectedRules = new String[] { "//org.bdii.Freshness", "//org.bdii.Entries", + "/bdii.host1.example.foo/org.bdii.Freshness", 
"/bdii.host3.example.foo/org.bdii.Freshness", + "SITE-101/bdii.host1.example.foo/org.bdii.Freshness", "SITE-101//org.bdii.Freshness" }; + + assertEquals(expectedRules.length, t.getRules().entrySet().size()); + + for (String rule : expectedRules) { + assertEquals(true, t.getRules().keySet().contains(rule)); + } + + assertEquals("SITE-101/bdii.host1.example.foo/org.bdii.Freshness", + t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Freshness")); + + assertEquals("SITE-101//org.bdii.Freshness", + t.getMostRelevantRule("SITE-101", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Freshness", + t.getMostRelevantRule("SITE-202", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Freshness", + t.getMostRelevantRule("SITE-202", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Entries", + t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Entries")); + + assertEquals("", t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Foo")); + + assertEquals("WARNING", t.getStatusByRule("SITE-101/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "AND")); + assertEquals("CRITICAL", t.getStatusByRule("//org.bdii.Entries", opsMgr, "AND")); + assertEquals("WARNING", t.getStatusByRule("//org.bdii.Entries", opsMgr, "OR")); + assertEquals("CRITICAL", t.getStatusByRule("/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "AND")); + assertEquals("WARNING", t.getStatusByRule("/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "OR")); + assertEquals("",t.getStatusByRule("/bdii.host3.example.foo/org.bdii.Freshness", opsMgr, "AND")); //no critical or warning ranges defined + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/AggregationProfileManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/AggregationProfileManagerTest.java new file mode 100644 index 00000000..01fba6a3 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/AggregationProfileManagerTest.java @@ -0,0 +1,101 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + +import junitx.framework.ListAssert; + +public class AggregationProfileManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", AggregationProfileManagerTest.class.getResource("/ops/ap1.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resJsonFile = AggregationProfileManagerTest.class.getResource("/ops/ap1.json"); + File jsonFile = new File(resJsonFile.toURI()); + // Instatiate class + AggregationProfileManager avp = new AggregationProfileManager(); + avp.clearProfiles(); + avp.loadJson(jsonFile); + + // Check that only one availability profile was loaded + assertEquals("Only 1 av profile present", avp.getAvProfiles().size(), 1); + + ArrayList expApList = new ArrayList(); + expApList.add("ap1"); + + // Check the profile list is correct + assertEquals("Profile list check", avp.getAvProfiles(), expApList); + + // Check the profile namespace + assertEquals("Profile namespace", avp.getProfileNamespace("ap1"), "test"); + + // Check the profile groupType + assertEquals("Profile group type", avp.getProfileGroupType("ap1"), "sites"); + + 
// Set the expected profile groups + ArrayList expGroups = new ArrayList(); + expGroups.add("information"); + expGroups.add("compute"); + expGroups.add("storage"); + // Check the available group list + ListAssert.assertEquals("Profile Groups", avp.getProfileGroups("ap1"), expGroups); + + // Check compute group service list + ArrayList expServices = new ArrayList(); + expServices.add("GRAM5"); + expServices.add("QCG.Computing"); + expServices.add("ARC-CE"); + expServices.add("unicore6.TargetSystemFactory"); + expServices.add("CREAM-CE"); + + ListAssert.assertEquals("compute service list", avp.getProfileGroupServices("ap1", "compute"), expServices); + + // Check storage group service list + expServices = new ArrayList(); + expServices.add("SRM"); + expServices.add("SRMv2"); + ListAssert.assertEquals("storage service list", avp.getProfileGroupServices("ap1", "storage"), expServices); + + // Check storage group service list + expServices = new ArrayList(); + expServices.add("Site-BDII"); + ListAssert.assertEquals("accounting list", avp.getProfileGroupServices("ap1", "information"), expServices); + + // Check Various Service Instances operation + assertEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "CREAM-CE"), "OR"); + assertEquals("group compute: ARC-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "ARC-CE"), "OR"); + assertEquals("group storage: SRMv2 op", avp.getProfileGroupServiceOp("ap1", "storage", "SRM"), "OR"); + assertEquals("group storage: SRM op", avp.getProfileGroupServiceOp("ap1", "storage", "SRMv2"), "OR"); + assertEquals("group information: Site-BDII op", avp.getProfileGroupServiceOp("ap1", "information", "Site-BDII"), + "OR"); + assertEquals("get group by service: ", avp.getGroupByService("ap1", "CREAM-CE"), "compute"); + assertEquals("get group by service: ", avp.getGroupByService("ap1", "SRMv2"), "storage"); + // we check for an unexpected operation + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "CREAM-CE"), + "AND"); + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "informationss", "CREAM-CE"), + "AND"); + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "storage", "CREAM-CE"), + "FOO"); + // check for metric profile operations and total operation + assertEquals("metric profile operations: AND", avp.getMetricOp("ap1"), "AND"); + assertEquals("total profile operations: AND", avp.getMetricOp("ap1"), "AND"); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/DowntimeManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/DowntimeManagerTest.java new file mode 100644 index 00000000..3451ab4e --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/DowntimeManagerTest.java @@ -0,0 +1,68 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class DowntimeManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", DowntimeManagerTest.class.getResource("/avro/downtimes_v2.avro")); + } + + @Test + public void test() throws IOException, URISyntaxException { + // Prepare Resource File + URL resAvroFile = DowntimeManagerTest.class.getResource("/avro/downtimes_v2.avro"); + File 
avroFile = new File(resAvroFile.toURI()); + // Instatiate class + DowntimeManager dt = new DowntimeManager(); + // Test loading file + dt.loadAvro(avroFile); + assertNotNull("File Loaded", dt); + + // Test time period retrieval by service endpoint + + // test for cream-ce01.gridpp.rl.ac.uk CREAM-CE + ArrayList timePeriod = new ArrayList(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #1", dt.getPeriod("cream-ce01.gridpp.rl.ac.uk", "CREAM-CE"), timePeriod); + // test for px.ire.kharkov.ua, MyProxy + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #2", dt.getPeriod("px.ire.kharkov.ua", "MyProxy"), timePeriod); + // test for gb-ui-nki.els.sara.nl, UI + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #3", dt.getPeriod("gb-ui-nki.els.sara.nl", "UI"), timePeriod); + // test for cream-ce01.gridpp.rl.ac.uk, gLExec + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #4", dt.getPeriod("cream-ce01.gridpp.rl.ac.uk", "gLExec"), timePeriod); + // test for gcvmfs.cat.cbpf.br, org.squid-cache.Squid + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T20:00:00Z"); + assertEquals("Test timeperiod #5", dt.getPeriod("cvmfs.cat.cbpf.br", "org.squid-cache.Squid"), timePeriod); + // test for apel.ire.kharkov.ua, APEL + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #6", dt.getPeriod("apel.ire.kharkov.ua", "APEL"), timePeriod); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java new file mode 100644 index 00000000..99093f48 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/EndpointGroupManagerTest.java @@ -0,0 +1,61 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import ops.ConfigManager; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class EndpointGroupManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = EndpointGroupManagerTest.class.getResource("/avro/group_endpoints_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + EndpointGroupManager ge = new EndpointGroupManager(); + // Test loading file + ge.loadAvro(avroFile); + assertNotNull("File Loaded", ge); + + // Test Check if service endpoint exists in topology + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("se01.afroditi.hellasgrid.gr", "SRMv2")); + assertTrue(ge.checkEndpoint("grid-perfsonar.hpc.susx.ac.uk", "net.perfSONAR.Latency")); + assertTrue(ge.checkEndpoint("se.grid.tuke.sk", "SRMv2")); + 
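+ 		// checkEndpoint(hostname, service) reports whether the endpoint exists in the
+ 		// loaded SITES topology; getGroup() below resolves the group an endpoint
+ 		// belongs to, and filter() applies the endpoint-group tag criteria taken from
+ 		// config.json.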
assertTrue(ge.checkEndpoint("dpm.grid.atomki.hu", "SRMv2")); + // Test check Group retrieval + ArrayList result1 = new ArrayList(); + result1.add("ru-PNPI"); + assertEquals(ge.getGroup("SITES", "gt3.pnpi.nw.ru", "CREAM-CE"), result1); + + // Test Tag Filtering (Wont filter out anything since input is already + // filtered) + URL resJson = GroupsOfGroupsTest.class.getResource("/ops/config.json"); + File cfgFile = new File(resJson.toURI()); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(cfgFile); + ge.filter(cfgMgr.egroupTags); + + // Check non-existent groups + assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false); + assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupGroupManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupGroupManagerTest.java new file mode 100644 index 00000000..693e1d64 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupGroupManagerTest.java @@ -0,0 +1,113 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + +import junitx.framework.ListAssert; + +public class GroupGroupManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", GroupGroupManagerTest.class.getResource("/avro/poem_sync_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = GroupGroupManagerTest.class.getResource("/avro/poem_sync_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + MetricProfileManager mp = new MetricProfileManager(); + // Test loading file + mp.loadAvro(avroFile); + assertNotNull("File Loaded", mp); + + // Test Loaded Metric Profile + assertEquals("Only one metric profile must be loaded", mp.getProfiles().size(), 1); + assertEquals("Profile ch.cern.sam.roc_critical must be loaded", mp.getProfiles().get(0).toString(), + "ch.cern.sam.ROC_CRITICAL"); + + // Test Loaded Metric Profile Services + ArrayList serviceList = new ArrayList(); + serviceList.add("GRAM5"); + serviceList.add("QCG.Computing"); + serviceList.add("ARC-CE"); + serviceList.add("unicore6.TargetSystemFactory"); + serviceList.add("Site-BDII"); + serviceList.add("CREAM-CE"); + serviceList.add("SRMv2"); + + ListAssert.assertEquals("Test Presence of Loaded Profile Services", mp.getProfileServices("ch.cern.sam.ROC_CRITICAL"), + serviceList); + + // Test Loaded Metric Profile service metrics; + + // GRAM5 service + ArrayList gram5Metrics = new ArrayList(); + gram5Metrics.add("hr.srce.GRAM-Auth"); + gram5Metrics.add("hr.srce.GRAM-CertLifetime"); + gram5Metrics.add("hr.srce.GRAM-Command"); + + ListAssert.assertEquals("Test GRAM5 metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "GRAM5"), + gram5Metrics); + // Test Loaded Metric Profile service metrics; + + // QCG service + ArrayList qcgMetrics = new ArrayList(); + qcgMetrics.add("hr.srce.QCG-Computing-CertLifetime"); + qcgMetrics.add("pl.plgrid.QCG-Computing"); + + ListAssert.assertEquals("Test QCG metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "QCG.Computing"), + qcgMetrics); + + // Site-BDII + ArrayList siteBdiiMetrics = new ArrayList(); + 
siteBdiiMetrics.add("org.bdii.Entries"); + siteBdiiMetrics.add("org.bdii.Freshness"); + ListAssert.assertEquals("Test Site-BDII metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "Site-BDII"), + siteBdiiMetrics); + + // SRMv2 + ArrayList srmv2metrics = new ArrayList(); + srmv2metrics.add("hr.srce.SRM2-CertLifetime"); + srmv2metrics.add("org.sam.SRM-Del"); + srmv2metrics.add("org.sam.SRM-Get"); + srmv2metrics.add("org.sam.SRM-GetSURLs"); + srmv2metrics.add("org.sam.SRM-GetTURLs"); + srmv2metrics.add("org.sam.SRM-Ls"); + srmv2metrics.add("org.sam.SRM-LsDir"); + srmv2metrics.add("org.sam.SRM-Put"); + ListAssert.assertEquals("SRMv2 ", (mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "SRMv2")), srmv2metrics); + + // Check Existense of Profile Service Metric + + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.cream.CREAMCE-JobSubmit")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.cream.CREAMCE-JobSubmit")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-Bi")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-Csh")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-SoftVer")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "hr.srce.CADist-Check")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "hr.srce.CREAMCE-CertLifetime")); + // False results + assertTrue("ARC-CE doesn't have certLifetime", + !(mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "ARC-CE", "hr.srce.CREAMCE-CertLifetime"))); + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupsOfGroupsTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupsOfGroupsTest.java new file mode 100644 index 00000000..dfa4aa6a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/GroupsOfGroupsTest.java @@ -0,0 +1,64 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + + +import ops.ConfigManager; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class GroupsOfGroupsTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", GroupsOfGroupsTest.class.getResource("/avro/group_groups_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = GroupsOfGroupsTest.class.getResource("/avro/group_groups_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + GroupGroupManager gg = new GroupGroupManager(); + // Test loading file + gg.loadAvro(avroFile); + assertNotNull("File Loaded", gg); + // Test retrieve group by subgroup name and group type + assertEquals(gg.getGroup("NGI", "UNI-BONN"), "NGI_DE"); + assertEquals(gg.getGroup("NGI", "MSFG-OPEN"), "NGI_FRANCE"); + assertEquals(gg.getGroup("NGI", "HG-02-IASA"), "NGI_GRNET"); + assertEquals(gg.getGroup("NGI", "ZA-MERAKA"), "AfricaArabia"); + assertEquals(gg.getGroup("NGI", "RU-SPbSU"), "Russia"); 
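+ 		// getGroup("NGI", subgroup) maps a site (subgroup) to its parent NGI, while
+ 		// checkSubGroup() below reports whether the site is present in the loaded
+ 		// group-of-groups topology at all.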
+ // Test to assert if groups exist + assertTrue(gg.checkSubGroup("UNI-BONN")); + assertTrue(gg.checkSubGroup("MSFG-OPEN")); + assertTrue(gg.checkSubGroup("HG-02-IASA")); + assertTrue(gg.checkSubGroup("ZA-MERAKA")); + assertTrue(gg.checkSubGroup("RU-SPbSU")); + + // Test Tag Filtering (Wont filter out anything since input is already + // filtered) + URL resJson = GroupsOfGroupsTest.class.getResource("/ops/config.json"); + File cfgFile = new File(resJson.toURI()); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(cfgFile); + gg.filter(cfgMgr.ggroupTags); + + // Test groups that are not present + assertNotEquals(gg.getGroup("NGI", "KE-UONBI-01"), "AfricaArabia"); + assertNotEquals(gg.getGroup("NGI", "RU-Novosibirsk-BINP"), "Russia"); + assertTrue(gg.checkSubGroup("FRANCE-GRILLES-TESTBED") == false); + + // Test exceptions + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/RecomputationManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/RecomputationManagerTest.java new file mode 100644 index 00000000..e9bcb872 --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/RecomputationManagerTest.java @@ -0,0 +1,103 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + + + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class RecomputationManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", RecomputationManagerTest.class.getResource("/ops/recomp.json")); + } + + @Test + public void test() throws URISyntaxException, ParseException, IOException { + // Prepare Resource File + URL resJsonFile = RecomputationManagerTest.class.getResource("/ops/recomp.json"); + File jsonFile = new File(resJsonFile.toURI()); + + RecomputationManager recMgr = new RecomputationManager(); + recMgr.loadJson(jsonFile); + + + assertEquals(recMgr.isExcluded("GR-01-AUTH"), true); + assertEquals(recMgr.isExcluded("HG-03-AUTH"), true); + assertEquals(recMgr.isExcluded("GR-04-IASA"), false); + + // Check period functionality + ArrayList> gr01list = new ArrayList>(); + ArrayList> siteAlist = new ArrayList>(); + ArrayList> siteBlist = new ArrayList>(); + ArrayList> siteClist = new ArrayList>(); + + Map gr01map = new HashMap(); + + Map siteA1map = new HashMap(); + Map siteA2map = new HashMap(); + + + + Map siteBmap = new HashMap(); + Map siteCmap = new HashMap(); + + // Check period functionality + + gr01map.put("start", "2013-12-08T12:03:44Z"); + gr01map.put("end", "2013-12-10T12:03:44Z"); + + siteA1map.put("start", "2013-12-08T12:03:44Z"); + siteA1map.put("end", "2013-12-08T13:03:44Z"); + + siteA2map.put("start", "2013-12-08T16:03:44Z"); + siteA2map.put("end", "2013-12-08T18:03:44Z"); + + siteBmap.put("start", "2013-12-08T12:03:44Z"); + siteBmap.put("end", "2013-12-08T13:03:44Z"); + + siteCmap.put("start", "2013-12-08T16:03:44Z"); + siteCmap.put("end", "2013-12-08T18:03:44Z"); + + gr01list.add(gr01map); + siteAlist.add(siteA1map); + siteAlist.add(siteA2map); + siteBlist.add(siteBmap); + siteClist.add(siteCmap); + + Assert.assertEquals(recMgr.getPeriods("GR-01-AUTH", "2013-12-08"),gr01list); + Assert.assertEquals(recMgr.getPeriods("SITE-A", "2013-12-08"),siteAlist); + 
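+ 		// getPeriods(group, date) returns the recomputation windows declared for a
+ 		// group on the given day, while isMonExcluded(host, timestamp) below checks
+ 		// whether a monitoring host is excluded at that particular moment.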
Assert.assertEquals(recMgr.getPeriods("SITE-B", "2013-12-08"),siteBlist); + + // check monitoring exclusions + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T11:03:43Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T11:03:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T12:06:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T14:05:44Z")); + Assert.assertEquals(true,recMgr.isMonExcluded("monA", "2013-12-08T15:02:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monA", "2013-12-08T15:03:45Z")); + + // check monitoring exclusions + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T11:03:43Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T11:03:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T12:06:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T14:05:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T15:02:44Z")); + Assert.assertEquals(false,recMgr.isMonExcluded("monB", "2013-12-08T15:03:45Z")); + + } + +} diff --git a/flink_jobs/old-models/batch_ar/src/test/java/sync/WeightManagerTest.java b/flink_jobs/old-models/batch_ar/src/test/java/sync/WeightManagerTest.java new file mode 100644 index 00000000..96d8660a --- /dev/null +++ b/flink_jobs/old-models/batch_ar/src/test/java/sync/WeightManagerTest.java @@ -0,0 +1,58 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class WeightManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", WeightManagerTest.class.getResource("/avro/weights_v2.avro")); + } + + @Test + public void test() throws IOException, URISyntaxException { + // Prepare Resource File + URL resAvroFile = WeightManagerTest.class.getResource("/avro/weights_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + WeightManager wg = new WeightManager(); + // Test loading file + wg.loadAvro(avroFile); + assertNotNull("File Loaded", wg); + + // Test factor retrieval for various sites + assertEquals("Factor for Taiwan-LCG2", wg.getWeight("hepspec", "Taiwan-LCG2"), 125913); + assertEquals("Factor for UNI-DORTMUND", wg.getWeight("hepspec", "UNI-DORTMUND"), 16000); + assertEquals("Factor for INFN-COSENZA", wg.getWeight("hepspec", "INFN-COSENZA"), 2006); + assertEquals("Factor for CA-ALBERTA-WESTGRID-T2", wg.getWeight("hepspec", "CA-ALBERTA-WESTGRID-T2"), 9720); + assertEquals("Factor for INFN-COSENZA", wg.getWeight("hepspec", "INFN-COSENZA"), 2006); + assertEquals("Factor for T2_Estonia", wg.getWeight("hepspec", "T2_Estonia"), 48983); + assertEquals("Factor for AEGIS09-FTN-KM", wg.getWeight("hepspec", "AEGIS09-FTN-KM"), 28); + assertEquals("Factor for UNIBE-ID", wg.getWeight("hepspec", "UNIBE-ID"), 6278); + assertEquals("Factor for NWICG_NDCMS", wg.getWeight("hepspec", "NWICG_NDCMS"), 0); + assertEquals("Factor for CREATIS-INSA-LYON", wg.getWeight("hepspec", "CREATIS-INSA-LYON"), 720); + assertEquals("Factor for FNAL_GPGRID_1", wg.getWeight("hepspec", "FNAL_GPGRID_1"), 344576); + assertEquals("Factor for UA-MHI", wg.getWeight("hepspec", "UA-MHI"), 8984); + assertEquals("Factor for SARA-MATRIX", wg.getWeight("hepspec", "SARA-MATRIX"), 139866); + 
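+ 		// getWeight("hepspec", site) returns the HEPSPEC factor stored in
+ 		// weights_v2.avro for the given site; sites absent from the file yield a
+ 		// factor of 0, as the FOO/BAR assertions below confirm.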
assertEquals("Factor for UKI-SCOTGRID-GLASGOW", wg.getWeight("hepspec", "UKI-SCOTGRID-GLASGOW"), 43878); + assertEquals("Factor for IN2P3-LPSC", wg.getWeight("hepspec", "IN2P3-LPSC"), 11207); + assertEquals("Factor for INFN-LECCE", wg.getWeight("hepspec", "INFN-LECCE"), 19); + assertEquals("Factor for RUG-CIT", wg.getWeight("hepspec", "RUG-CIT"), 8988); + assertEquals("Factor for GR-07-UOI-HEPLAB", wg.getWeight("hepspec", "GR-07-UOI-HEPLAB"), 1872); + + // Test zero factor return for unlisted sites + assertEquals("Factor for FOO", wg.getWeight("hepspec", "FOO"), 0); + assertEquals("Factor for BAR", wg.getWeight("hepspec", "BAR"), 0); + + } + +} diff --git a/flink_jobs/old-models/batch_status/.gitignore b/flink_jobs/old-models/batch_status/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/old-models/batch_status/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/old-models/batch_status/avro/group_endpoint.avsc b/flink_jobs/old-models/batch_status/avro/group_endpoint.avsc new file mode 100644 index 00000000..4ed8818e --- /dev/null +++ b/flink_jobs/old-models/batch_status/avro/group_endpoint.avsc @@ -0,0 +1,14 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "GroupEndpoint", + "fields": [ + {"name": "type", "type": "string"}, + {"name": "group", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "tags", "type" : ["null", { "name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} diff --git a/flink_jobs/old-models/batch_status/avro/group_group.avsc b/flink_jobs/old-models/batch_status/avro/group_group.avsc new file mode 100644 index 00000000..f23f439d --- /dev/null +++ b/flink_jobs/old-models/batch_status/avro/group_group.avsc @@ -0,0 +1,14 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "GroupGroup", + "fields": [ + {"name": "type", "type": "string"}, + {"name": "group", "type": "string"}, + {"name": "subgroup", "type": "string"}, + {"name": "tags", "type" : ["null", { "name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} + diff --git a/flink_jobs/old-models/batch_status/avro/metric_data.avsc b/flink_jobs/old-models/batch_status/avro/metric_data.avsc new file mode 100644 index 00000000..ff3d7a56 --- /dev/null +++ b/flink_jobs/old-models/batch_status/avro/metric_data.avsc @@ -0,0 +1,18 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "MetricData", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} diff --git a/flink_jobs/old-models/batch_status/avro/metric_profile.avsc b/flink_jobs/old-models/batch_status/avro/metric_profile.avsc new file mode 100644 index 00000000..df6eb2a8 --- /dev/null +++ b/flink_jobs/old-models/batch_status/avro/metric_profile.avsc @@ -0,0 +1,13 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "MetricProfile", + "fields": [ + {"name": "profile", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "tags", "type" : 
["null", {"name" : "Tags", + "type" : "map", + "values" : "string" + }] + }] +} diff --git a/flink_jobs/old-models/batch_status/pom.xml b/flink_jobs/old-models/batch_status/pom.xml new file mode 100644 index 00000000..d3129133 --- /dev/null +++ b/flink_jobs/old-models/batch_status/pom.xml @@ -0,0 +1,390 @@ + + + 4.0.0 + + argo.batch + ArgoStatusBatch + 1.0 + jar + + Argo Status Batch Job + + + + UTF-8 + 1.3.2 + 1.7.7 + 1.2.17 + 2.6.0 + + + + + cloudera + https://repository.cloudera.com/artifactory/cloudera-repos + + true + + + true + + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-avro_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + + org.apache.flink + flink-hadoop-compatibility_2.10 + ${flink.version} + + + + + joda-time + joda-time + 1.6 + + + com.google.code.gson + gson + 2.2.4 + + + + + org.mongodb + mongo-java-driver + 3.2.2 + + + + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + log4j + log4j + ${log4j.version} + + + + junit-addons + junit-addons + 1.4 + test + + + junit + junit + 4.13.1 + test + + + + + + + + build-jar + + + false + + + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + + org.apache.flink + flink-hadoop-compatibility_2.10 + ${flink.version} + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + provided + + + log4j + log4j + ${log4j.version} + provided + + + joda-time + joda-time + 1.6 + + + com.google.code.gson + gson + 2.2.4 + + + + org.mongodb + mongo-java-driver + 3.2.2 + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop2 + org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.11 + org.apache.flink:flink-runtime_2.11 + org.apache.flink:flink-optimizer_2.11 + org.apache.flink:flink-clients_2.11 + org.apache.flink:flink-avro_2.11 + org.apache.flink:flink-examples-batch_2.11 + org.apache.flink:flink-examples-streaming_2.11 + org.apache.flink:flink-streaming-java_2.11 + org.apache.flink:flink-streaming-scala_2.11 + org.apache.flink:flink-scala-shell_2.11 + org.apache.flink:flink-python + org.apache.flink:flink-metrics-core + org.apache.flink:flink-metrics-jmx + org.apache.flink:flink-statebackend-rocksdb_2.11 + + + + log4j:log4j + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.data-artisans:flakka-actor_* + com.data-artisans:flakka-remote_* + com.data-artisans:flakka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + 
org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + commons-lang:commons-lang + junit:junit + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + + + \ No newline at end of file diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupEndpoint.java b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupEndpoint.java new file mode 100644 index 00000000..2386b1d2 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupEndpoint.java @@ -0,0 +1,336 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupEndpoint extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupEndpoint\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupEndpoint() {} + + /** + * All-args constructor. + */ + public GroupEndpoint(java.lang.String type, java.lang.String group, java.lang.String service, java.lang.String hostname, java.util.Map tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return service; + case 3: return hostname; + case 4: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: service = (java.lang.String)value$; break; + case 3: hostname = (java.lang.String)value$; break; + case 4: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupEndpoint RecordBuilder */ + public static argo.avro.GroupEndpoint.Builder newBuilder() { + return new argo.avro.GroupEndpoint.Builder(); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint.Builder other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing GroupEndpoint instance */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** + * RecordBuilder for GroupEndpoint instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String service; + private java.lang.String hostname; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupEndpoint.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupEndpoint.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupEndpoint instance */ + private Builder(argo.avro.GroupEndpoint other) { + super(argo.avro.GroupEndpoint.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.service)) { + this.service = data().deepCopy(fields()[2].schema(), other.service); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.hostname)) { + this.hostname = data().deepCopy(fields()[3].schema(), other.hostname); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.tags)) { + this.tags = data().deepCopy(fields()[4].schema(), other.tags); + fieldSetFlags()[4] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder setService(java.lang.String value) { + validate(fields()[2], value); + this.service = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder clearService() { + service = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public 
argo.avro.GroupEndpoint.Builder setHostname(java.lang.String value) { + validate(fields()[3], value); + this.hostname = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.GroupEndpoint.Builder clearHostname() { + hostname = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder setTags(java.util.Map value) { + validate(fields()[4], value); + this.tags = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder clearTags() { + tags = null; + fieldSetFlags()[4] = false; + return this; + } + + @Override + public GroupEndpoint build() { + try { + GroupEndpoint record = new GroupEndpoint(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.service = fieldSetFlags()[2] ? this.service : (java.lang.String) defaultValue(fields()[2]); + record.hostname = fieldSetFlags()[3] ? this.hostname : (java.lang.String) defaultValue(fields()[3]); + record.tags = fieldSetFlags()[4] ? this.tags : (java.util.Map) defaultValue(fields()[4]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupGroup.java b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupGroup.java new file mode 100644 index 00000000..a7712d67 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/GroupGroup.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupGroup extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupGroup\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"subgroup\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String subgroup; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupGroup() {} + + /** + * All-args constructor. 
+ */ + public GroupGroup(java.lang.String type, java.lang.String group, java.lang.String subgroup, java.util.Map tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return subgroup; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: subgroup = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'subgroup' field. + */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** + * Sets the value of the 'subgroup' field. + * @param value the value to set. + */ + public void setSubgroup(java.lang.String value) { + this.subgroup = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupGroup RecordBuilder */ + public static argo.avro.GroupGroup.Builder newBuilder() { + return new argo.avro.GroupGroup.Builder(); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup.Builder other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing GroupGroup instance */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** + * RecordBuilder for GroupGroup instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String subgroup; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupGroup.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupGroup.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupGroup instance */ + private Builder(argo.avro.GroupGroup other) { + super(argo.avro.GroupGroup.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.subgroup)) { + this.subgroup = data().deepCopy(fields()[2].schema(), other.subgroup); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupGroup.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupGroup.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupGroup.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupGroup.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'subgroup' field */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** Sets the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder setSubgroup(java.lang.String value) { + validate(fields()[2], value); + this.subgroup = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'subgroup' field has been set */ + public boolean hasSubgroup() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder clearSubgroup() { + subgroup = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return 
fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public GroupGroup build() { + try { + GroupGroup record = new GroupGroup(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.subgroup = fieldSetFlags()[2] ? this.subgroup : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricData.java b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricData.java new file mode 100644 index 00000000..77800770 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricData.java @@ -0,0 +1,811 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; + +import org.apache.avro.specific.SpecificData; + +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricData extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + private static final long serialVersionUID = 3861438289744595870L; + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"status\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"monitoring_host\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"actual_data\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"summary\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"message\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String timestamp; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String metric; + @Deprecated public java.lang.String status; + @Deprecated public java.lang.String monitoring_host; + @Deprecated public java.lang.String actual_data; + @Deprecated public java.lang.String summary; + @Deprecated public java.lang.String message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public MetricData() {} + + /** + * All-args constructor. 
+ * @param timestamp The new value for timestamp + * @param service The new value for service + * @param hostname The new value for hostname + * @param metric The new value for metric + * @param status The new value for status + * @param monitoring_host The new value for monitoring_host + * @param actual_data The new value for actual_data + * @param summary The new value for summary + * @param message The new value for message + * @param tags The new value for tags + */ + public MetricData(java.lang.String timestamp, java.lang.String service, java.lang.String hostname, java.lang.String metric, java.lang.String status, java.lang.String monitoring_host, java.lang.String actual_data, java.lang.String summary, java.lang.String message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.actual_data = actual_data; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return actual_data; + case 7: return summary; + case 8: return message; + case 9: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: hostname = (java.lang.String)value$; break; + case 3: metric = (java.lang.String)value$; break; + case 4: status = (java.lang.String)value$; break; + case 5: monitoring_host = (java.lang.String)value$; break; + case 6: actual_data = (java.lang.String)value$; break; + case 7: summary = (java.lang.String)value$; break; + case 8: message = (java.lang.String)value$; break; + case 9: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value of the 'timestamp' field. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.String value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + * @return The value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. + * @return The value of the 'metric' field. 
+ */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + * @return The value of the 'status' field. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.String value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value of the 'monitoring_host' field. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. + */ + public void setMonitoringHost(java.lang.String value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value of the 'actual_data' field. + */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value the value to set. + */ + public void setActualData(java.lang.String value) { + this.actual_data = value; + } + + /** + * Gets the value of the 'summary' field. + * @return The value of the 'summary' field. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.String value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + * @return The value of the 'message' field. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.String value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + * @return The value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** + * Creates a new MetricData RecordBuilder. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder() { + return new argo.avro.MetricData.Builder(); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing Builder. + * @param other The existing builder to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData.Builder other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing MetricData instance. + * @param other The existing instance to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String timestamp; + private java.lang.String service; + private java.lang.String hostname; + private java.lang.String metric; + private java.lang.String status; + private java.lang.String monitoring_host; + private java.lang.String actual_data; + private java.lang.String summary; + private java.lang.String message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(SCHEMA$); + } + + /** + * Creates a Builder by copying an existing Builder. + * @param other The existing Builder to copy. + */ + private Builder(argo.avro.MetricData.Builder other) { + super(other); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Creates a Builder by copying an existing MetricData instance + * @param other The existing instance to copy. 
+ */ + private Builder(argo.avro.MetricData other) { + super(SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value The value of 'timestamp'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTimestamp(java.lang.String value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** + * Checks whether the 'timestamp' field has been set. + * @return True if the 'timestamp' field has been set, false otherwise. + */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + + /** + * Clears the value of the 'timestamp' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** + * Gets the value of the 'service' field. + * @return The value. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value The value of 'service'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** + * Checks whether the 'service' field has been set. + * @return True if the 'service' field has been set, false otherwise. + */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + + /** + * Clears the value of the 'service' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value. 
+ */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value The value of 'hostname'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setHostname(java.lang.String value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** + * Checks whether the 'hostname' field has been set. + * @return True if the 'hostname' field has been set, false otherwise. + */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + + /** + * Clears the value of the 'hostname' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** + * Gets the value of the 'metric' field. + * @return The value. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value The value of 'metric'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMetric(java.lang.String value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** + * Checks whether the 'metric' field has been set. + * @return True if the 'metric' field has been set, false otherwise. + */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + + /** + * Clears the value of the 'metric' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** + * Gets the value of the 'status' field. + * @return The value. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value The value of 'status'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setStatus(java.lang.String value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** + * Checks whether the 'status' field has been set. + * @return True if the 'status' field has been set, false otherwise. + */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + + /** + * Clears the value of the 'status' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearStatus() { + status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value The value of 'monitoring_host'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMonitoringHost(java.lang.String value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** + * Checks whether the 'monitoring_host' field has been set. + * @return True if the 'monitoring_host' field has been set, false otherwise. + */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + + /** + * Clears the value of the 'monitoring_host' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value. 
+ */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value The value of 'actual_data'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setActualData(java.lang.String value) { + validate(fields()[6], value); + this.actual_data = value; + fieldSetFlags()[6] = true; + return this; + } + + /** + * Checks whether the 'actual_data' field has been set. + * @return True if the 'actual_data' field has been set, false otherwise. + */ + public boolean hasActualData() { + return fieldSetFlags()[6]; + } + + + /** + * Clears the value of the 'actual_data' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearActualData() { + actual_data = null; + fieldSetFlags()[6] = false; + return this; + } + + /** + * Gets the value of the 'summary' field. + * @return The value. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value The value of 'summary'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setSummary(java.lang.String value) { + validate(fields()[7], value); + this.summary = value; + fieldSetFlags()[7] = true; + return this; + } + + /** + * Checks whether the 'summary' field has been set. + * @return True if the 'summary' field has been set, false otherwise. + */ + public boolean hasSummary() { + return fieldSetFlags()[7]; + } + + + /** + * Clears the value of the 'summary' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearSummary() { + summary = null; + fieldSetFlags()[7] = false; + return this; + } + + /** + * Gets the value of the 'message' field. + * @return The value. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value The value of 'message'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMessage(java.lang.String value) { + validate(fields()[8], value); + this.message = value; + fieldSetFlags()[8] = true; + return this; + } + + /** + * Checks whether the 'message' field has been set. + * @return True if the 'message' field has been set, false otherwise. + */ + public boolean hasMessage() { + return fieldSetFlags()[8]; + } + + + /** + * Clears the value of the 'message' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMessage() { + message = null; + fieldSetFlags()[8] = false; + return this; + } + + /** + * Gets the value of the 'tags' field. + * @return The value. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value The value of 'tags'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTags(java.util.Map value) { + validate(fields()[9], value); + this.tags = value; + fieldSetFlags()[9] = true; + return this; + } + + /** + * Checks whether the 'tags' field has been set. + * @return True if the 'tags' field has been set, false otherwise. + */ + public boolean hasTags() { + return fieldSetFlags()[9]; + } + + + /** + * Clears the value of the 'tags' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTags() { + tags = null; + fieldSetFlags()[9] = false; + return this; + } + + @Override + public MetricData build() { + try { + MetricData record = new MetricData(); + record.timestamp = fieldSetFlags()[0] ? 
this.timestamp : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.String) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.String) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.String) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.String) defaultValue(fields()[5]); + record.actual_data = fieldSetFlags()[6] ? this.actual_data : (java.lang.String) defaultValue(fields()[6]); + record.summary = fieldSetFlags()[7] ? this.summary : (java.lang.String) defaultValue(fields()[7]); + record.message = fieldSetFlags()[8] ? this.message : (java.lang.String) defaultValue(fields()[8]); + record.tags = fieldSetFlags()[9] ? this.tags : (java.util.Map) defaultValue(fields()[9]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricProfile.java b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricProfile.java new file mode 100644 index 00000000..1fe15e09 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/avro/MetricProfile.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricProfile extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricProfile\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"profile\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String profile; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String metric; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public MetricProfile() {} + + /** + * All-args constructor. + */ + public MetricProfile(java.lang.String profile, java.lang.String service, java.lang.String metric, java.util.Map tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return profile; + case 1: return service; + case 2: return metric; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: profile = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: metric = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'profile' field. + */ + public java.lang.String getProfile() { + return profile; + } + + /** + * Sets the value of the 'profile' field. + * @param value the value to set. + */ + public void setProfile(java.lang.String value) { + this.profile = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'metric' field. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricProfile RecordBuilder */ + public static argo.avro.MetricProfile.Builder newBuilder() { + return new argo.avro.MetricProfile.Builder(); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile.Builder other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing MetricProfile instance */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** + * RecordBuilder for MetricProfile instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String profile; + private java.lang.String service; + private java.lang.String metric; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricProfile.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricProfile.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing MetricProfile instance */ + private Builder(argo.avro.MetricProfile other) { + super(argo.avro.MetricProfile.SCHEMA$); + if (isValidValue(fields()[0], other.profile)) { + this.profile = data().deepCopy(fields()[0].schema(), other.profile); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.metric)) { + this.metric = data().deepCopy(fields()[2].schema(), other.metric); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'profile' field */ + public java.lang.String getProfile() { + return profile; + } + + /** Sets the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder setProfile(java.lang.String value) { + validate(fields()[0], value); + this.profile = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'profile' field has been set */ + public boolean hasProfile() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder clearProfile() { + profile = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricProfile.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricProfile.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.String getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder setMetric(java.lang.String value) { + validate(fields()[2], value); + this.metric = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder clearMetric() { + metric = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks 
whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public MetricProfile build() { + try { + MetricProfile record = new MetricProfile(); + record.profile = fieldSetFlags()[0] ? this.profile : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.metric = fieldSetFlags()[2] ? this.metric : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java new file mode 100644 index 00000000..1e7b94b1 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/ArgoStatusBatch.java @@ -0,0 +1,192 @@ +package argo.batch; + +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; + +import org.slf4j.Logger; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.api.common.time.Time; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.api.java.operators.DataSource; + +import org.apache.flink.api.java.utils.ParameterTool; + +import org.apache.flink.core.fs.Path; + + +/** + * Implements an ARGO Status Batch Job in Flink + * + * Submit the job to a Flink cluster using the following parameters: + * --mps: path to metric profile sync file (For hdfs use: hdfs://namenode:port/path/to/file) + * --egp: path to endpoints group topology file (For hdfs use: hdfs://namenode:port/path/to/file) + * --ggp: path to group of groups topology file (For hdfs use: hdfs://namenode:port/path/to/file) + * --pdata: path to previous day's metric data file (For hdfs use: hdfs://namenode:port/path/to/file) + * --mdata: path to metric data file (For hdfs use: hdfs://namenode:port/path/to/file) + * --ops: path to operations profile file (For hdfs use: hdfs://namenode:port/path/to/file) + * --apr: path to aggregations profile file (For hdfs use: hdfs://namenode:port/path/to/file) + * --conf: path to report's configuration file (For hdfs use: hdfs://namenode:port/path/to/file) + * --rec: path to recomputations file + * --thr: path to thresholds rules file (optional) + * --run.date: target date of computation in YYYY-MM-DD format + * --mongo.uri: path to MongoDB destination (eg mongodb://localhost:27017/database.table) + * --mongo.method: Method for storing results to Mongo (insert,upsert) + */ +public class ArgoStatusBatch { + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + public static void main(String[] args) throws Exception { + + final ParameterTool params = ParameterTool.fromArgs(args); + + // set up the execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + // make parameters 
available in the web interface + env.getConfig().setGlobalJobParameters(params); + env.setParallelism(1); + + // sync data for input + Path mps = new Path(params.getRequired("mps")); + Path egp = new Path(params.getRequired("egp")); + Path ggp = new Path(params.getRequired("ggp")); + + + DataSource cfgDS = env.readTextFile(params.getRequired("conf")); + DataSource opsDS = env.readTextFile(params.getRequired("ops")); + DataSource apsDS = env.readTextFile(params.getRequired("apr")); + DataSource recDS = env.readTextFile(params.getRequired("rec")); + + // begin with empty threshold datasource + DataSource thrDS = env.fromElements(""); + // if threshold filepath has been defined in cli parameters + if (params.has("thr")){ + // read file and update threshold datasource + thrDS = env.readTextFile(params.getRequired("thr")); + } + + ConfigManager confMgr = new ConfigManager(); + confMgr.loadJsonString(cfgDS.collect()); + + // Get conf data + List confData = cfgDS.collect(); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJsonString(confData); + // sync data input: metric profile in avro format + AvroInputFormat mpsAvro = new AvroInputFormat(mps, MetricProfile.class); + DataSet mpsDS = env.createInput(mpsAvro); + + // sync data input: endpoint group topology data in avro format + AvroInputFormat egpAvro = new AvroInputFormat(egp, GroupEndpoint.class); + DataSet egpDS = env.createInput(egpAvro); + + // sync data input: group of group topology data in avro format + AvroInputFormat ggpAvro = new AvroInputFormat(ggp, GroupGroup.class); + DataSet ggpDS = env.createInput(ggpAvro); + + // today's metric data + Path in = new Path(params.getRequired("mdata")); + AvroInputFormat mdataAvro = new AvroInputFormat(in, MetricData.class); + DataSet mdataDS = env.createInput(mdataAvro); + + // previous day's metric data + Path pin = new Path(params.getRequired("pdata")); + AvroInputFormat pdataAvro = new AvroInputFormat(pin, MetricData.class); + DataSet pdataDS = env.createInput(pdataAvro); + + // Keep only the latest status per service, hostname and metric from the previous day's data + DataSet pdataMin = pdataDS.groupBy("service", "hostname", "metric") + .sortGroup("timestamp", Order.DESCENDING).first(1); + + // Union today's data with the latest statuses from the previous day + DataSet mdataPrevTotalDS = mdataDS.union(pdataMin); + + // Use yesterday's latest statuses and today's data to find the missing ones and add them to the mix + DataSet fillMissDS = mdataPrevTotalDS.reduceGroup(new FillMissing(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(cfgDS, "conf"); + + + // Discard unused data and attach endpoint group as information + DataSet mdataTrimDS = mdataPrevTotalDS.flatMap(new PickEndpoints(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(recDS, "rec").withBroadcastSet(cfgDS, "conf").withBroadcastSet(thrDS, "thr") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(apsDS, "aps"); + + // Combine previous and today's metric data with the generated missing metric + // data + DataSet mdataTotalDS = mdataTrimDS.union(fillMissDS); + + // Create status detail data set + DataSet stDetailDS = mdataTotalDS.groupBy("group", "service", "hostname", "metric") + .sortGroup("timestamp", Order.ASCENDING).reduceGroup(new CalcPrevStatus(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp"); + + + // Create status endpoint data set + DataSet stEndpointDS = 
stDetailDS.groupBy("group", "service", "hostname") + .sortGroup("metric", Order.ASCENDING).sortGroup("timestamp", Order.ASCENDING) + .reduceGroup(new CalcStatusEndpoint(params)).withBroadcastSet(mpsDS, "mps") + .withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp").withBroadcastSet(opsDS, "ops") + .withBroadcastSet(apsDS, "aps"); + + + // Create status service data set + DataSet stServiceDS = stEndpointDS.groupBy("group", "service") + .sortGroup("hostname", Order.ASCENDING).sortGroup("timestamp", Order.ASCENDING) + .reduceGroup(new CalcStatusService(params)).withBroadcastSet(mpsDS, "mps") + .withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp").withBroadcastSet(opsDS, "ops") + .withBroadcastSet(apsDS, "aps"); + + + // Create status endpoint group data set + DataSet stEndGroupDS = stServiceDS.groupBy("group").sortGroup("service", Order.ASCENDING) + .sortGroup("timestamp", Order.ASCENDING).reduceGroup(new CalcStatusEndGroup(params)) + .withBroadcastSet(mpsDS, "mps").withBroadcastSet(egpDS, "egp").withBroadcastSet(ggpDS, "ggp") + .withBroadcastSet(opsDS, "ops").withBroadcastSet(apsDS, "aps"); + + + String dbURI = params.getRequired("mongo.uri"); + String dbMethod = params.getRequired("mongo.method"); + + String reportID = cfgMgr.getReportID(); + // Initialize four mongo outputs (metric,endpoint,service,endpoint_group) + MongoStatusOutput metricMongoOut = new MongoStatusOutput(dbURI,"status_metrics",dbMethod, MongoStatusOutput.StatusType.STATUS_METRIC, reportID); + MongoStatusOutput endpointMongoOut = new MongoStatusOutput(dbURI,"status_endpoints",dbMethod, MongoStatusOutput.StatusType.STATUS_ENDPOINT, reportID); + MongoStatusOutput serviceMongoOut = new MongoStatusOutput(dbURI,"status_services",dbMethod, MongoStatusOutput.StatusType.STATUS_ENDPOINT, reportID); + MongoStatusOutput endGroupMongoOut = new MongoStatusOutput(dbURI,"status_endpoint_groups",dbMethod, MongoStatusOutput.StatusType.STATUS_ENDPOINT_GROUP, reportID); + + // Store datasets to the designated outputs prepared above + stDetailDS.output(metricMongoOut); + stEndpointDS.output(endpointMongoOut); + stServiceDS.output(serviceMongoOut); + stEndGroupDS.output(endGroupMongoOut); + + String runDate = params.getRequired("run.date"); + + // Create a job title message to discern job in flink dashboard/cli + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Status Batch job for tenant:"); + jobTitleSB.append(confMgr.getTenant()); + jobTitleSB.append(" on day:"); + jobTitleSB.append(runDate); + jobTitleSB.append(" using report:"); + jobTitleSB.append(confMgr.getReport()); + + env.execute(jobTitleSB.toString()); + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcPrevStatus.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcPrevStatus.java new file mode 100644 index 00000000..b6282b0f --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcPrevStatus.java @@ -0,0 +1,102 @@ +package argo.batch; + +import java.util.List; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; 
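+/** + * Accepts a list of status metrics grouped by the fields: endpoint group, service, hostname, metric + * and sorted by timestamp. Attaches to each record the status and timestamp of the previous entry in its + * timeline (falling back to MISSING at the start of the run date) and emits only the records of the run date + */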
+ +public class CalcPrevStatus extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + + final ParameterTool params; + + public CalcPrevStatus(ParameterTool params){ + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + private List mps; + private List egp; + private List ggp; + private MetricProfileManager mpsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private String runDate; + + @Override + public void open(Configuration parameters) { + // Get data from broadcast variable + this.runDate = params.getRequired("run.date"); + + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + + // Initialize group group manager + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + } + + @Override + public void reduce(Iterable in, Collector out) throws Exception { + // group input is sorted by timestamp + String prevStatus = "MISSING"; + String prevTimestamp = this.runDate+"T00:00:00Z"; + boolean gotPrev = false; + for (StatusMetric item : in){ + // If we haven't captured the previous day's timestamp yet + if (!gotPrev){ + if (item.getTimestamp().split("T")[0].compareToIgnoreCase(this.runDate) != 0) { + // set prevTimestamp to this item's timestamp + prevTimestamp = item.getTimestamp(); + prevStatus = item.getStatus(); + gotPrev = true; + continue; + } + } + + item.setPrevState(prevStatus); + item.setPrevTs(prevTimestamp); + + if (item.getTimestamp().split("T")[0].compareToIgnoreCase(this.runDate) == 0){ + out.collect(item); + } + + + prevStatus = item.getStatus(); + prevTimestamp = item.getTimestamp(); + + + + } + + } + + + + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java new file mode 100644 index 00000000..b36d3ec2 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndGroup.java @@ -0,0 +1,155 @@ +package argo.batch; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import ops.CAggregator; +import ops.OpsManager; +import sync.AggregationProfileManager; + +/** + * Accepts a list of status metrics grouped by the fields: endpoint group + * Uses Continuous Timelines and Aggregators to calculate the status results of an endpoint group + * Prepares the data in a form aligned with the datastore schema for status flavor collection + */ +public class CalcStatusEndGroup extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcStatusEndGroup(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + + + private List 
aps; + private List ops; + + private AggregationProfileManager apsMgr; + private OpsManager opsMgr; + + + private String runDate; + public HashMap groupEndpointAggr; + + private boolean getGroup; + + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group type + this.runDate = params.getRequired("run.date"); + // set the Structures + this.groupEndpointAggr = new HashMap(); + + this.getGroup = true; + } + + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + this.groupEndpointAggr.clear(); + + + + String aProfile = this.apsMgr.getAvProfiles().get(0); + String service =""; + String endpointGroup =""; + int dateInt = Integer.parseInt(this.runDate.replace("-", "")); + + + + for (StatusMetric item : in) { + + if (getGroup){ + endpointGroup = item.getGroup(); + getGroup =false; + } + + service = item.getService(); + endpointGroup = item.getGroup(); + + String ts = item.getTimestamp(); + String status = item.getStatus(); + + + // Get the availability group + String group = apsMgr.getGroupByService(aProfile, service); + + // if group doesn't exist yet create it + if (this.groupEndpointAggr.containsKey(group) == false) { + this.groupEndpointAggr.put(group, new CAggregator()); + } + + this.groupEndpointAggr.get(group).insert(service, ts, this.opsMgr.getIntStatus(status)); + + + + } + + // Aggregate each group + for (String group : this.groupEndpointAggr.keySet()) { + // Get group Operation + + String gop = this.apsMgr.getProfileGroupOp(aProfile, group); + + this.groupEndpointAggr.get(group).aggregate(this.opsMgr, gop); + + } + + // Aggregate all sites + CAggregator totalSite = new CAggregator(); + + // Aggregate each group + for (String group : this.groupEndpointAggr.keySet()) { + for (Entry item : this.groupEndpointAggr.get(group).getSamples()) { + String ts = item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + totalSite.insert(group,ts, item.getValue()); + } + + } + + totalSite.aggregate( this.opsMgr,apsMgr.getTotalOp(aProfile)); + + // Append the timeline + for (Entry item : totalSite.getSamples()) { + + StatusMetric cur = new StatusMetric(); + cur.setDateInt(dateInt); + cur.setGroup(endpointGroup); + + + cur.setTimestamp(item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"))); + cur.setStatus(opsMgr.getStrStatus(item.getValue())); + out.collect(cur); + } + + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java new file mode 100644 index 00000000..c65f9e57 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusEndpoint.java @@ -0,0 +1,168 @@ +package argo.batch; + +import java.io.IOException; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import 
org.apache.flink.util.Collector; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.esotericsoftware.minlog.Log; + +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import ops.CAggregator; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + + +/** + * Accepts a list of status metrics grouped by the fields: endpoint group, service, endpoint + * Uses Continuous Timelines and Aggregators to calculate the status results of a service endpoint + * Prepares the data in a form aligned with the datastore schema for status endpoint collection + */ +public class CalcStatusEndpoint extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcStatusEndpoint(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + private List mps; + private List aps; + private List ops; + private MetricProfileManager mpsMgr; + private AggregationProfileManager apsMgr; + private OpsManager opsMgr; + private String runDate; + private CAggregator endpointAggr; + + private boolean fillMissing; + + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + this.runDate = params.getRequired("run.date"); + this.endpointAggr = new CAggregator(); // Create aggregator + + this.fillMissing = true; + } + + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + this.endpointAggr.clear(); + + String defTimestamp = this.endpointAggr.tsFromDate(this.runDate); + String prevMetricName = ""; + + // Only 1 profile per job + String mProfile = this.mpsMgr.getProfiles().get(0); + // Get default missing state + int defMissing = this.opsMgr.getDefaultMissingInt(); + // Iterate over all metric names of the profile and initialize timelines + + String aprofile = this.apsMgr.getAvProfiles().get(0); + + String service =""; + String endpointGroup =""; + String hostname =""; + int dateInt = Integer.parseInt(this.runDate.replace("-", "")); + + + + for (StatusMetric item : in) { + if (fillMissing) { + // Before reading metric messages, init expected metric + // timelines + + service = item.getService(); + endpointGroup = item.getGroup(); + hostname = item.getHostname(); + + + this.mpsMgr.getProfileServiceMetrics(mProfile, item.getService()); + + + for (String mName : this.mpsMgr.getProfileServiceMetrics(mProfile, service)) { + this.endpointAggr.createTimeline(mName, defTimestamp, defMissing); + } + + fillMissing = false; + } + + service = item.getService(); + endpointGroup = item.getGroup(); + hostname = item.getHostname(); + String metric = item.getMetric(); + String ts = item.getTimestamp(); + String status = item.getStatus(); + String prevStatus = 
item.getPrevState(); + + + // Check if we are in the switch of a new metric name + if (prevMetricName.equals(metric) == false) { + + this.endpointAggr.setFirst(metric, ts, this.opsMgr.getIntStatus(prevStatus)); + + } + + this.endpointAggr.insert(metric, ts, this.opsMgr.getIntStatus(status)); + prevMetricName = metric; + + } + + this.endpointAggr.aggregate(this.opsMgr, this.apsMgr.getMetricOp(aprofile)); + + // Append the timeline + + + + for (Entry item : this.endpointAggr.getSamples()) { + + StatusMetric cur = new StatusMetric(); + cur.setDateInt(dateInt); + cur.setGroup(endpointGroup); + cur.setHostname(hostname); + cur.setService(service); + + + cur.setTimestamp(item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"))); + cur.setStatus(opsMgr.getStrStatus(item.getValue())); + out.collect(cur); + } + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusService.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusService.java new file mode 100644 index 00000000..8105888a --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/CalcStatusService.java @@ -0,0 +1,142 @@ +package argo.batch; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; + +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.mortbay.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +import argo.avro.MetricProfile; +import ops.CAggregator; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + + +/** + * Accepts a list o status metrics grouped by the fields: endpoint group, service + * Uses Continuous Timelines and Aggregators to calculate the status results of a service flavor + * Prepares the data in a form aligned with the datastore schema for status flavor collection + */ +public class CalcStatusService extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public CalcStatusService(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + + + private List aps; + private List ops; + + private AggregationProfileManager apsMgr; + private OpsManager opsMgr; + + + private String runDate; + private CAggregator serviceAggr; + + private boolean getService; + + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + + // Initialize aggregation profile manager + this.apsMgr = new AggregationProfileManager(); + this.apsMgr.loadJsonString(aps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group type + this.runDate = params.getRequired("run.date"); + this.serviceAggr = new CAggregator(); // Create aggregator + + this.getService = true; + } + + @Override + public void reduce(Iterable in, Collector out) 
throws Exception { + + this.serviceAggr.clear(); + + String aProfile = this.apsMgr.getAvProfiles().get(0); + String avGroup = ""; + String service =""; + String endpointGroup =""; + int dateInt = Integer.parseInt(this.runDate.replace("-", "")); + + + for (StatusMetric item : in) { + + if (getService){ + + service = item.getService(); + + // Get the availability Group in which this service belongs + avGroup = this.apsMgr.getGroupByService(aProfile, service); + + getService =false; + + } + + + service = item.getService(); + endpointGroup = item.getGroup(); + String hostname = item.getHostname(); + String ts = item.getTimestamp(); + String status = item.getStatus(); + + + this.serviceAggr.insert(hostname, ts, this.opsMgr.getIntStatus(status)); + + } + + + avGroup = this.apsMgr.getGroupByService(aProfile, service); + String avOp = this.apsMgr.getProfileGroupServiceOp(aProfile, avGroup, service); + + this.serviceAggr.aggregate(this.opsMgr, avOp); + + // Append the timeline + for (Entry item : this.serviceAggr.getSamples()) { + + StatusMetric cur = new StatusMetric(); + cur.setDateInt(dateInt); + cur.setGroup(endpointGroup); + cur.setService(service); + + + cur.setTimestamp(item.getKey().toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"))); + cur.setStatus(opsMgr.getStrStatus(item.getValue())); + out.collect(cur); + } + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/FillMissing.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/FillMissing.java new file mode 100644 index 00000000..93cc243b --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/FillMissing.java @@ -0,0 +1,215 @@ +package argo.batch; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.flink.api.common.functions.RichGroupReduceFunction; +import org.apache.flink.api.java.tuple.Tuple4; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; +import ops.OpsManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; + +/** + * Accepts a list of metric data objects and produces a list of missing mon data objects + */ +public class FillMissing extends RichGroupReduceFunction { + + private static final long serialVersionUID = 1L; + + final ParameterTool params; + + public FillMissing(ParameterTool params) { + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + private List mps; + private List ops; + private List egp; + private List ggp; + private List conf; + private MetricProfileManager mpsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private OpsManager opsMgr; + private ConfigManager confMgr; + private String runDate; + private String egroupType; + private Set> expected; + + /** + * Initialization method of the RichGroupReduceFunction operator + *

      + * This runs at the initialization of the operator and receives a + * configuration parameter object. It initializes all required structures + * used by this operator such as profile managers, operations managers, + * topology managers etc. + * + * @param parameters + * A flink Configuration object + */ + @Override + public void open(Configuration parameters) throws IOException { + + this.runDate = params.getRequired("run.date"); + // Get data from broadcast variables + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.conf = getRuntimeContext().getBroadcastVariable("conf"); + + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize operations manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + this.confMgr = new ConfigManager(); + this.confMgr.loadJsonString(conf); + + this.runDate = params.getRequired("run.date"); + this.egroupType = this.confMgr.egroup; + + + + } + + + /** + * Reads the topology in endpoint group list and the metric profile and produces a set of available service endpoint metrics + * that are expected to be found (as tuple objects (endpoint_group,service,hostname,metric) + **/ + public void initExpected() { + this.expected = new HashSet>(); + String mProfile = this.mpsMgr.getProfiles().get(0); + for (GroupEndpoint servPoint: this.egp){ + + + ArrayList metrics = this.mpsMgr.getProfileServiceMetrics(mProfile, servPoint.getService()); + + if (metrics==null) continue; + for (String metric:metrics){ + this.expected.add(new Tuple4(servPoint.getGroup(),servPoint.getService(),servPoint.getHostname(),metric)); + } + + } + + + + + + } + + /** + * Iterates over all metric data and gathers a set of encountered service endpoint metrics. Then subtracts it from + * a set of expected service endpoint metrics (based on topology) so as the missing service endpoint metrics to be identified. 
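 * (Illustrative example with hypothetical names: if the topology and metric profile expect the tuple
 * (SITE-A, CREAM-CE, ce01.example.org, emi.cream.CREAMCE-JobSubmit) but no metric data for that tuple
 * arrives in the input, the tuple remains in the missing set.)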
Then based on the + * list of the missing service endpoint metrics corresponding metric data are created + * + * @param in + * An Iterable collection of MetricData objects + * @param out + * A Collector list of Missing MonData objects + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + + initExpected(); + + Set> found = new HashSet>(); + + String service = ""; + String endpointGroup = ""; + String hostname = ""; + String metric = ""; + + String timestamp = this.runDate + "T00:00:00Z"; + String state = this.opsMgr.getDefaultMissing(); + + + for (MetricData item : in) { + + service = item.getService(); + hostname = item.getHostname(); + metric = item.getMetric(); + + // Filter By endpoint group if belongs to supergroup + ArrayList groupnames = egpMgr.getGroup(egroupType, hostname, service); + + for (String groupname : groupnames) { + if (ggpMgr.checkSubGroup(groupname) == true) { + endpointGroup = groupname; + found.add(new Tuple4(endpointGroup, service, hostname, metric)); + } + + } + + + + } + + + // Clone expected set to missing (because missing is going to be mutated after subtraction + Set> missing = new HashSet>(this.expected); + // The result of the subtraction is in missing set + missing.removeAll(found); + + + + + // For each item in missing create a missing metric data entry + for (Tuple4 item:missing){ + StatusMetric mn = new StatusMetric(); + // Create a StatusMetric output + // Grab the timestamp to generate the date and time integer fields + // that are exclusively used in datastore for indexing + String timestamp2 = timestamp.split("Z")[0]; + String[] tsToken = timestamp2.split("T"); + int dateInt = Integer.parseInt(tsToken[0].replace("-", "")); + int timeInt = Integer.parseInt(tsToken[1].replace(":","")); + mn.setGroup(item.f0); + mn.setService(item.f1); + mn.setHostname(item.f2); + mn.setMetric(item.f3); + mn.setStatus(state); + mn.setMessage(""); + mn.setSummary(""); + mn.setTimestamp(timestamp); + mn.setDateInt(dateInt); + mn.setTimeInt(timeInt); + + out.collect(mn); + + + } + + + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/MongoStatusOutput.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/MongoStatusOutput.java new file mode 100644 index 00000000..65b52e98 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/MongoStatusOutput.java @@ -0,0 +1,216 @@ +package argo.batch; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; +import org.bson.conversions.Bson; + +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOptions; + + +/** + * MongoOutputFormat for storing status data to mongodb + */ +public class MongoStatusOutput implements OutputFormat { + + public enum MongoMethod { + INSERT, UPSERT + }; + + // Select the type of status input + public enum StatusType { + STATUS_METRIC, STATUS_ENDPOINT, STATUS_SERVICE, STATUS_ENDPOINT_GROUP + } + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String colName; + private MongoMethod method; + private StatusType sType; + private String report; + + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection mCol; 
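A minimal usage sketch of this output format (illustrative only: the connection URI, collection, report name and the stDetailDS dataset are assumptions, and the actual wiring is done in the batch job driver rather than in this class):

    MongoStatusOutput metricMongoOut = new MongoStatusOutput(
            "mongodb://127.0.0.1:27017/argo", "status_metrics", "upsert",
            MongoStatusOutput.StatusType.STATUS_METRIC, "Critical");
    stDetailDS.output(metricMongoOut); // stDetailDS: a DataSet of StatusMetric produced earlier in the job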
+ + // constructor + public MongoStatusOutput(String uri, String col, String method, StatusType sType, String report) { + + if (method.equalsIgnoreCase("upsert")) { + this.method = MongoMethod.UPSERT; + } else { + this.method = MongoMethod.INSERT; + } + + this.sType = sType; + this.report = report; + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.colName = col; + } + + // constructor + public MongoStatusOutput(String host, int port, String db, String col, MongoMethod method, StatusType sType, + String report) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.colName = col; + this.method = method; + this.sType = sType; + this.report = report; + } + + private void initMongo() { + this.mClient = new MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.mCol = mDB.getCollection(colName); + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Prepare correct MongoDocument according to record values and selected StatusType. + * A different document is needed for storing Status Metric results than Endpoint, + * Service or Endpoint Group ones. + */ + private Document prepDoc(StatusMetric record) { + Document doc = new Document("report",this.report) + .append("endpoint_group", record.getGroup()); + + + if (this.sType == StatusType.STATUS_SERVICE) { + + doc.append("service",record.getService()); + + } else if (this.sType == StatusType.STATUS_ENDPOINT) { + + doc.append("service", record.getService()) + .append("host", record.getHostname()); + + } else if (this.sType == StatusType.STATUS_METRIC) { + + doc.append("service", record.getService()) + .append("host", record.getHostname()) + .append("metric", record.getMetric()) + .append("message", record.getMessage()) + .append("summary", record.getSummary()) + .append("time_integer",record.getTimeInt()) + .append("previous_state",record.getPrevState()) + .append("previous_timestamp", record.getPrevTs()) + // append the actual data to status metric record in datastore + .append("actual_data", record.getActualData()) + // append original status and threshold rule applied + .append("original_status", record.getOgStatus()) + .append("threshold_rule_applied", record.getRuleApplied()); + } + + + doc.append("status",record.getStatus()) + .append("timestamp",record.getTimestamp()) + .append("date_integer",record.getDateInt()); + + return doc; + } + + /** + * Prepare correct Update filter according to record values and selected StatusType. + * A different update filter is needed for updating Status Metric results than Endpoint, + * Service or Endpoint Group ones. 
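 * For example, in the STATUS_SERVICE case the filter matches on report, date_integer, endpoint_group,
 * service and timestamp, whereas the endpoint-group case matches only on report, date_integer,
 * endpoint_group and timestamp.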
+ */ + private Bson prepFilter(StatusMetric record) { + + if (this.sType == StatusType.STATUS_METRIC) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("host", record.getHostname()), Filters.eq("metric", record.getMetric()), + Filters.eq("timestamp", record.getTimestamp())); + + } else if (this.sType == StatusType.STATUS_ENDPOINT) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("host", record.getHostname()), Filters.eq("timestamp", record.getTimestamp())); + + } else if (this.sType == StatusType.STATUS_SERVICE) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("timestamp", record.getTimestamp())); + + } else { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("timestamp", record.getTimestamp())); + + } + + + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(StatusMetric record) throws IOException { + + // Mongo Document to be prepared according to StatusType of input + Document doc = prepDoc(record); + + if (this.method == MongoMethod.UPSERT) { + + // Filter for upsert to be prepared according to StatusType of input + Bson f = prepFilter(record); + UpdateOptions opts = new UpdateOptions().upsert(true); + + mCol.replaceOne(f, doc, opts); + } else { + mCol.insertOne(doc); + } + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + mCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/PickEndpoints.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/PickEndpoints.java new file mode 100644 index 00000000..717af10a --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/PickEndpoints.java @@ -0,0 +1,190 @@ +package argo.batch; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.flink.api.common.functions.RichFilterFunction; +import org.apache.flink.api.common.functions.RichFlatMapFunction; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.esotericsoftware.minlog.Log; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import ops.ConfigManager; +import ops.OpsManager; +import ops.ThresholdManager; +import sync.AggregationProfileManager; +import sync.EndpointGroupManager; +import sync.GroupGroupManager; +import sync.MetricProfileManager; +import sync.RecomputationsManager; + +/** + * Accepts a metric data entry and converts it to a status 
metric object by appending endpoint group information + * Filters out entries that do not appear in topology and metric profiles + */ +public class PickEndpoints extends RichFlatMapFunction { + + private static final long serialVersionUID = 1L; + + + final ParameterTool params; + + public PickEndpoints(ParameterTool params){ + this.params = params; + } + + static Logger LOG = LoggerFactory.getLogger(ArgoStatusBatch.class); + + private List mps; + private List egp; + private List ggp; + private List rec; + private List cfg; + private List thr; + private List ops; + private List aps; + private OpsManager opsMgr; + private MetricProfileManager mpsMgr; + private EndpointGroupManager egpMgr; + private GroupGroupManager ggpMgr; + private RecomputationsManager recMgr; + private ConfigManager cfgMgr; + private ThresholdManager thrMgr; + private AggregationProfileManager apsMgr; + + private String egroupType; + + @Override + public void open(Configuration parameters) throws IOException, ParseException { + // Get data from broadcast variable + this.mps = getRuntimeContext().getBroadcastVariable("mps"); + this.egp = getRuntimeContext().getBroadcastVariable("egp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.ggp = getRuntimeContext().getBroadcastVariable("ggp"); + this.rec = getRuntimeContext().getBroadcastVariable("rec"); + this.cfg = getRuntimeContext().getBroadcastVariable("conf"); + this.thr = getRuntimeContext().getBroadcastVariable("thr"); + this.ops = getRuntimeContext().getBroadcastVariable("ops"); + this.aps = getRuntimeContext().getBroadcastVariable("aps"); + + + // Initialize Recomputation manager + this.recMgr = new RecomputationsManager(); + this.recMgr.loadJsonString(rec); + + // Initialize metric profile manager + this.mpsMgr = new MetricProfileManager(); + this.mpsMgr.loadFromList(mps); + // Initialize endpoint group manager + this.egpMgr = new EndpointGroupManager(); + this.egpMgr.loadFromList(egp); + + this.ggpMgr = new GroupGroupManager(); + this.ggpMgr.loadFromList(ggp); + + // Initialize report configuration manager + this.cfgMgr = new ConfigManager(); + this.cfgMgr.loadJsonString(cfg); + + // Initialize Ops Manager + this.opsMgr = new OpsManager(); + this.opsMgr.loadJsonString(ops); + + // Initialize Aggregation Profile manager + this.apsMgr = new AggregationProfileManager(); + this.apsMgr.loadJsonString(aps); + + this.egroupType = cfgMgr.egroup; + + // Initialize Threshold manager + this.thrMgr = new ThresholdManager(); + if (!this.thr.get(0).isEmpty()){ + this.thrMgr.parseJSON(this.thr.get(0)); + } + + + } + + + + @Override + public void flatMap(MetricData md, Collector out) throws Exception { + + String prof = mpsMgr.getProfiles().get(0); + String aprof = apsMgr.getAvProfiles().get(0); + String hostname = md.getHostname(); + String service = md.getService(); + String metric = md.getMetric(); + String monHost = md.getMonitoringHost(); + String ts = md.getTimestamp(); + + // Filter By monitoring engine + if (recMgr.isMonExcluded(monHost, ts) == true) return; + + // Filter By aggregation profile + if (apsMgr.checkService(aprof, service) == false) return; + + // Filter By metric profile + if (mpsMgr.checkProfileServiceMetric(prof, service, metric) == false) return; + + + + + // Filter By endpoint group if belongs to supergroup + ArrayList groupnames = egpMgr.getGroup(egroupType, hostname, service); + + for (String groupname : groupnames) { + if (ggpMgr.checkSubGroup(groupname) == true){ + // Create a StatusMetric output + String timestamp2 = 
md.getTimestamp().split("Z")[0]; + String[] tsToken = timestamp2.split("T"); + int dateInt = Integer.parseInt(tsToken[0].replace("-", "")); + int timeInt = Integer.parseInt(tsToken[1].replace(":","")); + String status = md.getStatus(); + String actualData = md.getActualData(); + String ogStatus = ""; + String ruleApplied = ""; + + if (actualData != null) { + // Check for relevant rule + String rule = thrMgr.getMostRelevantRule(groupname, md.getHostname(), md.getMetric()); + // if a rule is indeed found + if (!rule.isEmpty()){ + // get the retrieved values from the actual data + Map values = thrMgr.getThresholdValues(actualData); + // calculate + String[] statusNext = thrMgr.getStatusByRuleAndValues(rule, this.opsMgr, "AND", values); + if (statusNext[0].isEmpty()) statusNext[0] = status; + LOG.info("{},{},{} data:({}) {} --> {}",groupname,md.getHostname(),md.getMetric(),values,status,statusNext[0]); + if (!status.equals(statusNext[0])) { + ogStatus = status; + ruleApplied = statusNext[1]; + status = statusNext[0]; + } + } + + + } + + StatusMetric sm = new StatusMetric(groupname,md.getService(),md.getHostname(),md.getMetric(), status,md.getTimestamp(),dateInt,timeInt,md.getSummary(),md.getMessage(),"","",actualData, ogStatus, ruleApplied); + + out.collect(sm); + } + + + } + + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/argo/batch/StatusMetric.java b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/StatusMetric.java new file mode 100644 index 00000000..9bf147c1 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/argo/batch/StatusMetric.java @@ -0,0 +1,167 @@ +package argo.batch; + + +public class StatusMetric { + + + private String group; + private String service; + private String hostname; + private String metric; + private String status; + private String timestamp; + private int dateInt; + private int timeInt; + private String summary; + private String message; + private String prevState; + private String prevTs; + private String actualData; + private String ogStatus; // original status from monitoring host + private String ruleApplied; // threshold rule applied - empty if not + + public StatusMetric(){ + this.group = ""; + this.service =""; + this.hostname = ""; + this.metric=""; + this.status = ""; + this.timestamp = ""; + this.dateInt = 0; + this.timeInt =0; + this.summary = ""; + this.message = ""; + this.prevState = ""; + this.prevTs = ""; + this.actualData = ""; + this.ogStatus = ""; + this.ruleApplied = ""; + } + + public StatusMetric(String group, String service, String hostname, String metric, String status, String timestamp, + int dateInt, int timeInt, String summary, String message, String prevState, String prevTs, String actualData, String ogStatus, String ruleApplied) { + + this.group = group; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.timestamp = timestamp; + this.dateInt = dateInt; + this.timeInt = timeInt; + this.summary = summary; + this.message = message; + this.prevState = prevState; + this.prevTs = prevTs; + this.actualData = actualData; + this.ogStatus = ogStatus; + this.ruleApplied = ruleApplied; + } + + + public String getGroup() { + return group; + } + public void setGroup(String group) { + this.group = group; + } + public String getService() { + return service; + } + public void setService(String service) { + this.service = service; + } + public String getHostname() { + return hostname; + } + public void setHostname(String hostname) { + this.hostname = hostname; + } + 
public String getMetric() { + return metric; + } + public void setMetric(String metric) { + this.metric = metric; + } + public String getStatus() { + return status; + } + public void setStatus(String status) { + this.status = status; + } + public String getTimestamp() { + return timestamp; + } + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + public int getDateInt() { + return dateInt; + } + public void setDateInt(int dateInt) { + this.dateInt = dateInt; + } + public int getTimeInt() { + return timeInt; + } + public void setTimeInt(int timeInt) { + this.timeInt = timeInt; + } + + public String getSummary() { + return summary; + } + + public void setSummary(String summary) { + this.summary = summary; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public String getPrevState() { + return prevState; + } + public void setPrevState(String prevState) { + this.prevState = prevState; + } + public String getPrevTs() { + return prevTs; + } + public void setPrevTs(String prevTs) { + this.prevTs = prevTs; + } + + public String getActualData() { + return actualData; + } + public void setActualData(String actualData) { + this.actualData = actualData; + } + + public String getOgStatus() { + return ogStatus; + } + public void setOgStatus(String ogStatus) { + this.ogStatus = ogStatus; + } + + public String getRuleApplied() { + return ruleApplied; + } + public void setRuleApplied(String ruleApplied) { + this.ruleApplied = ruleApplied; + } + + @Override + public String toString() { + return "(" + this.group + "," + this.service + "," + this.hostname + "," + this.metric + "," + this.status + "," + this.timestamp + "," + + this.dateInt + "," + this.timeInt + "," + this.prevState + "," + this.prevTs + "," + this.actualData + "," + this.ogStatus + "," + this.ruleApplied + ")"; + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/CAggregator.java b/flink_jobs/old-models/batch_status/src/main/java/ops/CAggregator.java new file mode 100644 index 00000000..fc1189a0 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/CAggregator.java @@ -0,0 +1,98 @@ +package ops; + +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +public class CAggregator { + + private CTimeline output; + private Map inputs; + + public CAggregator(String timestamp) throws ParseException + { + this.output = new CTimeline(timestamp); + this.inputs = new HashMap(); + } + + public CAggregator(){ + this.output = new CTimeline(); + this.inputs = new HashMap(); + + } + + public void clear(){ + this.output.clear(); + this.inputs.clear(); + } + + public String tsFromDate(String date){ + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd"); + tmp_date = fmt.parseDateTime(date); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + return tmp_date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + } + + public void createTimeline(String name, String timestamp, int prevState){ + CTimeline temp = new CTimeline(timestamp,prevState); + this.inputs.put(name, temp); + } + + public void insert(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if 
(this.inputs.containsKey(name) == false) + { + CTimeline temp = new CTimeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).insert(timestamp, status); + } + + public void setFirst(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) + { + CTimeline temp = new CTimeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).setFirst(timestamp, status); + } + + public LocalDate getDate(){ + return output.getDate(); + } + + public Set> getSamples(){ + return this.output.getSamples(); + } + + + public void clearAndSetDate(String timestamp) + { + this.output = new CTimeline(timestamp); + this.inputs.clear(); + + } + + public void aggregate(OpsManager opsMgr, String op){ + this.output.clear(); + + //Iterate through all available input timelines and aggregate + for (CTimeline item : this.inputs.values()) { + this.output.aggregate(item, opsMgr, opsMgr.getIntOperation(op)); + } + + } +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/CTimeline.java b/flink_jobs/old-models/batch_status/src/main/java/ops/CTimeline.java new file mode 100644 index 00000000..c047aca2 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/CTimeline.java @@ -0,0 +1,180 @@ +package ops; + + +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.esotericsoftware.minlog.Log; + +import argo.batch.ArgoStatusBatch; + + +public class CTimeline { + + private LocalDate date; + + static Logger LOG = LoggerFactory.getLogger(CTimeline.class); + + private TreeMap samples; + + CTimeline() + { + this.date = null; + this.samples = new TreeMap(); + } + + CTimeline(String timestamp){ + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + } + + CTimeline(String timestamp, int state){ + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + this.samples.put(tmp_date, state); + + } + + public int get(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + return this.samples.floorEntry(tmp_date).getValue(); + } + + public int get(DateTime point) { + if (this.samples.floorEntry(point) == null){ + + throw new RuntimeException("no item found in timeline, size of timeline:" + this.samples.size() + "," + point.toString()); + } + return this.samples.floorEntry(point).getValue(); + } + + public void insert(String timestamp, int status) + { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples.put(tmp_date, status); + } + + public void insert(DateTime date, int status) + { + samples.put(date, status); + + } + + public void 
setFirst(String timestamp, int state) + { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples = new TreeMap(); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.samples.put(tmp_date, state); + } + + public void clear(){ + this.samples.clear(); + } + + public void bulkInsert(Set> samples){ + this.samples.clear(); + for (Entry entry : samples){ + this.samples.put(entry.getKey(), entry.getValue()); + } + } + + public Set> getSamples(){ + return samples.entrySet(); + } + + public LocalDate getDate(){ + return this.date; + } + + public int getLength(){ + return this.samples.size(); + } + + public boolean isEmpty() { + return this.samples.isEmpty(); + } + + public void optimize() + { + TreeMap optimal = new TreeMap(); + int prevstate = -1; + for (DateTime key : this.samples.keySet()){ + int value = this.samples.get(key); + if (prevstate == -1) { + + optimal.put(key, value); + prevstate = value; + + } + if (prevstate != value){ + optimal.put(key, value); + prevstate = value; + } + } + + this.samples = optimal; + } + + public Set getPoints(){ + return this.samples.keySet(); + } + + public void aggregate(CTimeline second, OpsManager opsMgr, int op){ + if (this.isEmpty()){ + this.bulkInsert(second.getSamples()); + // Optimize even when we have a single timeline for aggregation + this.optimize(); + return; + } + + CTimeline result = new CTimeline(); + + // Slice for first + for (DateTime point : this.getPoints()){ + result.insert(point, -1); + } + // Slice for second + for (DateTime point : second.getPoints()){ + result.insert(point, -1); + } + + // Iterate over result and ask + for (DateTime point : result.getPoints()){ + int a = this.get(point); + int b = second.get(point); + int x = opsMgr.opInt(op, a, b); + result.insert(point, x); + } + + result.optimize(); + + // Engrave the result in this timeline + this.clear(); + this.bulkInsert(result.getSamples()); + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/ConfigManager.java b/flink_jobs/old-models/batch_status/src/main/java/ops/ConfigManager.java new file mode 100644 index 00000000..bdc3069a --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/ConfigManager.java @@ -0,0 +1,196 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.List; +import java.util.TreeMap; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + + +public class ConfigManager { + + private static final Logger LOG = Logger.getLogger(ConfigManager.class.getName()); + + public String id; // report uuid reference + public String report; + public String tenant; + public String egroup; // endpoint group + public String ggroup; // group of groups + public String weight; // weight factor type + public TreeMap egroupTags; + public TreeMap ggroupTags; + public TreeMap mdataTags; + + public ConfigManager() { + this.report = null; + this.id = null; + this.tenant = null; + this.egroup = null; + this.ggroup = null; + this.weight = null; + this.egroupTags = new TreeMap(); + this.ggroupTags = new TreeMap(); + this.mdataTags = new TreeMap(); + + } + + public void clear() { + this.id = 
null; + this.report = null; + this.tenant = null; + this.egroup = null; + this.ggroup = null; + this.weight = null; + this.egroupTags.clear(); + this.ggroupTags.clear(); + this.mdataTags.clear(); + + } + + public String getReportID() { + return id; + } + + public String getReport() { + return report; + } + + public String getTenant() { + return tenant; + } + + + public String getEgroup() { + return egroup; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(br); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + + // optional weight filtering + this.weight = ""; + if (jObj.has("weight")){ + this.weight = jObj.get("weight").getAsString(); + } + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("filter_tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value = jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + + /** + * Loads Report config information from a config json string + * + */ + public void loadJsonString(List confJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(confJson.get(0)); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + // optional weight filtering + this.weight = ""; + if (jObj.has("weight")){ + this.weight = jObj.get("weight").getAsString(); + } + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value 
= jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/DAggregator.java b/flink_jobs/old-models/batch_status/src/main/java/ops/DAggregator.java new file mode 100644 index 00000000..ae3d68e4 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/DAggregator.java @@ -0,0 +1,118 @@ +package ops; + +import java.io.File; +import java.io.FileNotFoundException; +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map.Entry; + +public class DAggregator { + + public HashMap timelines; + public DTimeline aggregation; + + private int period; // used for sampling of the timelines + private int interval; // used for sampling of the timelines + + // public OpsManager opsMgr; + + public DAggregator() { + + this.period = 1440; + this.interval = 5; + + this.timelines = new HashMap(); + this.aggregation = new DTimeline(this.period, this.interval); + // this.opsMgr = new OpsManager(); + } + + public DAggregator(int period, int interval) { + + this.period = period; + this.interval = interval; + + this.timelines = new HashMap(); + this.aggregation = new DTimeline(); + // this.opsMgr = new OpsManager(); + } + + public void initTimeline(String name, int startStateInt) { + this.timelines.put(name, new DTimeline(this.period, this.interval)); + this.setStartState(name, startStateInt); + } + + public void loadOpsFile(File opsFile) throws FileNotFoundException { + // this.opsMgr.openFile(opsFile); + } + + public void insertSlot(String name, int slot, int statusInt) { + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.samples[slot] = statusInt; + timelines.put(name, tempTimeline); + } else { + timelines.get(name).samples[slot] = statusInt; + } + + } + + public void insert(String name, String timestamp, int statusInt) throws ParseException { + // Get the integer value of the specified status string + + // Check if time-line exists or else create it + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.insert(timestamp, statusInt); + timelines.put(name, tempTimeline); + } else { + timelines.get(name).insert(timestamp, statusInt); + } + } + + public void setStartState(String name, int statusInt) { + // Get the integer value of the specified status string + + // Check if time-line exists or else create it + if (timelines.containsKey(name) == false) { + DTimeline tempTimeline = new DTimeline(this.period, this.interval); + tempTimeline.setStartState(statusInt); + timelines.put(name, tempTimeline); + } else { + timelines.get(name).setStartState(statusInt); + } + } + + public void clear() { + this.timelines.clear(); + this.aggregation.clear(); + } + + public void settleAll(int missingStart) { + for (Entry item : timelines.entrySet()) { + item.getValue().settle(missingStart); + } + } + + public void aggregate(String opType, OpsManager opsMgr) { + + int opTypeInt = opsMgr.getIntOperation(opType); + + for (int i = 0; i < this.aggregation.samples.length; i++) { + + 
boolean firstItem = true; + + for (Entry item : timelines.entrySet()) { + + if (firstItem) { + this.aggregation.samples[i] = item.getValue().samples[i]; + firstItem = false; + } else { + int a = this.aggregation.samples[i]; + int b = item.getValue().samples[i]; + this.aggregation.samples[i] = opsMgr.opInt(opTypeInt, a, b); + } + } + } + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/DIntegrator.java b/flink_jobs/old-models/batch_status/src/main/java/ops/DIntegrator.java new file mode 100644 index 00000000..ef2443fe --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/DIntegrator.java @@ -0,0 +1,74 @@ +package ops; + +import java.math.BigDecimal; + +public class DIntegrator { + + public double availability; + public double reliability; + + public double up_f; + public double unknown_f; + public double down_f; + + public static double round(double input, int prec, int mode) { + try { + BigDecimal inputBD = BigDecimal.valueOf(input); + BigDecimal rounded = inputBD.setScale(prec, mode); + return rounded.doubleValue(); + + } catch (NumberFormatException e) { + return -1; + } + } + + public void clear() { + this.up_f = 0; + this.unknown_f = 0; + this.down_f = 0; + + this.availability = 0; + this.reliability = 0; + } + + public void calculateAR(int[] samples, OpsManager opsMgr) { + + clear(); + + double up = 0; + double down = 0; + double unknown = 0; + + for (int i = 0; i < samples.length; i++) { + if (samples[i] == opsMgr.getIntStatus("OK")) { + up++; + } else if (samples[i] == opsMgr.getIntStatus("WARNING")) { + up++; + } else if (samples[i] == opsMgr.getIntStatus("MISSING")) { + unknown++; + } else if (samples[i] == opsMgr.getIntStatus("UNKNOWN")) { + unknown++; + } else if (samples[i] == opsMgr.getIntStatus("DOWNTIME")) { + down++; + } else if (samples[i] == opsMgr.getIntStatus("CRITICAL")) { + + } + } + + double dt = samples.length; + + // Availability = UP period / KNOWN period = UP period / (Total period – + // UNKNOWN period) + this.availability = round(((up / dt) / (1.0 - (unknown / dt))) * 100, 5, BigDecimal.ROUND_HALF_UP); + + // Reliability = UP period / (KNOWN period – Scheduled Downtime) + // = UP period / (Total period – UNKNOWN period – ScheduledDowntime) + this.reliability = round(((up / dt) / (1.0 - (unknown / dt) - (down / dt))) * 100, 5, BigDecimal.ROUND_HALF_UP); + + this.up_f = round(up / dt, 5, BigDecimal.ROUND_HALF_UP); + this.unknown_f = round(unknown / dt, 5, BigDecimal.ROUND_HALF_UP); + this.down_f = round(down / dt, 5, BigDecimal.ROUND_HALF_UP); + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/DTimeline.java b/flink_jobs/old-models/batch_status/src/main/java/ops/DTimeline.java new file mode 100644 index 00000000..a9f57b7c --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/DTimeline.java @@ -0,0 +1,151 @@ +package ops; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Date; +import java.util.TreeMap; + +public class DTimeline { + + private int startState; // state to define the beginning of the timeline + private TreeMap inputStates; // input states with the + // timestamp converted to + // slots + + private int sPeriod; // sampling period measured in minutes + private int sInterval; // sampling interval measured in minutes; + + public int[] samples; // array of samples based on sampling frequency + + public DTimeline() { + this.startState = -1; + this.sPeriod = 1440; // 1 day = 24 
hours = 24 * 60 minutes = 1440 + // minutes + this.sInterval = 5; // every 5 minutes; + this.samples = new int[1440 / 5]; // 288 samples; + this.inputStates = new TreeMap(); + Arrays.fill(samples, -1); + } + + public DTimeline(int period, int interval) { + this.startState = -1; + this.sPeriod = period; // given in minutes + this.sInterval = interval; // every ? minutes; + this.samples = new int[period / interval]; // ? samples + this.inputStates = new TreeMap(); + Arrays.fill(samples, -1); + } + + public void setSampling(int period, int interval) { + this.sPeriod = period; + this.sInterval = interval; + samples = new int[this.sPeriod / this.sInterval]; + } + + public void clear() { + clearSamples(); + clearTimestamps(); + } + + public void clearSamples() { + samples = new int[this.sPeriod / this.sInterval]; + Arrays.fill(samples, -1); + } + + public void clearTimestamps() { + startState = -1; + inputStates.clear(); + } + + public void setStartState(int state) { + this.startState = state; + } + + public int getStartState() { + return this.startState; + } + + public void fill(int stateInt, String startTs, String endTs, String targetDate) throws ParseException { + // Find begin state + int start; + int end; + + SimpleDateFormat dmy = new SimpleDateFormat("yyyy-MM-dd"); + Date startDt = dmy.parse(startTs); + Date endDt = dmy.parse(endTs); + Date targetDt = dmy.parse(targetDate); + + if (startDt.before(targetDt) && !((startTs.substring(0, startTs.indexOf("T")).equals(targetDate)))) { + start = 0; + } else { + start = tsInt(startTs); + } + + if (endDt.after(targetDt) && !((endTs.substring(0, endTs.indexOf("T")).equals(targetDate)))) { + end = this.samples.length-1; + } else { + end = tsInt(endTs); + } + + for (int i = start; i <= end; i++) { + this.samples[i] = stateInt; + } + + } + + public int tsInt(String timestamp) throws ParseException { + + SimpleDateFormat w3c_date = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + Date parsedDate = w3c_date.parse(timestamp); + Calendar cal = Calendar.getInstance(); + cal.setTime(parsedDate); + + int total_seconds = (cal.get(Calendar.HOUR_OF_DAY) * 3600) + (cal.get(Calendar.MINUTE) * 60) + + cal.get(Calendar.SECOND); + + double total_minutes = Math.round(total_seconds / 60.0); + double result = Math.round(total_minutes / this.sInterval); + + if ((int) result == samples.length) { + return (int) result - 1; + } else { + return (int) result; + } + } + + public void insert(String timestamp, int state) throws ParseException { + int slot = this.tsInt(timestamp); + this.inputStates.put(slot, state); + } + + public void settle(int missingStart) { + if (this.startState == -1) { + this.startState = missingStart; + } + int prev_state = this.startState; + int prev_slot = 0; + for (int item : this.inputStates.keySet()) { + if (item == 0) { + this.samples[item] = this.inputStates.get(item); + continue; + } + this.samples[item] = this.inputStates.get(item); + // fill previous states + for (int i = prev_slot; i < item - 1; i++) { + this.samples[i] = prev_state; + } + // set the prev_state and prev_slot + prev_state = this.inputStates.get(item); + prev_slot = item - 1; + } + + // Fill the rest of the array with the last state + for (int i = prev_slot; i < this.samples.length; i++) { + this.samples[i] = prev_state; + } + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/OpsManager.java b/flink_jobs/old-models/batch_status/src/main/java/ops/OpsManager.java new file mode 100644 index 00000000..341c9260 --- /dev/null +++ 
b/flink_jobs/old-models/batch_status/src/main/java/ops/OpsManager.java @@ -0,0 +1,311 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class OpsManager { + + private static final Logger LOG = Logger.getLogger(OpsManager.class.getName()); + + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; + + private int[][][] truthTable; + + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; + + private boolean order; + + public OpsManager() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + + this.order = false; + + } + + public OpsManager(boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; + + this.truthTable = null; + } + + public String getDefaultDown() { + return this.defaultDownState; + } + + public String getDefaultUnknown() { + return this.defaultUnknownState; + } + + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } + + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } + + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } + + public int opInt(String op, String a, String b) { + + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } + + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } + + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } + + public String getStrStatus(int status) { + return this.revStates.get(status); + } + + public int getIntStatus(String status) { + return this.states.get(status); + } + + public String getStrOperation(int op) { + return this.revOps.get(op); + } + + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new 
FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(List opsJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson.get(0)); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + 
this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/ops/ThresholdManager.java b/flink_jobs/old-models/batch_status/src/main/java/ops/ThresholdManager.java new file mode 100644 index 00000000..e69a0190 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/ops/ThresholdManager.java @@ -0,0 +1,754 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +/** + * @author kaggis + * + */ +public class ThresholdManager { + + private static final Logger LOG = Logger.getLogger(ThresholdManager.class.getName()); + + // Nested map that holds rule definitions: "groups/hosts/metrics" -> label -> + // threshold + // rules" + private Map> rules; + + // Reverse index checks for group, host, metrics + private HashSet metrics; + private HashSet hosts; + private HashSet groups; + private String aggregationOp = "AND"; + + public Map> getRules() { + return this.rules; + } + + /** + * Threshold class implements objects that hold threshold values as they are + * parsed by a threshold expression such as the following one: + * + * label=30s;0:50,50:100,0,100 + * + * A Threshold object can be directly constructed from a string including an + * expression as the above + * + * Each threshold object stores the threshold expression and the individual + * parsed items such as value, uom, warning range, critical range and min,max + * values + * + */ + class Threshold { + + private static final String defWarning = "WARNING"; + private static 
final String defCritical = "CRITICAL"; + + private String expression; + private String label; + private Float value; + private String uom; + private Range warning; + private Range critical; + private Float min; + private Float max; + + + + /** + * Constructs a threshold from a string containing a threshold expression + * + * @param expression + * A string containing a threshold exception as the following one: + * label=30s;0:50,50:100,0,100 + * + */ + public Threshold(String expression) { + Threshold temp = parseAndSet(expression); + this.expression = temp.expression; + this.label = temp.label; + this.value = temp.value; + this.uom = temp.uom; + this.warning = temp.warning; + this.critical = temp.critical; + this.min = temp.min; + this.max = temp.max; + + } + + /** + * Create a new threshold object by providing each parameter + * + * @param expression + * string containing the threshold expression + * @param label + * threshold label + * @param value + * threshold value + * @param uom + * unit of measurement - optional + * @param warning + * a range determining warning statuses + * @param critical + * a range determining critical statuses + * @param min + * minimum value available for this threshold + * @param max + * maximum value available for this threshold + */ + public Threshold(String expression, String label, float value, String uom, Range warning, Range critical, + float min, float max) { + + this.expression = expression; + this.label = label; + this.value = value; + this.uom = uom; + this.warning = warning; + this.critical = critical; + this.min = min; + this.max = max; + } + + public String getExpression() { + return expression; + } + + public String getLabel() { + return label; + } + + public float getValue() { + return value; + } + + public String getUom() { + return uom; + } + + public Range getWarning() { + return warning; + } + + public Range getCritical() { + return critical; + } + + public float getMin() { + return min; + } + + public float getMax() { + return max; + } + + /** + * Parses a threshold expression string and returns a Threshold object + * + * @param threshold + * string containing the threshold expression + * @return Threshold object + */ + public Threshold parseAndSet(String threshold) { + + String pThresh = threshold; + String curLabel = ""; + String curUom = ""; + Float curValue = Float.NaN; + Range curWarning = new Range(); // empty range + Range curCritical = new Range(); // emtpy range + Float curMin = Float.NaN; + Float curMax = Float.NaN; + // find label by splitting at = + String[] tokens = pThresh.split("="); + // Must have two tokens to continue, label=something + if (tokens.length == 2) { + curLabel = tokens[0]; + + // Split right value by ; to find the array of arguments + String[] subtokens = tokens[1].split(";"); + // Must have size > 0 at least a value + if (subtokens.length > 0) { + curUom = getUOM(subtokens[0]); + curValue = Float.parseFloat(subtokens[0].replaceAll(curUom, "")); + if (subtokens.length > 1) { + // iterate over rest of subtokens + for (int i = 1; i < subtokens.length; i++) { + if (i == 1) { + // parse warning range + curWarning = new Range(subtokens[i]); + continue; + } else if (i == 2) { + // parse critical + curCritical = new Range(subtokens[i]); + continue; + } else if (i == 3) { + // parse min + curMin = Float.parseFloat(subtokens[i]); + continue; + } else if (i == 4) { + // parse min + curMax = Float.parseFloat(subtokens[i]); + } + } + } + + } + + } + + return new Threshold(threshold, curLabel, curValue, curUom, 
curWarning, curCritical, curMin, curMax); + + } + + /** + * Reads a threshold string value and extracts the unit of measurement if + * present + * + * @param value + * String containing a representation of the value and uom + * @return String representing the uom. + */ + public String getUOM(String value) { + // check if ends with digit + if (Character.isDigit(value.charAt(value.length() - 1))) { + return ""; + } + + // check if ends with seconds + if (value.endsWith("s")) + return "s"; + if (value.endsWith("us")) + return "us"; + if (value.endsWith("ms")) + return "ms"; + if (value.endsWith("%")) + return "%"; + if (value.endsWith("B")) + return "B"; + if (value.endsWith("KB")) + return "KB"; + if (value.endsWith("MB")) + return "MB"; + if (value.endsWith("TB")) + return "TB"; + if (value.endsWith("c")) + return "c"; + + // Not valid range + throw new RuntimeException("Invalid Unit of measurement: " + value); + + } + + /** + * Checks an external value against a threshold's warning,critical ranges. If a + * range contains the value (warning or critical) the corresponding status is + * returned as string "WARNING" or "CRITICAL". If the threshold doesn't provide + * the needed data to decide on status an "" is returned back. + * + * @return string with the status result "WARNING", "CRITICAL" + */ + public String calcStatusWithValue(Float value) { + + if (!Float.isFinite(this.value)) + return ""; + if (!this.warning.isUndef()) { + if (this.warning.contains(value)) + return defWarning; + } + if (!this.critical.isUndef()) { + if (this.critical.contains(value)) + return defCritical; + } + + return ""; + } + + /** + * Checks a threshold's value against warning,critical ranges. If a range + * contains the value (warning or critical) the corresponding status is returned + * as string "WARNING" or "CRITICAL". If the threshold doesn't provide the + * needed data to decide on status an "" is returned back. + * + * @return string with the status result "WARNING", "CRITICAL" + */ + public String calcStatus() { + + if (!Float.isFinite(this.value)) + return ""; + if (!this.warning.isUndef()) { + if (this.warning.contains(this.value)) + return defWarning; + } + if (!this.critical.isUndef()) { + if (this.critical.contains(this.value)) + return defCritical; + } + + return ""; + } + + public String toString() { + String strWarn = ""; + String strCrit = ""; + String strMin = ""; + String strMax = ""; + + if (this.warning != null) + strWarn = this.warning.toString(); + if (this.critical != null) + strCrit = this.critical.toString(); + if (this.min != null) + strMin = this.min.toString(); + if (this.max != null) + strMax = this.max.toString(); + + return "[expression=" + this.expression + ", label=" + this.label + ", value=" + this.value + ", uom=" + + this.uom + ", warning=" + strWarn + ", critical=" + strCrit + ", min=" + strMin + ", max=" + + strMax + ")"; + } + + } + + /** + * Range implements a simple object that holds a threshold's critical or warning + * range. It includes a floor,ceil as floats and an exclude flag when a range is + * supposed to be used for exclusion and not inclusion. The threshold spec uses + * an '@' character in front of a range to define inversion(exclusion) + * + * Inclusion assumes that floor < value < ceil and not floor <= value <= ceil + * + */ + class Range { + Float floor; + Float ceil; + Boolean exclude; + + /** + * Creates an empty range. 
Invert is false and limits are NaN + */ + public Range() { + this.floor = Float.NaN; + this.ceil = Float.NaN; + this.exclude = false; + } + + /** + * Creates a range by parameters + * + * @param floor + * Float that defines the lower limit of the range + * @param ceil + * Float that defines the upper limit of the range + * @param exclude + * boolean that defines if the range is used for inclusion (true) or + * exlusion (false) + */ + public Range(Float floor, Float ceil, Boolean exclude) { + this.floor = floor; + this.ceil = ceil; + this.exclude = exclude; + } + + /** + * Creates a range by parsing a range expression string like the following one: + * '0:10' + * + * @param range + * string including a range expression + */ + public Range(String range) { + Range tmp = parseAndSet(range); + this.floor = tmp.floor; + this.ceil = tmp.ceil; + this.exclude = tmp.exclude; + } + + /** + * Checks if a Range is undefined (float,ceiling are NaN) + * + * @return boolean + */ + public boolean isUndef() { + return this.floor == Float.NaN || this.ceil == Float.NaN; + } + + /** + * Checks if a value is included in range (or truly excluded if range is an + * exclusion) + * + * @param value + * Float + * @return boolean + */ + public boolean contains(Float value) { + boolean result = value > this.floor && value < this.ceil; + if (this.exclude) { + return !result; + } + return result; + } + + /** + * Parses a range expression string and creates a Range object Range expressions + * can be in the following forms: + *

+ * <ul>
+ * <li>10 - range starting from 0 to 10</li>
+ * <li>10: - range starting from 10 to infinity</li>
+ * <li>~:20 - range starting from negative inf. up to 20</li>
+ * <li>20:30 - range between two numbers</li>
+ * <li>@20:30 - inverted range, excludes between two numbers</li>
+ * </ul>
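+ * For illustration, the expressions above would be handled by parseAndSet
+ * (defined below) roughly as follows:
+ * <pre>
+ * "10"     -> floor=0.0,  ceil=10.0, exclude=false
+ * "10:"    -> floor=10.0, ceil=+Infinity, exclude=false
+ * "~:20"   -> floor=-Infinity, ceil=20.0, exclude=false
+ * "@20:30" -> floor=20.0, ceil=30.0, exclude=true
+ * </pre>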
      + * + * @param expression + * String containing a range expression + * @return + */ + public Range parseAndSet(String expression) { + String parsedRange = expression; + Float curFloor = 0F; + Float curCeil = 0F; + boolean curInv = false; + if (parsedRange.replaceAll(" ", "").equals("")) { + return new Range(); + } + // check if invert + if (parsedRange.startsWith("@")) { + curInv = true; + // after check remove @ from range string + parsedRange = parsedRange.replaceAll("^@", ""); + } + + // check if range string doesn't have separator : + if (!parsedRange.contains(":")) { + // then we are in the case of a single number like 10 + // which defines the rule 0 --> 10 so + curFloor = 0F; + curCeil = Float.parseFloat(parsedRange); + + return new Range(curFloor, curCeil, curInv); + } + + // check if range end with separator : + if (parsedRange.endsWith(":")) { + parsedRange = parsedRange.replaceAll(":$", ""); + // then we are in the case of a signle number like 10: + // which defines the rule 10 --> positive infinity + curFloor = Float.parseFloat(parsedRange); + curCeil = Float.POSITIVE_INFINITY; + return new Range(curFloor, curCeil, curInv); + } + + // tokenize string without prefixes + String[] tokens = parsedRange.split(":"); + if (tokens.length == 2) { + // check if token[0] is negative infinity ~ + if (tokens[0].equalsIgnoreCase("~")) { + curFloor = Float.NEGATIVE_INFINITY; + } else { + curFloor = Float.parseFloat(tokens[0]); + } + + curCeil = Float.parseFloat(tokens[1]); + return new Range(curFloor, curCeil, curInv); + } + + // Not valid range + throw new RuntimeException("Invalid threshold: " + expression); + + } + + public String toString() { + return "(floor=" + this.floor + ",ceil=" + this.ceil + ",invert=" + this.exclude.toString() + ")"; + } + + } + + /** + * Creates a Manager that parses rules files with thresholds and stores them + * internally as objects. A ThresholdManager can be used to automatically + * calculate statuses about a monitoring item (group,host,metric) based on the + * most relevant threshold rules stored in it. + */ + public ThresholdManager() { + + this.rules = new HashMap>(); + this.hosts = new HashSet(); + this.groups = new HashSet(); + this.metrics = new HashSet(); + + } + + /** + * Return the default operation when aggregating statuses generated from multiple threshold rules + * @return + */ + public String getAggregationOp() { + return this.aggregationOp; + } + + + /** + * @param op string with the name of the operation to be used in the aggregation (AND,OR,custom one) + */ + public void setAggregationOp(String op) { + this.aggregationOp = op; + } + + /** + * Returns a status calculation for a specific rule key Each rule key is defined + * as follows: 'group/host/metric' and leads to a threshold rule. 
Group and host + * parts are optional as such: 'group//metric' or '/host/metric' or '//metric' + * + * @param rule + * string containing a rule key + * @param opsMgr + * an OpsManager Object to handle status aggregations + * @param opType + * an OpsManager operation to be used (like 'OR', 'AND') + * @return string with status result + */ + public String getStatusByRule(String rule, OpsManager opsMgr, String opType) { + + if (!rules.containsKey(rule)) + return ""; + String status = ""; + Map tholds = rules.get(rule); + for (Entry thold : tholds.entrySet()) { + // first step + if (status == "") { + status = thold.getValue().calcStatus(); + continue; + } + String statusNext = thold.getValue().calcStatus(); + if (statusNext != "") { + status = opsMgr.op(opType, status, statusNext); + } + } + return status; + } + + /** + * Returns a status calculation for a specific rule key Each rule key is defined + * as follows: 'group/host/metric' and leads to a threshold rule. Group and host + * parts are optional as such: 'group//metric' or '/host/metric' or '//metric' + * + * @param rule + * string containing a rule key + * @param opsMgr + * an OpsManager Object to handle status aggregations + * @param opType + * an OpsManager operation to be used (like 'OR', 'AND') + * @return string array with two elements. First element is the status result and second one the rule applied + */ + public String[] getStatusByRuleAndValues(String rule, OpsManager opsMgr, String opType, Map values) { + + if (!rules.containsKey(rule)) + return new String[] {"",""}; + String status = ""; + String explain = ""; + Map tholds = rules.get(rule); + + for ( Entry value : values.entrySet()) { + String label = value.getKey(); + if (tholds.containsKey(label)) { + Threshold th = tholds.get(label); + // first step + if (status == "") { + + status = th.calcStatusWithValue(value.getValue()); + explain = th.getExpression(); + continue; + } + + String statusNext = th.calcStatusWithValue(value.getValue()); + + if (statusNext != "") { + status = opsMgr.op(opType, status, statusNext); + explain = explain + " " + th.getExpression(); + } + } + } + + + return new String[]{status,explain}; + + } + + /** + * Gets the most relevant rule based on a monitoring item (group,host,metric) + * using the following precedence (specific to least specific) (group, host, + * metric) #1 ( , host, metric) #2 (group, , metric) #3 ( , , metric) #4 + * + * @param group + * string with name of the monitored endpoint group + * @param host + * string with name of the monitored host + * @param metric + * string with name of the monitored metric + * @return a string with the relevant rule key + */ + public String getMostRelevantRule(String group, String host, String metric) { + if (!this.metrics.contains(metric)) { + return ""; // nothing found + } else { + + // order or precedence: more specific first + // group,host,metric #1 + // ,host,metric #2 + // group ,metric #3 + // ,metric #4 + if (this.hosts.contains(host)) { + if (this.groups.contains(group)) { + // check if combined entry indeed exists + String key = String.format("%s/%s/%s", group, host, metric); + if (this.rules.containsKey(key)) + return key; // #1 + + } else { + return String.format("/%s/%s", host, metric); // #2 + } + } + + if (this.groups.contains(group)) { + // check if combined entry indeed exists + String key = String.format("%s//%s", group, metric); // #3 + if (this.rules.containsKey(key)) + return key; + } + + return String.format("//%s", metric); + } + + } + + /** + * Parses an expression that 
might contain multiple labels=thresholds separated + * by whitespace and creates a HashMap of labels to parsed threshold objects + * + * @param thresholds + * an expression that might contain multiple thresholds + * @return a HashMap to Threshold objects + */ + public Map parseThresholds(String thresholds) { + Map subMap = new HashMap(); + // Tokenize with lookahead on the point when a new label starts + String[] tokens = thresholds.split("(;|[ ]+)(?=[a-zA-Z])"); + for (String token : tokens) { + Threshold curTh = new Threshold(token); + if (curTh != null) { + subMap.put(curTh.getLabel(), curTh); + } + } + return subMap; + } + + /** + * Parses an expression that might contain multiple labels=thresholds separated + * by whitespace and creates a HashMap of labels to parsed Float values + * + * @param thresholds + * an expression that might contain multiple thresholds + * @return a HashMap to Floats + */ + public Map getThresholdValues(String thresholds) { + Map subMap = new HashMap(); + // tokenize thresholds by whitespace + String[] tokens = thresholds.split("(;|[ ]+)(?=[a-zA-Z])"); + for (String token : tokens) { + Threshold curTh = new Threshold(token); + if (curTh != null) { + subMap.put(curTh.getLabel(), curTh.getValue()); + } + } + return subMap; + } + + /** + * Parses a JSON threshold rule file and populates the ThresholdManager + * + * @param jsonFile + * File to be parsed + * @return boolean signaling whether operation succeeded or not + */ + public boolean parseJSONFile(File jsonFile) { + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + String jsonStr = IOUtils.toString(br); + if (!parseJSON(jsonStr)) + return false; + + } catch (IOException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + return false; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + return false; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + return true; + + } + + /** + * Parses a json string with the appropriate threshold rule schema and populates + * the ThresholdManager + * + * @param jsonString + * string containing threshold rules in json format + * @return boolean signaling whether the parse information succeded or not + */ + public boolean parseJSON(String jsonString) { + + + JsonParser json_parser = new JsonParser(); + JsonObject jRoot = json_parser.parse(jsonString).getAsJsonObject(); + JsonArray jRules = jRoot.getAsJsonArray("rules"); + for (JsonElement jRule : jRules) { + JsonObject jRuleObj = jRule.getAsJsonObject(); + String ruleMetric = jRuleObj.getAsJsonPrimitive("metric").getAsString(); + String ruleHost = ""; + String ruleEgroup = ""; + + if (jRuleObj.has("host")) { + ruleHost = jRuleObj.getAsJsonPrimitive("host").getAsString(); + } + if (jRuleObj.has("endpoint_group")) { + ruleEgroup = jRuleObj.getAsJsonPrimitive("endpoint_group").getAsString(); + } + + String ruleThr = jRuleObj.getAsJsonPrimitive("thresholds").getAsString(); + this.metrics.add(ruleMetric); + if (ruleHost != "") + this.hosts.add(ruleHost); + if (ruleEgroup != "") + this.groups.add(ruleEgroup); + String full = ruleEgroup + "/" + ruleHost + "/" + ruleMetric; + Map thrMap = parseThresholds(ruleThr); + this.rules.put(full, thrMap); + } + + return true; + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/sync/AggregationProfileManager.java b/flink_jobs/old-models/batch_status/src/main/java/sync/AggregationProfileManager.java new file mode 100644 
index 00000000..11648956 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/sync/AggregationProfileManager.java @@ -0,0 +1,342 @@ +package sync; + + + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class AggregationProfileManager { + + private HashMap list; + private static final Logger LOG = Logger.getLogger(AggregationProfileManager.class.getName()); + + public AggregationProfileManager() { + + this.list = new HashMap(); + + } + + private class AvProfileItem { + + private String name; + private String namespace; + private String metricProfile; + private String metricOp; + private String groupType; + private String op; + + private HashMap groups; + private HashMap serviceIndex; + + AvProfileItem() { + this.groups = new HashMap(); + this.serviceIndex = new HashMap(); + } + + private class ServGroupItem { + + String op; + HashMap services; + + ServGroupItem(String op) { + this.op = op; + this.services = new HashMap(); + } + } + + // ServGroupItem Declaration Ends Here + + public void insertGroup(String group, String op) { + if (!this.groups.containsKey(group)) { + this.groups.put(group, new ServGroupItem(op)); + } + } + + public void insertService(String group, String service, String op) { + if (this.groups.containsKey(group)) { + this.groups.get(group).services.put(service, op); + this.serviceIndex.put(service, group); + } + } + } + + // AvProfileItem Declaration Ends Here + + public void clearProfiles() { + this.list.clear(); + } + + public String getTotalOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).op; + } + + return ""; + } + + public String getMetricOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricOp; + } + + return ""; + } + + // Return the available Group Names of a profile + public ArrayList getProfileGroups(String avProfile) { + + if (this.list.containsKey(avProfile)) { + ArrayList result = new ArrayList(); + Iterator groupIterator = this.list.get(avProfile).groups.keySet().iterator(); + + while (groupIterator.hasNext()) { + result.add(groupIterator.next()); + } + + return result; + } + + return null; + } + + // Return the available group operation + public String getProfileGroupOp(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + return this.list.get(avProfile).groups.get(groupName).op; + } + } + + return null; + } + + public ArrayList getProfileGroupServices(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + ArrayList result = new ArrayList(); + Iterator srvIterator = this.list.get(avProfile).groups.get(groupName).services.keySet() + .iterator(); + + while (srvIterator.hasNext()) { + result.add(srvIterator.next()); + } + + return result; + } + } + + return null; + } + + public String getProfileGroupServiceOp(String avProfile, String groupName, String service) { + 
+ if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + if (this.list.get(avProfile).groups.get(groupName).services.containsKey(service)) { + return this.list.get(avProfile).groups.get(groupName).services.get(service); + } + } + } + + return null; + } + + public ArrayList getAvProfiles() { + + if (this.list.size() > 0) { + ArrayList result = new ArrayList(); + Iterator avpIterator = this.list.keySet().iterator(); + while (avpIterator.hasNext()) { + result.add(avpIterator.next()); + } + + return result; + + } + + return null; + } + + public String getProfileNamespace(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).namespace; + } + + return null; + } + + public String getProfileMetricProfile(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricProfile; + } + + return null; + } + + public String getProfileGroupType(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).groupType; + } + + return null; + } + + public String getGroupByService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + return this.list.get(avProfile).serviceIndex.get(service); + + } + return null; + + } + + public boolean checkService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + if (this.list.get(avProfile).serviceIndex.containsKey(service)) { + return true; + } + + } + return false; + + } + + public void loadJson(File jsonFile) throws IOException { + + BufferedReader br = null; + try { + + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(br); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(List apsJson) throws IOException { + + + try { + + + + JsonParser jsonParser = new 
JsonParser(); + JsonElement jRootElement = jsonParser.parse(apsJson.get(0)); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + + + } catch (JsonParseException ex) { + LOG.error("Contents are not valid json"); + throw ex; + } + + } + + + + +} + diff --git a/flink_jobs/old-models/batch_status/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/old-models/batch_status/src/main/java/sync/EndpointGroupManager.java new file mode 100644 index 00000000..ab56a7dd --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/sync/EndpointGroupManager.java @@ -0,0 +1,263 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; +import argo.avro.MetricProfile; + +public class EndpointGroupManager { + + private static final Logger LOG = Logger.getLogger(EndpointGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + private class EndpointItem { + String type; // type of group + String group; // name of the group + String service; // type of the service + String hostname; // name of host + HashMap tags; // Tag list + + public EndpointItem() { + // Initializations + this.type = ""; + this.group = ""; + this.service = ""; + this.hostname = ""; + this.tags = new HashMap(); + } + + public EndpointItem(String type, String group, String service, String hostname, HashMap tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + + } + + } + + public EndpointGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + + } + + public int insert(String type, String group, String service, String hostname, HashMap tags) { + EndpointItem 
new_item = new EndpointItem(type, group, service, hostname, tags); + this.list.add(new_item); + return 0; // All good + } + + public boolean checkEndpoint(String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.hostname.equals(hostname) && item.service.equals(service)) { + return true; + } + } + + return false; + } + + public ArrayList getGroup(String type, String hostname, String service) { + + ArrayList results = new ArrayList(); + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + results.add(item.group); + } + } + + return results; + } + + public HashMap getGroupTags(String type, String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public void unfilter() { + this.fList.clear(); + for (EndpointItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (EndpointItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + // First Check binary tags as Y/N 0/1 + + if (fTagItem.getValue().equalsIgnoreCase("y") || fTagItem.getValue().equalsIgnoreCase("n")) { + String binValue = ""; + if (fTagItem.getValue().equalsIgnoreCase("y")) + binValue = "1"; + if (fTagItem.getValue().equalsIgnoreCase("n")) + binValue = "0"; + + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(binValue) == false) { + trim = true; + } + } else if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + /** + * Loads endpoint grouping information from an avro file + *

+ * This method loads endpoint grouping information contained in an .avro
+ * file with specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ * <ol>
+ * <li>type: string (describes the type of grouping)</li>
+ * <li>group: string</li>
+ * <li>service: string</li>
+ * <li>hostname: string</li>
+ * <li>tags: hashmap (contains a map of arbitrary key values)</li>
+ * </ol>
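+ * An example row, with hypothetical values for illustration only, could
+ * look like: type="SITES", group="SITE_A", service="CREAM-CE",
+ * hostname="ce01.example.org", tags={"production": "1"}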
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab Avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String service = avroRow.get("service").toString(); + String hostname = avroRow.get("hostname").toString(); + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + + } // end of avro rows + + this.unfilter(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + public ArrayList getList(){ + return this.list; + } + + /** + * Loads information from a list of EndpointGroup objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List egp) { + + // For each endpoint group record + for (GroupEndpoint item : egp){ + String type = item.getType(); + String group = item.getGroup(); + String service = item.getService(); + String hostname = item.getHostname(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + } + + this.unfilter(); + + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/sync/GroupGroupManager.java b/flink_jobs/old-models/batch_status/src/main/java/sync/GroupGroupManager.java new file mode 100644 index 00000000..f27f3d67 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/sync/GroupGroupManager.java @@ -0,0 +1,232 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +public class GroupGroupManager { + + static Logger log = Logger.getLogger(GroupGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + private class GroupItem { + 
String type; // type of group + String group; // name of the group + String subgroup; // name of sub-group + HashMap tags; // Tag list + + public GroupItem() { + // Initializations + this.type = ""; + this.group = ""; + this.subgroup = ""; + this.tags = new HashMap(); + } + + public GroupItem(String type, String group, String subgroup, HashMap tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + + } + + } + + public GroupGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + } + + public int insert(String type, String group, String subgroup, HashMap tags) { + GroupItem new_item = new GroupItem(type, group, subgroup, tags); + this.list.add(new_item); + return 0; // All good + } + + public HashMap getGroupTags(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public String getGroup(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.group; + } + } + + return null; + } + + public void unfilter() { + this.fList.clear(); + for (GroupItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (GroupItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + public boolean checkSubGroup(String subgroup) { + for (GroupItem item : fList) { + if (item.subgroup.equals(subgroup)) { + return true; + } + } + + return false; + } + + /** + * Loads groups of groups information from an avro file + *

+ * This method loads groups of groups information contained in an .avro file
+ * with specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ * <ol>
+ * <li>type: string (describes the type of grouping)</li>
+ * <li>group: string</li>
+ * <li>subgroup: string</li>
+ * <li>tags: hashmap (contains a map of arbitrary key values)</li>
+ * </ol>
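+ * An example row, with hypothetical values for illustration only, could
+ * look like: type="NGI", group="NGI_EXAMPLE", subgroup="SITE_A",
+ * tags={"certification": "Certified"}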
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + HashMap tags = (HashMap) avroRow.get("tags"); + if (tags != null) { + for (Object item : tags.keySet()) { + tagMap.put(item.toString(), tags.get(item).toString()); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String subgroup = avroRow.get("subgroup").toString(); + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + + } // end of avro rows + + this.unfilter(); + + dataFileReader.close(); + + } catch (IOException ex) { + log.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + /** + * Loads group of group information from a list of GroupGroup objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List ggp) { + + // For each group of groups record + for (GroupGroup item : ggp){ + String type = item.getType(); + String group = item.getGroup(); + String subgroup = item.getSubgroup(); + + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + } + + this.unfilter(); + + } + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/sync/MetricProfileManager.java b/flink_jobs/old-models/batch_status/src/main/java/sync/MetricProfileManager.java new file mode 100644 index 00000000..12ea8143 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/sync/MetricProfileManager.java @@ -0,0 +1,265 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.MetricProfile; + +public class MetricProfileManager { + + private static final Logger LOG = Logger.getLogger(MetricProfileManager.class.getName()); + + private ArrayList list; + private Map>> index; + + private class ProfileItem { + String profile; // Name of the profile + String service; // Name of the service type + String metric; // Name of the metric + HashMap tags; // Tag list + + public 
ProfileItem() { + // Initializations + this.profile = ""; + this.service = ""; + this.metric = ""; + this.tags = new HashMap(); + } + + public ProfileItem(String profile, String service, String metric, HashMap tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + } + + public MetricProfileManager() { + this.list = new ArrayList(); + this.index = new HashMap>>(); + } + + // Clear all profile data (both list and indexes) + public void clear() { + this.list = new ArrayList(); + this.index = new HashMap>>(); + } + + // Indexed List Functions + public int indexInsertProfile(String profile) { + if (!index.containsKey(profile)) { + index.put(profile, new HashMap>()); + return 0; + } + return -1; + } + + public void insert(String profile, String service, String metric, HashMap tags) { + ProfileItem tmpProfile = new ProfileItem(profile, service, metric, tags); + this.list.add(tmpProfile); + this.indexInsertMetric(profile, service, metric); + } + + public int indexInsertService(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return -1; + } else { + index.get(profile).put(service, new ArrayList()); + return 0; + } + + } + + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + return 0; + + } + + public int indexInsertMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) { + // Metric exists so no insertion + return -1; + } + // Metric doesn't exist and must be added + index.get(profile).get(service).add(metric); + return 0; + } else { + // Create the service and the metric + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + } + + } + // No profile - service - metric so add them all + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + + } + + // Getter Functions + + public ArrayList getProfileServices(String profile) { + if (index.containsKey(profile)) { + ArrayList ans = new ArrayList(); + ans.addAll(index.get(profile).keySet()); + return ans; + } + return null; + + } + + public ArrayList getProfiles() { + if (index.size() > 0) { + ArrayList ans = new ArrayList(); + ans.addAll(index.keySet()); + return ans; + } + return null; + } + + public ArrayList getProfileServiceMetrics(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return index.get(profile).get(service); + } + } + return null; + } + + public boolean checkProfileServiceMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) + return true; + } + } + + return false; + } + + /** + * Loads metric profile information from an avro file + *

+ * This method loads metric profile information contained in an .avro file
+ * with specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ * <ol>
+ * <li>profile: string</li>
+ * <li>service: string</li>
+ * <li>metric: string</li>
+ * <li>[optional] tags: hashmap (contains a map of arbitrary key values)</li>
+ * </ol>
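+ * An example row, with hypothetical values for illustration only, could
+ * look like: profile="PROFILE_A", service="SERVICE_A",
+ * metric="org.example.Check-Health", tags={}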
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String profile = avroRow.get("profile").toString(); + String service = avroRow.get("service").toString(); + String metric = avroRow.get("metric").toString(); + + // Insert data to list + this.insert(profile, service, metric, tagMap); + + } // end of avro rows + + dataFileReader.close(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + /** + * Loads metric profile information from a list of MetricProfile objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List mps) { + + // For each metric profile object in list + for (MetricProfile item : mps){ + String profile = item.getProfile(); + String service = item.getService(); + String metric = item.getMetric(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(profile, service, metric, tagMap); + } + + + } + + +} diff --git a/flink_jobs/old-models/batch_status/src/main/java/sync/RecomputationsManager.java b/flink_jobs/old-models/batch_status/src/main/java/sync/RecomputationsManager.java new file mode 100644 index 00000000..7eea465d --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/java/sync/RecomputationsManager.java @@ -0,0 +1,245 @@ +package sync; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +public class RecomputationsManager { + + private static final Logger LOG = Logger.getLogger(RecomputationsManager.class.getName()); + + public Map>> groups; + // Recomputations for filtering monitoring engine results + public Map>> monEngines; + + public RecomputationsManager() { + this.groups = new HashMap>>(); + this.monEngines = new HashMap>>(); + } 
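+ // Sketch of the recomputation JSON consumed by loadJson()/loadJsonString() below
+ // (hypothetical values; shape inferred from the parsing code):
+ // [{"start_time": "2022-01-01T00:00:00Z", "end_time": "2022-01-01T12:00:00Z",
+ //   "exclude": ["GROUP_A"],
+ //   "exclude_monitoring_source": [{"host": "mon01.example.org",
+ //     "start_time": "2022-01-01T00:00:00Z", "end_time": "2022-01-01T12:00:00Z"}]}]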
+ + // Clear all the recomputation data + public void clear() { + this.groups = new HashMap>>(); + this.monEngines = new HashMap>>(); + } + + // Insert new recomputation data for a specific endpoint group + public void insert(String group, String start, String end) { + + Maptemp = new HashMap(); + temp.put("start", start); + temp.put("end",end); + + if (this.groups.containsKey(group) == false){ + this.groups.put(group, new ArrayList>()); + } + + this.groups.get(group).add(temp); + + } + + // Insert new recomputation data for a specific monitoring engine + public void insertMon(String monHost, String start, String end) throws ParseException { + + Maptemp = new HashMap(); + SimpleDateFormat tsW3C = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + temp.put("s", tsW3C.parse(start)); + temp.put("e",tsW3C.parse(end)); + + if (this.monEngines.containsKey(monHost) == false){ + this.monEngines.put(monHost, new ArrayList>()); + } + + this.monEngines.get(monHost).add(temp); + + } + + // Check if group is excluded in recomputations + public boolean isExcluded (String group){ + return this.groups.containsKey(group); + } + + + + // Check if a recomputation period is valid for target date + public boolean validPeriod(String target, String start, String end) throws ParseException { + + SimpleDateFormat dmy = new SimpleDateFormat("yyyy-MM-dd"); + Date tDate = dmy.parse(target); + Date sDate = dmy.parse(start); + Date eDate = dmy.parse(end); + + return (tDate.compareTo(sDate) >= 0 && tDate.compareTo(eDate) <= 0); + + } + + public ArrayList> getPeriods(String group,String targetDate) throws ParseException { + ArrayList> periods = new ArrayList>(); + + if (this.groups.containsKey(group)){ + for (Map period : this.groups.get(group)){ + if (this.validPeriod(targetDate, period.get("start"), period.get("end"))){ + periods.add(period); + } + } + + } + + return periods; + } + + // + public boolean isMonExcluded(String monHost, String inputTs) throws ParseException{ + + if (this.monEngines.containsKey(monHost) == false) + { + return false; + } + SimpleDateFormat tsW3C = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + Date targetDate = tsW3C.parse(inputTs); + for (Map item : this.monEngines.get(monHost)) + { + + if (!(targetDate.before(item.get("s")) || targetDate.after(item.get("e")))) { + return true; + } + } + + return false; + } + + + + public void loadJson(File jsonFile) throws IOException, ParseException { + + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(br); + JsonArray jRootObj = jRootElement.getAsJsonArray(); + + for (JsonElement item : jRootObj) { + + // Get the excluded sites + if (item.getAsJsonObject().get("start_time") != null + && item.getAsJsonObject().get("end_time") != null + && item.getAsJsonObject().get("exclude") != null ) { + + String start = item.getAsJsonObject().get("start_time").getAsString(); + String end = item.getAsJsonObject().get("end_time").getAsString(); + + // Get the excluded + JsonArray jExclude = item.getAsJsonObject().get("exclude").getAsJsonArray(); + for (JsonElement subitem : jExclude) { + this.insert(subitem.getAsString(),start,end); + } + } + + // Get the excluded Monitoring sources + if (item.getAsJsonObject().get("exclude_monitoring_source") != null) { + JsonArray jMon = item.getAsJsonObject().get("exclude_monitoring_source").getAsJsonArray(); + for (JsonElement subitem: jMon){ + + String monHost = 
subitem.getAsJsonObject().get("host").getAsString(); + String monStart = subitem.getAsJsonObject().get("start_time").getAsString(); + String monEnd = subitem.getAsJsonObject().get("end_time").getAsString(); + this.insertMon(monHost, monStart, monEnd); + } + } + + } + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (ParseException pex) { + LOG.error("Parsing date error"); + throw pex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + /** + * Load Recompuatation information from a JSON string instead of a File source. + * This method is used in execution enviroments where the required data is provided by broadcast variables + */ + public void loadJsonString(List recJson) throws IOException, ParseException { + + this.clear(); + + + try { + + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(recJson.get(0)); + JsonArray jRootObj = jRootElement.getAsJsonArray(); + + for (JsonElement item : jRootObj) { + + // Get the excluded sites + if (item.getAsJsonObject().get("start_time") != null + && item.getAsJsonObject().get("end_time") != null + && item.getAsJsonObject().get("exclude") != null ) { + + String start = item.getAsJsonObject().get("start_time").getAsString(); + String end = item.getAsJsonObject().get("end_time").getAsString(); + + // Get the excluded + JsonArray jExclude = item.getAsJsonObject().get("exclude").getAsJsonArray(); + for (JsonElement subitem : jExclude) { + this.insert(subitem.getAsString(),start,end); + } + } + + // Get the excluded Monitoring sources + if (item.getAsJsonObject().get("exclude_monitoring_source") != null) { + JsonArray jMon = item.getAsJsonObject().get("exclude_monitoring_source").getAsJsonArray(); + for (JsonElement subitem: jMon){ + + String monHost = subitem.getAsJsonObject().get("host").getAsString(); + String monStart = subitem.getAsJsonObject().get("start_time").getAsString(); + String monEnd = subitem.getAsJsonObject().get("end_time").getAsString(); + this.insertMon(monHost, monStart, monEnd); + } + } + + } + + + + } catch (ParseException pex) { + LOG.error("Parsing date error"); + throw pex; + } + } + + +} diff --git a/flink_jobs/old-models/batch_status/src/main/resources/log4j.properties b/flink_jobs/old-models/batch_status/src/main/resources/log4j.properties new file mode 100644 index 00000000..65bd0b8c --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n \ No newline at end of file diff --git a/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-algorithm.json b/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-algorithm.json new file mode 100644 index 00000000..b88d8c99 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-algorithm.json @@ -0,0 +1,239 @@ +{ + "id": "1b0318f0-429d-44fc-8bba-07184354c73b", + "name": "egi_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", 
+ "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] +} diff --git a/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-rules.json b/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-rules.json new file mode 100644 index 00000000..2a52c99a --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/main/resources/ops/EGI-rules.json @@ -0,0 +1,33 @@ +{ + "rules": [ + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30" + }, + { + "metric": "org.bdii.Entries", + "thresholds": "time=-35s;~:10;15:;-100;300 entries=55;20;50:60;50;30" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s; entries=29;;30:50", + "host" : "bdii.host3.example.foo" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=29;0:10;20:30;0;30", + "host" : "bdii.host1.example.foo" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", + "host" : "bdii.host1.example.foo", + "endpoint_group": "SITE-101" + }, + { + "metric": "org.bdii.Freshness", + "thresholds": "freshness=10s;30;50:60;0;100 entries=5;0:10;20:30;50;30", + "endpoint_group": "SITE-101" + } + ] +} diff --git a/flink_jobs/old-models/batch_status/src/test/java/ops/ThresholdManagerTest.java b/flink_jobs/old-models/batch_status/src/test/java/ops/ThresholdManagerTest.java new file mode 100644 index 00000000..b2250b33 --- /dev/null +++ b/flink_jobs/old-models/batch_status/src/test/java/ops/ThresholdManagerTest.java @@ -0,0 +1,91 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class ThresholdManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ThresholdManagerTest.class.getResource("/ops/EGI-algorithm.json")); + assertNotNull("Test file missing", ThresholdManagerTest.class.getResource("/ops/EGI-rules.json")); + } + + @Test + public void test() throws IOException, URISyntaxException { + + // Prepare Resource File + URL opsJsonFile = ThresholdManagerTest.class.getResource("/ops/EGI-algorithm.json"); + File opsFile = new File(opsJsonFile.toURI()); + // Instantiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(opsFile); + + // Prepare Resource File + URL thrJsonFile = ThresholdManagerTest.class.getResource("/ops/EGI-rules.json"); + File thrFile = new File(thrJsonFile.toURI()); + // Instantiate class + ThresholdManager t = new ThresholdManager(); + t.parseJSONFile(thrFile); + + String[] expectedRules = new String[] { "//org.bdii.Freshness", "//org.bdii.Entries", + "/bdii.host1.example.foo/org.bdii.Freshness", "/bdii.host3.example.foo/org.bdii.Freshness", + "SITE-101/bdii.host1.example.foo/org.bdii.Freshness", "SITE-101//org.bdii.Freshness" }; + + assertEquals(expectedRules.length, t.getRules().entrySet().size()); + + for (String rule : expectedRules) { + assertEquals(true, t.getRules().keySet().contains(rule)); + } + + assertEquals("SITE-101/bdii.host1.example.foo/org.bdii.Freshness", + t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Freshness")); + + assertEquals("SITE-101//org.bdii.Freshness", + t.getMostRelevantRule("SITE-101", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Freshness", + 
t.getMostRelevantRule("SITE-202", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Freshness", + t.getMostRelevantRule("SITE-202", "bdii.host2.example.foo", "org.bdii.Freshness")); + + assertEquals("//org.bdii.Entries", + t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Entries")); + + assertEquals("", t.getMostRelevantRule("SITE-101", "bdii.host1.example.foo", "org.bdii.Foo")); + + assertEquals("WARNING", t.getStatusByRule("SITE-101/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "AND")); + assertEquals("CRITICAL", t.getStatusByRule("//org.bdii.Entries", opsMgr, "AND")); + assertEquals("WARNING", t.getStatusByRule("//org.bdii.Entries", opsMgr, "OR")); + assertEquals("CRITICAL", t.getStatusByRule("/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "AND")); + assertEquals("WARNING", t.getStatusByRule("/bdii.host1.example.foo/org.bdii.Freshness", opsMgr, "OR")); + assertEquals("",t.getStatusByRule("/bdii.host3.example.foo/org.bdii.Freshness", opsMgr, "AND")); //no critical or warning ranges defined + + // Test parsing of label=value lists including space separation or not + assertEquals("{size=6754.0, time=3.714648}",t.getThresholdValues("time=3.714648s;;;0.000000 size=6754B;;;0").toString()); + assertEquals("{time=0.037908}",t.getThresholdValues("time=0.037908s;;;0.000000;120.000000").toString()); + assertEquals("{time=0.041992}",t.getThresholdValues("time=0.041992s;;;0.000000;120.000000").toString()); + assertEquals("{entries=1.0, time=0.15}",t.getThresholdValues("time=0.15s;entries=1").toString()); + assertEquals("{entries=1.0, freshness=111.0}",t.getThresholdValues("freshness=111s;entries=1").toString()); + assertEquals("{entries=1.0, freshness=111.0}",t.getThresholdValues("freshness=111s; entries=1").toString()); + assertEquals("{entries=1.0, freshness=111.0}",t.getThresholdValues("freshness=111s;;;entries=1").toString()); + assertEquals("{entries=1.0, freshness=111.0}",t.getThresholdValues("freshness=111s;; entries=1").toString()); + assertEquals("{TSSInstances=1.0}",t.getThresholdValues("TSSInstances=1").toString()); + + String thBig = "tls_ciphers=105.47s dir_head=0.69s dir_get=0.89s file_put=0.82s file_get=0.45s file_options=0.39s file_move=0.42s file_head=0.40s file_head_on_non_existent=0.38s file_propfind=0.40s file_delete=0.72s file_delete_on_non_existent=0.37s"; + String expThBig = "{file_head_on_non_existent=0.38, file_put=0.82, file_delete_on_non_existent=0.37, file_delete=0.72, dir_head=0.69, file_head=0.4, file_propfind=0.4, dir_get=0.89, file_move=0.42, file_options=0.39, file_get=0.45, tls_ciphers=105.47}"; + + assertEquals(expThBig,t.getThresholdValues(thBig).toString()); + } + +} diff --git a/flink_jobs/old-models/stream_status/.gitignore b/flink_jobs/old-models/stream_status/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/old-models/stream_status/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/old-models/stream_status/metric_data.avsc b/flink_jobs/old-models/stream_status/metric_data.avsc new file mode 100644 index 00000000..ab2b9513 --- /dev/null +++ b/flink_jobs/old-models/stream_status/metric_data.avsc @@ -0,0 +1,19 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "MetricData", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": 
"status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "actual_data", "type": ["null", "string"], "default": null}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} diff --git a/flink_jobs/old-models/stream_status/pom.xml b/flink_jobs/old-models/stream_status/pom.xml new file mode 100644 index 00000000..0cc7aecc --- /dev/null +++ b/flink_jobs/old-models/stream_status/pom.xml @@ -0,0 +1,399 @@ + + + 4.0.0 + + argo.streaming + streaming-status + 0.1 + jar + + ARGO Streaming status job + + + + + UTF-8 + 1.1.3 + + + + + + cloudera + https://repository.cloudera.com/artifactory/cloudera-repos/ + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + org.apache.flink + flink-avro_2.10 + ${flink.version} + + + org.apache.flink + flink-connector-filesystem_2.10 + ${flink.version} + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + org.apache.flink + flink-connector-kafka-0.9_2.10 + ${flink.version} + + + commons-codec + commons-codec + 20041127.091804 + + + com.google.code.gson + gson + 2.7 + + + + org.apache.hbase + hbase-client + 1.2.0-cdh5.7.4 + + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + + junit + junit + 4.13.1 + test + + + junit-addons + junit-addons + 1.4 + test + + + + xerces + xercesImpl + 2.12.0 + + + org.mongodb + mongo-java-driver + 3.2.2 + compile + + + + + + + + + + + + build-jar + + false + + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-connector-kafka-0.9_2.10 + ${flink.version} + provided + + + commons-codec + commons-codec + 20041127.091804 + provided + + + + com.google.code.gson + gson + 2.7 + + + org.mongodb + mongo-java-driver + 3.2.2 + compile + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop1 + org.apache.flink:flink-shaded-hadoop2 + org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.10 + org.apache.flink:flink-runtime_2.10 + org.apache.flink:flink-optimizer_2.10 + org.apache.flink:flink-clients_2.10 + org.apache.flink:flink-avro_2.10 + org.apache.flink:flink-examples-batch_2.10 + org.apache.flink:flink-examples-streaming_2.10 + org.apache.flink:flink-streaming-java_2.10 + + + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.amazonaws:aws-java-sdk + com.typesafe.akka:akka-actor_* + com.typesafe.akka:akka-remote_* + com.typesafe.akka:akka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + 
commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + com.twitter:chill-avro_* + com.twitter:chill-bijection_* + com.twitter:bijection-core_* + com.twitter:bijection-avro_* + commons-lang:commons-lang + junit:junit + de.javakaffee:kryo-serializers + joda-time:joda-time + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + xerces:* + + ** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + argo.streaming.AmsStreamStatus + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + + + diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Downtime.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Downtime.java new file mode 100644 index 00000000..b73e100d --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Downtime.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Downtime extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Downtime\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"start_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"end_time\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String start_time; + @Deprecated public java.lang.String end_time; + + /** + * Default constructor. + */ + public Downtime() {} + + /** + * All-args constructor. + */ + public Downtime(java.lang.String hostname, java.lang.String service, java.lang.String start_time, java.lang.String end_time) { + this.hostname = hostname; + this.service = service; + this.start_time = start_time; + this.end_time = end_time; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. 
+ public java.lang.Object get(int field$) { + switch (field$) { + case 0: return hostname; + case 1: return service; + case 2: return start_time; + case 3: return end_time; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: hostname = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: start_time = (java.lang.String)value$; break; + case 3: end_time = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'start_time' field. + */ + public java.lang.String getStartTime() { + return start_time; + } + + /** + * Sets the value of the 'start_time' field. + * @param value the value to set. + */ + public void setStartTime(java.lang.String value) { + this.start_time = value; + } + + /** + * Gets the value of the 'end_time' field. + */ + public java.lang.String getEndTime() { + return end_time; + } + + /** + * Sets the value of the 'end_time' field. + * @param value the value to set. + */ + public void setEndTime(java.lang.String value) { + this.end_time = value; + } + + /** Creates a new Downtime RecordBuilder */ + public static argo.avro.Downtime.Builder newBuilder() { + return new argo.avro.Downtime.Builder(); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Builder */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime.Builder other) { + return new argo.avro.Downtime.Builder(other); + } + + /** Creates a new Downtime RecordBuilder by copying an existing Downtime instance */ + public static argo.avro.Downtime.Builder newBuilder(argo.avro.Downtime other) { + return new argo.avro.Downtime.Builder(other); + } + + /** + * RecordBuilder for Downtime instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String hostname; + private java.lang.String service; + private java.lang.String start_time; + private java.lang.String end_time; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Downtime.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Downtime.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Downtime instance */ + private Builder(argo.avro.Downtime other) { + super(argo.avro.Downtime.SCHEMA$); + if (isValidValue(fields()[0], other.hostname)) { + this.hostname = data().deepCopy(fields()[0].schema(), other.hostname); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.start_time)) { + this.start_time = data().deepCopy(fields()[2].schema(), other.start_time); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.end_time)) { + this.end_time = data().deepCopy(fields()[3].schema(), other.end_time); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.Downtime.Builder setHostname(java.lang.String value) { + validate(fields()[0], value); + this.hostname = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.Downtime.Builder clearHostname() { + hostname = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.Downtime.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.Downtime.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'start_time' field */ + public java.lang.String getStartTime() { + return start_time; + } + + /** Sets the value of the 'start_time' field */ + public argo.avro.Downtime.Builder setStartTime(java.lang.String value) { + validate(fields()[2], value); + this.start_time = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'start_time' field has been set */ + public boolean hasStartTime() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'start_time' field */ + public argo.avro.Downtime.Builder clearStartTime() { + start_time = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'end_time' field */ + public java.lang.String getEndTime() { + return end_time; + } + + /** Sets the value of the 'end_time' field */ + public argo.avro.Downtime.Builder setEndTime(java.lang.String value) { + validate(fields()[3], value); + this.end_time = value; + 
fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'end_time' field has been set */ + public boolean hasEndTime() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'end_time' field */ + public argo.avro.Downtime.Builder clearEndTime() { + end_time = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public Downtime build() { + try { + Downtime record = new Downtime(); + record.hostname = fieldSetFlags()[0] ? this.hostname : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.start_time = fieldSetFlags()[2] ? this.start_time : (java.lang.String) defaultValue(fields()[2]); + record.end_time = fieldSetFlags()[3] ? this.end_time : (java.lang.String) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupEndpoint.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupEndpoint.java new file mode 100644 index 00000000..2386b1d2 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupEndpoint.java @@ -0,0 +1,336 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupEndpoint extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupEndpoint\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupEndpoint() {} + + /** + * All-args constructor. + */ + public GroupEndpoint(java.lang.String type, java.lang.String group, java.lang.String service, java.lang.String hostname, java.util.Map tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return service; + case 3: return hostname; + case 4: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: service = (java.lang.String)value$; break; + case 3: hostname = (java.lang.String)value$; break; + case 4: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupEndpoint RecordBuilder */ + public static argo.avro.GroupEndpoint.Builder newBuilder() { + return new argo.avro.GroupEndpoint.Builder(); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint.Builder other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** Creates a new GroupEndpoint RecordBuilder by copying an existing GroupEndpoint instance */ + public static argo.avro.GroupEndpoint.Builder newBuilder(argo.avro.GroupEndpoint other) { + return new argo.avro.GroupEndpoint.Builder(other); + } + + /** + * RecordBuilder for GroupEndpoint instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String service; + private java.lang.String hostname; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupEndpoint.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupEndpoint.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupEndpoint instance */ + private Builder(argo.avro.GroupEndpoint other) { + super(argo.avro.GroupEndpoint.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.service)) { + this.service = data().deepCopy(fields()[2].schema(), other.service); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.hostname)) { + this.hostname = data().deepCopy(fields()[3].schema(), other.hostname); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.tags)) { + this.tags = data().deepCopy(fields()[4].schema(), other.tags); + fieldSetFlags()[4] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupEndpoint.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupEndpoint.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder setService(java.lang.String value) { + validate(fields()[2], value); + this.service = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.GroupEndpoint.Builder clearService() { + service = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.String getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public 
argo.avro.GroupEndpoint.Builder setHostname(java.lang.String value) { + validate(fields()[3], value); + this.hostname = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.GroupEndpoint.Builder clearHostname() { + hostname = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder setTags(java.util.Map value) { + validate(fields()[4], value); + this.tags = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupEndpoint.Builder clearTags() { + tags = null; + fieldSetFlags()[4] = false; + return this; + } + + @Override + public GroupEndpoint build() { + try { + GroupEndpoint record = new GroupEndpoint(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.service = fieldSetFlags()[2] ? this.service : (java.lang.String) defaultValue(fields()[2]); + record.hostname = fieldSetFlags()[3] ? this.hostname : (java.lang.String) defaultValue(fields()[3]); + record.tags = fieldSetFlags()[4] ? this.tags : (java.util.Map) defaultValue(fields()[4]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupGroup.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupGroup.java new file mode 100644 index 00000000..a7712d67 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/GroupGroup.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class GroupGroup extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"GroupGroup\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"group\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"subgroup\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String group; + @Deprecated public java.lang.String subgroup; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public GroupGroup() {} + + /** + * All-args constructor. 
+ */ + public GroupGroup(java.lang.String type, java.lang.String group, java.lang.String subgroup, java.util.Map tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return group; + case 2: return subgroup; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: group = (java.lang.String)value$; break; + case 2: subgroup = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'group' field. + */ + public java.lang.String getGroup() { + return group; + } + + /** + * Sets the value of the 'group' field. + * @param value the value to set. + */ + public void setGroup(java.lang.String value) { + this.group = value; + } + + /** + * Gets the value of the 'subgroup' field. + */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** + * Sets the value of the 'subgroup' field. + * @param value the value to set. + */ + public void setSubgroup(java.lang.String value) { + this.subgroup = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new GroupGroup RecordBuilder */ + public static argo.avro.GroupGroup.Builder newBuilder() { + return new argo.avro.GroupGroup.Builder(); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing Builder */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup.Builder other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** Creates a new GroupGroup RecordBuilder by copying an existing GroupGroup instance */ + public static argo.avro.GroupGroup.Builder newBuilder(argo.avro.GroupGroup other) { + return new argo.avro.GroupGroup.Builder(other); + } + + /** + * RecordBuilder for GroupGroup instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String group; + private java.lang.String subgroup; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.GroupGroup.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.GroupGroup.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing GroupGroup instance */ + private Builder(argo.avro.GroupGroup other) { + super(argo.avro.GroupGroup.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.group)) { + this.group = data().deepCopy(fields()[1].schema(), other.group); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.subgroup)) { + this.subgroup = data().deepCopy(fields()[2].schema(), other.subgroup); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.GroupGroup.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.GroupGroup.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'group' field */ + public java.lang.String getGroup() { + return group; + } + + /** Sets the value of the 'group' field */ + public argo.avro.GroupGroup.Builder setGroup(java.lang.String value) { + validate(fields()[1], value); + this.group = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'group' field has been set */ + public boolean hasGroup() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'group' field */ + public argo.avro.GroupGroup.Builder clearGroup() { + group = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'subgroup' field */ + public java.lang.String getSubgroup() { + return subgroup; + } + + /** Sets the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder setSubgroup(java.lang.String value) { + validate(fields()[2], value); + this.subgroup = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'subgroup' field has been set */ + public boolean hasSubgroup() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'subgroup' field */ + public argo.avro.GroupGroup.Builder clearSubgroup() { + subgroup = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return 
fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.GroupGroup.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public GroupGroup build() { + try { + GroupGroup record = new GroupGroup(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.group = fieldSetFlags()[1] ? this.group : (java.lang.String) defaultValue(fields()[1]); + record.subgroup = fieldSetFlags()[2] ? this.subgroup : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricData.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricData.java new file mode 100644 index 00000000..77800770 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricData.java @@ -0,0 +1,811 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; + +import org.apache.avro.specific.SpecificData; + +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricData extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + private static final long serialVersionUID = 3861438289744595870L; + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"hostname\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"status\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"monitoring_host\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"actual_data\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"summary\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"message\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String timestamp; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String hostname; + @Deprecated public java.lang.String metric; + @Deprecated public java.lang.String status; + @Deprecated public java.lang.String monitoring_host; + @Deprecated public java.lang.String actual_data; + @Deprecated public java.lang.String summary; + @Deprecated public java.lang.String message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public MetricData() {} + + /** + * All-args constructor. 
+ * @param timestamp The new value for timestamp + * @param service The new value for service + * @param hostname The new value for hostname + * @param metric The new value for metric + * @param status The new value for status + * @param monitoring_host The new value for monitoring_host + * @param actual_data The new value for actual_data + * @param summary The new value for summary + * @param message The new value for message + * @param tags The new value for tags + */ + public MetricData(java.lang.String timestamp, java.lang.String service, java.lang.String hostname, java.lang.String metric, java.lang.String status, java.lang.String monitoring_host, java.lang.String actual_data, java.lang.String summary, java.lang.String message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.actual_data = actual_data; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return actual_data; + case 7: return summary; + case 8: return message; + case 9: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: hostname = (java.lang.String)value$; break; + case 3: metric = (java.lang.String)value$; break; + case 4: status = (java.lang.String)value$; break; + case 5: monitoring_host = (java.lang.String)value$; break; + case 6: actual_data = (java.lang.String)value$; break; + case 7: summary = (java.lang.String)value$; break; + case 8: message = (java.lang.String)value$; break; + case 9: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value of the 'timestamp' field. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.String value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + * @return The value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value of the 'hostname' field. + */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.String value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. + * @return The value of the 'metric' field. 
+ */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + * @return The value of the 'status' field. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.String value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value of the 'monitoring_host' field. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. + */ + public void setMonitoringHost(java.lang.String value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value of the 'actual_data' field. + */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value the value to set. + */ + public void setActualData(java.lang.String value) { + this.actual_data = value; + } + + /** + * Gets the value of the 'summary' field. + * @return The value of the 'summary' field. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.String value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + * @return The value of the 'message' field. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.String value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + * @return The value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** + * Creates a new MetricData RecordBuilder. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder() { + return new argo.avro.MetricData.Builder(); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing Builder. + * @param other The existing builder to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData.Builder other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * Creates a new MetricData RecordBuilder by copying an existing MetricData instance. + * @param other The existing instance to copy. + * @return A new MetricData RecordBuilder + */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String timestamp; + private java.lang.String service; + private java.lang.String hostname; + private java.lang.String metric; + private java.lang.String status; + private java.lang.String monitoring_host; + private java.lang.String actual_data; + private java.lang.String summary; + private java.lang.String message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(SCHEMA$); + } + + /** + * Creates a Builder by copying an existing Builder. + * @param other The existing Builder to copy. + */ + private Builder(argo.avro.MetricData.Builder other) { + super(other); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Creates a Builder by copying an existing MetricData instance + * @param other The existing instance to copy. 
+ */ + private Builder(argo.avro.MetricData other) { + super(SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.actual_data)) { + this.actual_data = data().deepCopy(fields()[6].schema(), other.actual_data); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.summary)) { + this.summary = data().deepCopy(fields()[7].schema(), other.summary); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.message)) { + this.message = data().deepCopy(fields()[8].schema(), other.message); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.tags)) { + this.tags = data().deepCopy(fields()[9].schema(), other.tags); + fieldSetFlags()[9] = true; + } + } + + /** + * Gets the value of the 'timestamp' field. + * @return The value. + */ + public java.lang.String getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value The value of 'timestamp'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTimestamp(java.lang.String value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** + * Checks whether the 'timestamp' field has been set. + * @return True if the 'timestamp' field has been set, false otherwise. + */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + + /** + * Clears the value of the 'timestamp' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** + * Gets the value of the 'service' field. + * @return The value. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value The value of 'service'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** + * Checks whether the 'service' field has been set. + * @return True if the 'service' field has been set, false otherwise. + */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + + /** + * Clears the value of the 'service' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** + * Gets the value of the 'hostname' field. + * @return The value. 
+ */ + public java.lang.String getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value The value of 'hostname'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setHostname(java.lang.String value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** + * Checks whether the 'hostname' field has been set. + * @return True if the 'hostname' field has been set, false otherwise. + */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + + /** + * Clears the value of the 'hostname' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** + * Gets the value of the 'metric' field. + * @return The value. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value The value of 'metric'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMetric(java.lang.String value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** + * Checks whether the 'metric' field has been set. + * @return True if the 'metric' field has been set, false otherwise. + */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + + /** + * Clears the value of the 'metric' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** + * Gets the value of the 'status' field. + * @return The value. + */ + public java.lang.String getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value The value of 'status'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setStatus(java.lang.String value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** + * Checks whether the 'status' field has been set. + * @return True if the 'status' field has been set, false otherwise. + */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + + /** + * Clears the value of the 'status' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearStatus() { + status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** + * Gets the value of the 'monitoring_host' field. + * @return The value. + */ + public java.lang.String getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value The value of 'monitoring_host'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMonitoringHost(java.lang.String value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** + * Checks whether the 'monitoring_host' field has been set. + * @return True if the 'monitoring_host' field has been set, false otherwise. + */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + + /** + * Clears the value of the 'monitoring_host' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** + * Gets the value of the 'actual_data' field. + * @return The value. 
+ */ + public java.lang.String getActualData() { + return actual_data; + } + + /** + * Sets the value of the 'actual_data' field. + * @param value The value of 'actual_data'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setActualData(java.lang.String value) { + validate(fields()[6], value); + this.actual_data = value; + fieldSetFlags()[6] = true; + return this; + } + + /** + * Checks whether the 'actual_data' field has been set. + * @return True if the 'actual_data' field has been set, false otherwise. + */ + public boolean hasActualData() { + return fieldSetFlags()[6]; + } + + + /** + * Clears the value of the 'actual_data' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearActualData() { + actual_data = null; + fieldSetFlags()[6] = false; + return this; + } + + /** + * Gets the value of the 'summary' field. + * @return The value. + */ + public java.lang.String getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value The value of 'summary'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setSummary(java.lang.String value) { + validate(fields()[7], value); + this.summary = value; + fieldSetFlags()[7] = true; + return this; + } + + /** + * Checks whether the 'summary' field has been set. + * @return True if the 'summary' field has been set, false otherwise. + */ + public boolean hasSummary() { + return fieldSetFlags()[7]; + } + + + /** + * Clears the value of the 'summary' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearSummary() { + summary = null; + fieldSetFlags()[7] = false; + return this; + } + + /** + * Gets the value of the 'message' field. + * @return The value. + */ + public java.lang.String getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value The value of 'message'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setMessage(java.lang.String value) { + validate(fields()[8], value); + this.message = value; + fieldSetFlags()[8] = true; + return this; + } + + /** + * Checks whether the 'message' field has been set. + * @return True if the 'message' field has been set, false otherwise. + */ + public boolean hasMessage() { + return fieldSetFlags()[8]; + } + + + /** + * Clears the value of the 'message' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearMessage() { + message = null; + fieldSetFlags()[8] = false; + return this; + } + + /** + * Gets the value of the 'tags' field. + * @return The value. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value The value of 'tags'. + * @return This builder. + */ + public argo.avro.MetricData.Builder setTags(java.util.Map value) { + validate(fields()[9], value); + this.tags = value; + fieldSetFlags()[9] = true; + return this; + } + + /** + * Checks whether the 'tags' field has been set. + * @return True if the 'tags' field has been set, false otherwise. + */ + public boolean hasTags() { + return fieldSetFlags()[9]; + } + + + /** + * Clears the value of the 'tags' field. + * @return This builder. + */ + public argo.avro.MetricData.Builder clearTags() { + tags = null; + fieldSetFlags()[9] = false; + return this; + } + + @Override + public MetricData build() { + try { + MetricData record = new MetricData(); + record.timestamp = fieldSetFlags()[0] ? 
this.timestamp : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.String) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.String) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.String) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.String) defaultValue(fields()[5]); + record.actual_data = fieldSetFlags()[6] ? this.actual_data : (java.lang.String) defaultValue(fields()[6]); + record.summary = fieldSetFlags()[7] ? this.summary : (java.lang.String) defaultValue(fields()[7]); + record.message = fieldSetFlags()[8] ? this.message : (java.lang.String) defaultValue(fields()[8]); + record.tags = fieldSetFlags()[9] ? this.tags : (java.util.Map) defaultValue(fields()[9]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricProfile.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricProfile.java new file mode 100644 index 00000000..1fe15e09 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/MetricProfile.java @@ -0,0 +1,286 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricProfile extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricProfile\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"profile\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"service\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"metric\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":{\"type\":\"string\",\"avro.java.string\":\"String\"},\"avro.java.string\":\"String\"}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String profile; + @Deprecated public java.lang.String service; + @Deprecated public java.lang.String metric; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. + */ + public MetricProfile() {} + + /** + * All-args constructor. + */ + public MetricProfile(java.lang.String profile, java.lang.String service, java.lang.String metric, java.util.Map tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return profile; + case 1: return service; + case 2: return metric; + case 3: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. 
+ @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: profile = (java.lang.String)value$; break; + case 1: service = (java.lang.String)value$; break; + case 2: metric = (java.lang.String)value$; break; + case 3: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'profile' field. + */ + public java.lang.String getProfile() { + return profile; + } + + /** + * Sets the value of the 'profile' field. + * @param value the value to set. + */ + public void setProfile(java.lang.String value) { + this.profile = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.String getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.String value) { + this.service = value; + } + + /** + * Gets the value of the 'metric' field. + */ + public java.lang.String getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.String value) { + this.metric = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricProfile RecordBuilder */ + public static argo.avro.MetricProfile.Builder newBuilder() { + return new argo.avro.MetricProfile.Builder(); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile.Builder other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** Creates a new MetricProfile RecordBuilder by copying an existing MetricProfile instance */ + public static argo.avro.MetricProfile.Builder newBuilder(argo.avro.MetricProfile other) { + return new argo.avro.MetricProfile.Builder(other); + } + + /** + * RecordBuilder for MetricProfile instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String profile; + private java.lang.String service; + private java.lang.String metric; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricProfile.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricProfile.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing MetricProfile instance */ + private Builder(argo.avro.MetricProfile other) { + super(argo.avro.MetricProfile.SCHEMA$); + if (isValidValue(fields()[0], other.profile)) { + this.profile = data().deepCopy(fields()[0].schema(), other.profile); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.metric)) { + this.metric = data().deepCopy(fields()[2].schema(), other.metric); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.tags)) { + this.tags = data().deepCopy(fields()[3].schema(), other.tags); + fieldSetFlags()[3] = true; + } + } + + /** Gets the value of the 'profile' field */ + public java.lang.String getProfile() { + return profile; + } + + /** Sets the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder setProfile(java.lang.String value) { + validate(fields()[0], value); + this.profile = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'profile' field has been set */ + public boolean hasProfile() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'profile' field */ + public argo.avro.MetricProfile.Builder clearProfile() { + profile = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.String getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricProfile.Builder setService(java.lang.String value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricProfile.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.String getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder setMetric(java.lang.String value) { + validate(fields()[2], value); + this.metric = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricProfile.Builder clearMetric() { + metric = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder setTags(java.util.Map value) { + validate(fields()[3], value); + this.tags = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks 
whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricProfile.Builder clearTags() { + tags = null; + fieldSetFlags()[3] = false; + return this; + } + + @Override + public MetricProfile build() { + try { + MetricProfile record = new MetricProfile(); + record.profile = fieldSetFlags()[0] ? this.profile : (java.lang.String) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.String) defaultValue(fields()[1]); + record.metric = fieldSetFlags()[2] ? this.metric : (java.lang.String) defaultValue(fields()[2]); + record.tags = fieldSetFlags()[3] ? this.tags : (java.util.Map) defaultValue(fields()[3]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Weight.java b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Weight.java new file mode 100644 index 00000000..0238d7cf --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/avro/Weight.java @@ -0,0 +1,236 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class Weight extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Weight\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"type\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"site\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"weight\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + @Deprecated public java.lang.String type; + @Deprecated public java.lang.String site; + @Deprecated public java.lang.String weight; + + /** + * Default constructor. + */ + public Weight() {} + + /** + * All-args constructor. + */ + public Weight(java.lang.String type, java.lang.String site, java.lang.String weight) { + this.type = type; + this.site = site; + this.weight = weight; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return type; + case 1: return site; + case 2: return weight; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: type = (java.lang.String)value$; break; + case 1: site = (java.lang.String)value$; break; + case 2: weight = (java.lang.String)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'type' field. + */ + public java.lang.String getType() { + return type; + } + + /** + * Sets the value of the 'type' field. + * @param value the value to set. + */ + public void setType(java.lang.String value) { + this.type = value; + } + + /** + * Gets the value of the 'site' field. + */ + public java.lang.String getSite() { + return site; + } + + /** + * Sets the value of the 'site' field. 
+ * @param value the value to set. + */ + public void setSite(java.lang.String value) { + this.site = value; + } + + /** + * Gets the value of the 'weight' field. + */ + public java.lang.String getWeight() { + return weight; + } + + /** + * Sets the value of the 'weight' field. + * @param value the value to set. + */ + public void setWeight(java.lang.String value) { + this.weight = value; + } + + /** Creates a new Weight RecordBuilder */ + public static argo.avro.Weight.Builder newBuilder() { + return new argo.avro.Weight.Builder(); + } + + /** Creates a new Weight RecordBuilder by copying an existing Builder */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight.Builder other) { + return new argo.avro.Weight.Builder(other); + } + + /** Creates a new Weight RecordBuilder by copying an existing Weight instance */ + public static argo.avro.Weight.Builder newBuilder(argo.avro.Weight other) { + return new argo.avro.Weight.Builder(other); + } + + /** + * RecordBuilder for Weight instances. + */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String type; + private java.lang.String site; + private java.lang.String weight; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.Weight.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.Weight.Builder other) { + super(other); + } + + /** Creates a Builder by copying an existing Weight instance */ + private Builder(argo.avro.Weight other) { + super(argo.avro.Weight.SCHEMA$); + if (isValidValue(fields()[0], other.type)) { + this.type = data().deepCopy(fields()[0].schema(), other.type); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.site)) { + this.site = data().deepCopy(fields()[1].schema(), other.site); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.weight)) { + this.weight = data().deepCopy(fields()[2].schema(), other.weight); + fieldSetFlags()[2] = true; + } + } + + /** Gets the value of the 'type' field */ + public java.lang.String getType() { + return type; + } + + /** Sets the value of the 'type' field */ + public argo.avro.Weight.Builder setType(java.lang.String value) { + validate(fields()[0], value); + this.type = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'type' field has been set */ + public boolean hasType() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'type' field */ + public argo.avro.Weight.Builder clearType() { + type = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'site' field */ + public java.lang.String getSite() { + return site; + } + + /** Sets the value of the 'site' field */ + public argo.avro.Weight.Builder setSite(java.lang.String value) { + validate(fields()[1], value); + this.site = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'site' field has been set */ + public boolean hasSite() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'site' field */ + public argo.avro.Weight.Builder clearSite() { + site = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'weight' field */ + public java.lang.String getWeight() { + return weight; + } + + /** Sets the value of the 'weight' field */ + public argo.avro.Weight.Builder setWeight(java.lang.String value) { + validate(fields()[2], value); + this.weight = 
value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'weight' field has been set */ + public boolean hasWeight() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'weight' field */ + public argo.avro.Weight.Builder clearWeight() { + weight = null; + fieldSetFlags()[2] = false; + return this; + } + + @Override + public Weight build() { + try { + Weight record = new Weight(); + record.type = fieldSetFlags()[0] ? this.type : (java.lang.String) defaultValue(fields()[0]); + record.site = fieldSetFlags()[1] ? this.site : (java.lang.String) defaultValue(fields()[1]); + record.weight = fieldSetFlags()[2] ? this.weight : (java.lang.String) defaultValue(fields()[2]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java new file mode 100644 index 00000000..68808965 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java @@ -0,0 +1,753 @@ +package argo.streaming; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Date; +import java.util.Map; +import java.util.Properties; + +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.Decoder; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; +import org.apache.commons.codec.binary.Base64; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.streaming.api.functions.co.RichCoFlatMapFunction; +import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09; +import org.apache.flink.streaming.util.serialization.SimpleStringSchema; +import org.apache.flink.util.Collector; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import org.apache.flink.api.java.tuple.Tuple3; +import org.apache.flink.core.fs.FileSystem; +import status.StatusManager; +import sync.EndpointGroupManagerV2; +import sync.MetricProfileManager; + +/** + * Flink Job : Streaming status computation with multiple destinations (hbase, + * kafka, fs) job required cli parameters + * --ams.endpoint : ARGO messaging api endpoint to connect to msg.example.com + * --ams.port : ARGO messaging api port + * --ams.token : ARGO messaging api token + * --ams.project : ARGO messaging api project to connect to + * --ams.sub.metric : ARGO messaging subscription to pull metric data from + * --ams.sub.sync : ARGO messaging 
subscription to pull sync data from + * --sync.mps : metric-profile file used + * --sync.egp : endpoint-group file used for topology + * --sync.aps : availability profile used + * --sync.ops : operations profile used + * --sync.downtimes : initial downtime file (same for run date) + * --report : report name + * --report.uuid : report uuid + * + * Job optional cli parameters: + * --ams.batch : num of messages to be retrieved per request to AMS service + * --ams.interval : interval (in ms) between AMS service requests + * --kafka.servers : list of kafka servers to connect to + * --kafka.topic : kafka topic name to publish events + * --mongo.uri : mongo uri to store latest status results + * --mongo.method : mongo method to use (insert,upsert) + * --hbase.master : hbase master hostname + * --hbase.port : hbase master.port + * --hbase.zk.quorum : hbase zookeeper quorum + * --hbase.namespace : hbase namespace --hbase.table : hbase table name + * --fs.output : filesystem output path (local or hdfs) mostly for debugging + * --ams.proxy : http proxy url + * --timeout : time in ms - Optional timeout parameter (used in notifications) + * --daily : true/false - Optional daily event generation parameter (not needed in notifications) + */ +public class AmsStreamStatus { + // setup logger + + static Logger LOG = LoggerFactory.getLogger(AmsStreamStatus.class); + + /** + * Sets configuration parameters to streaming environment + * + * @param config A StatusConfig object that holds configuration parameters + * for this job + * @return Stream execution environment + */ + private static StreamExecutionEnvironment setupEnvironment(StatusConfig config) { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.getConfig().setGlobalJobParameters(config.getParameters()); + + return env; + } + + /** + * Check if flink job has been called with ams rate params + */ + public static boolean hasAmsRateArgs(ParameterTool paramTool) { + String args[] = {"ams.batch", "ams.interval"}; + return hasArgs(args, paramTool); + } + + public static boolean hasKafkaArgs(ParameterTool paramTool) { + String kafkaArgs[] = {"kafka.servers", "kafka.topic"}; + return hasArgs(kafkaArgs, paramTool); + } + + public static boolean hasHbaseArgs(ParameterTool paramTool) { + String hbaseArgs[] = {"hbase.master", "hbase.master.port", "hbase.zk.quorum", "hbase.namespace", + "hbase.table"}; + return hasArgs(hbaseArgs, paramTool); + } + + public static boolean hasFsOutArgs(ParameterTool paramTool) { + String fsOutArgs[] = {"fs.output"}; + return hasArgs(fsOutArgs, paramTool); + } + + public static boolean hasMongoArgs(ParameterTool paramTool) { + String mongoArgs[] = {"mongo.uri", "mongo.method"}; + return hasArgs(mongoArgs, paramTool); + } + + public static boolean hasArgs(String[] reqArgs, ParameterTool paramTool) { + + for (String reqArg : reqArgs) { + if (!paramTool.has(reqArg)) { + return false; + } + } + + return true; + } + + /** + * Main dataflow of flink job + */ + public static void main(String[] args) throws Exception { + + // Initialize cli parameter tool + final ParameterTool parameterTool = ParameterTool.fromArgs(args); + + final StatusConfig conf = new StatusConfig(parameterTool); + + StreamExecutionEnvironment see = setupEnvironment(conf); + see.setParallelism(1); + + // Initialize Input Source : ARGO Messaging Source + String endpoint = parameterTool.getRequired("ams.endpoint"); + String port = parameterTool.getRequired("ams.port"); + String token = parameterTool.getRequired("ams.token"); +
String project = parameterTool.getRequired("ams.project"); + String subMetric = parameterTool.getRequired("ams.sub.metric"); + String subSync = parameterTool.getRequired("ams.sub.sync"); + + // set ams client batch and interval to default values + int batch = 1; + long interval = 100L; + + if (hasAmsRateArgs(parameterTool)) { + batch = parameterTool.getInt("ams.batch"); + interval = parameterTool.getLong("ams.interval"); + } + + // Establish the metric data AMS stream + // Ingest sync avro encoded data from AMS endpoint + ArgoMessagingSource amsMetric = new ArgoMessagingSource(endpoint, port, token, project, subMetric, batch, interval); + ArgoMessagingSource amsSync = new ArgoMessagingSource(endpoint, port, token, project, subSync, batch, interval); + + if (parameterTool.has("ams.verify")) { + boolean verify = parameterTool.getBoolean("ams.verify"); + amsMetric.setVerify(verify); + amsSync.setVerify(verify); + } + + if (parameterTool.has("ams.proxy")) { + String proxyURL = parameterTool.get("ams.proxy"); + amsMetric.setProxy(proxyURL); + amsSync.setProxy(proxyURL); + } + + DataStream metricAMS = see.addSource(amsMetric).setParallelism(1); + + // Establish the sync data AMS stream + DataStream syncAMS = see.addSource(amsSync).setParallelism(1); + + // Forward syncAMS data to two paths + // - one with parallelism 1 to connect in the first processing step and + // - one with max parallelism for status event generation step + // (scalable) + DataStream syncA = syncAMS.forward(); + DataStream syncB = syncAMS.broadcast(); + + DataStream> groupMdata = metricAMS.connect(syncA) + .flatMap(new MetricDataWithGroup(conf)).setParallelism(1); + + DataStream events = groupMdata.connect(syncB).flatMap(new StatusMap(conf)); + events.print(); + if (hasKafkaArgs(parameterTool)) { + // Initialize kafka parameters + String kafkaServers = parameterTool.get("kafka.servers"); + String kafkaTopic = parameterTool.get("kafka.topic"); + Properties kafkaProps = new Properties(); + kafkaProps.setProperty("bootstrap.servers", kafkaServers); + FlinkKafkaProducer09 kSink = new FlinkKafkaProducer09(kafkaTopic, new SimpleStringSchema(), + kafkaProps); + + events.addSink(kSink); + } + + if (hasHbaseArgs(parameterTool)) { + // Initialize Output : Hbase Output Format + HBaseOutputFormat hbf = new HBaseOutputFormat(); + hbf.setMaster(parameterTool.get("hbase.master")); + hbf.setMasterPort(parameterTool.get("hbase.master.port")); + hbf.setZkQuorum(parameterTool.get("hbase.zk.quorum")); + hbf.setZkPort(parameterTool.get("hbase.zk.port")); + hbf.setNamespace(parameterTool.get("hbase.namespace")); + hbf.setTableName(parameterTool.get("hbase.table")); + hbf.setReport(parameterTool.get("report")); + events.writeUsingOutputFormat(hbf); + } + + if (hasMongoArgs(parameterTool)) { + + MongoStatusOutput mongoOut = new MongoStatusOutput(parameterTool.get("mongo.uri"), "status_metrics", + "status_endpoints", "status_services", "status_endpoint_groups", parameterTool.get("mongo.method"), + parameterTool.get("report.uuid")); + events.writeUsingOutputFormat(mongoOut); + } + + if (hasFsOutArgs(parameterTool)) { + events.writeAsText(parameterTool.get("fs.output"),FileSystem.WriteMode.OVERWRITE); + //events.print(); + } + + // Create a job title message to discern job in flink dashboard/cli + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Streaming status using data from "); + jobTitleSB.append(endpoint); + jobTitleSB.append(":"); + jobTitleSB.append(port); + jobTitleSB.append("/v1/projects/"); + jobTitleSB.append(project); 
+ jobTitleSB.append("/subscriptions/["); + jobTitleSB.append(subMetric); + jobTitleSB.append(","); + jobTitleSB.append(subSync); + jobTitleSB.append("]"); + + // Execute flink dataflow + see.execute(jobTitleSB.toString()); + } + + /** + * MetricDataWithGroup implements a map function that adds group information + * to the metric data message + */ + private static class MetricDataWithGroup extends RichCoFlatMapFunction> { + + private static final long serialVersionUID = 1L; + + public EndpointGroupManagerV2 egp; + public MetricProfileManager mps; + + public StatusConfig config; + + public MetricDataWithGroup(StatusConfig config) { + LOG.info("Created new Status map"); + this.config = config; + } + + /** + * Initializes constructs in the beginning of operation + * + * @param parameters Configuration parameters to initialize structures + * @throws URISyntaxException + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException, URISyntaxException { + + SyncData sd = new SyncData(); + + ArrayList mpsList = sd.readMetricProfile(config.mps); + ArrayList egpList = sd.readGroupEndpoint(config.egp); + + mps = new MetricProfileManager(); + mps.loadFromList(mpsList); + String validMetricProfile = mps.getProfiles().get(0); + ArrayList validServices = mps.getProfileServices(validMetricProfile); + + // Trim profile services + ArrayList egpTrim = new ArrayList(); + // Use optimized Endpoint Group Manager + for (GroupEndpoint egpItem : egpList) { + if (validServices.contains(egpItem.getService())) { + egpTrim.add(egpItem); + } + } + egp = new EndpointGroupManagerV2(); + egp.loadFromList(egpTrim); + + } + + /** + * The main flat map function that accepts metric data and generates + * metric data with group information + * + * @param value Input metric data in base64 encoded format from AMS + * service + * @param out Collection of generated Tuple2 objects + */ + @Override + public void flatMap1(String value, Collector> out) + throws IOException, ParseException { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(value); + // parse the json field "data" and read it as string + // this is the base64 string payload + String data = jRoot.getAsJsonObject().get("data").getAsString(); + // Decode from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + // Decode from avro + DatumReader avroReader = new SpecificDatumReader(MetricData.getClassSchema(), + MetricData.getClassSchema(), new SpecificData()); + Decoder decoder = DecoderFactory.get().binaryDecoder(decoded64, null); + MetricData item; + + try { + item = avroReader.read(null, decoder); + } catch (java.io.EOFException ex) { + //convert from old to new + avroReader = new SpecificDatumReader(MetricData.getClassSchema(), MetricData.getClassSchema()); + decoder = DecoderFactory.get().binaryDecoder(decoded64, null); + item = avroReader.read(null, decoder); + } + + //System.out.println("metric data item received" + item.toString()); + // generate events and get them + String service = item.getService().toString(); + String hostname = item.getHostname().toString(); + + ArrayList groups = egp.getGroup(hostname, service); + + //System.out.println(egp.getList()); + for (String groupItem : groups) { + String url = egp.getTagUrl(groupItem, hostname, service); + Tuple3 curItem = new Tuple3(); + curItem.f0 = groupItem; + curItem.f1 = url; + curItem.f2 = item; + out.collect(curItem); + //System.out.println("item enriched: " + curItem.toString()); + } + 
+ } + + public void flatMap2(String value, Collector> out) + throws IOException, ParseException { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(value); + // parse the json field "data" and read it as string + // this is the base64 string payload + String data = jRoot.getAsJsonObject().get("data").getAsString(); + // Decode from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + JsonElement jAttr = jRoot.getAsJsonObject().get("attributes"); + Map attr = SyncParse.parseAttributes(jAttr); + if (attr.containsKey("type")) { + + String sType = attr.get("type"); + if (sType.equalsIgnoreCase("metric_profile")) { + // Update mps + ArrayList mpsList = SyncParse.parseMetricProfile(decoded64); + mps = new MetricProfileManager(); + mps.loadFromList(mpsList); + } else if (sType.equals("group_endpoint")) { + // Update egp + ArrayList egpList = SyncParse.parseGroupEndpoint(decoded64); + egp = new EndpointGroupManagerV2(); + + String validMetricProfile = mps.getProfiles().get(0); + ArrayList validServices = mps.getProfileServices(validMetricProfile); + // Trim profile services + ArrayList egpTrim = new ArrayList(); + // Use optimized Endpoint Group Manager + for (GroupEndpoint egpItem : egpList) { + if (validServices.contains(egpItem.getService())) { + egpTrim.add(egpItem); + } + } + } + } + + } + + } + + /** + * StatusMap implements a rich flat map function which holds status + * information for all entities in topology and for each received metric + * generates the appropriate status events + */ + private static class StatusMap extends RichCoFlatMapFunction, String, String> { + + private static final long serialVersionUID = 1L; + + private String pID; + + public StatusManager sm; + + public StatusConfig config; + + public int initStatus; + + public StatusMap(StatusConfig config) { + LOG.info("Created new Status map"); + this.config = config; + } + + /** + * Initializes constructs in the beginning of operation + * + * @param parameters Configuration parameters to initialize structures + * @throws URISyntaxException + */ + @Override + public void open(Configuration parameters) throws IOException, ParseException, URISyntaxException { + + pID = Integer.toString(getRuntimeContext().getIndexOfThisSubtask()); + + SyncData sd = new SyncData(); + + String opsJSON = sd.readText(config.ops); + String apsJSON = sd.readText(config.aps); + ArrayList downList = sd.readDowntime(config.downtime); + ArrayList mpsList = sd.readMetricProfile(config.mps); + ArrayList egpListFull = sd.readGroupEndpoint(config.egp); + + // create a new status manager + sm = new StatusManager(); + sm.setTimeout(config.timeout); + sm.setReport(config.report); + // load all the connector data + sm.loadAll(config.runDate, downList, egpListFull, mpsList, apsJSON, opsJSON); + + // Set the default status as integer + initStatus = sm.getOps().getIntStatus(config.initStatus); + LOG.info("Initialized status manager:" + pID + " (with timeout:" + sm.getTimeout() + ")"); + + } + + /** + * The main flat map function that accepts metric data and generates + * status events + * + * @param value Input metric data in base64 encoded format from AMS + * service + * @param out Collection of generated status events as json strings + */ + @Override + public void flatMap1(Tuple3 value, Collector out) + throws IOException, ParseException { + + MetricData item = value.f2; + String group = value.f0; + String url = value.f1; + + String service = null, hostname = null, metric = 
null, status = null, tsMon = null, monHost = null, message = null, summary = null; + + if (item.getService() != null) { + + service = item.getService(); + } + if (item.getHostname() != null) { + hostname = item.getHostname(); + } + if (item.getMetric() != null) { + metric = item.getMetric(); + } + if (item.getStatus() != null) { + status = item.getStatus(); + } + if (item.getTimestamp() != null) { + tsMon = item.getTimestamp(); + } + if (item.getHostname() != null) { + monHost = item.getMonitoringHost(); + } + if (item.getMessage() != null) { + message = item.getMessage(); + } + if (item.getSummary() != null) { + summary = item.getSummary(); + } + +// +// String service = item.getService().toString(); +// String hostname = item.getHostname().toString(); +// String metric = item.getMetric().toString(); +// String status = item.getStatus().toString(); +// String tsMon = item.getTimestamp().toString(); +// String monHost = item.getMonitoringHost().toString(); +// String message = item.getMessage().toString(); +// String summary = item.getSummary().toString(); +// + // if daily generation is enable check if has day changed? + if (config.daily + && sm.hasDayChanged(sm.getTsLatest(), tsMon)) { + ArrayList eventsDaily = sm.dumpStatus(tsMon); + sm.setTsLatest(tsMon); + for (String event : eventsDaily) { + out.collect(event); + LOG.info("sm-" + pID + ": daily event produced: " + event); + } + } + + // check if group is handled by this operator instance - if not + // construct the group based on sync data + if (!sm.hasGroup(group)) { + // Get start of the day to create new entries + Date dateTS = sm.setDate(tsMon); + sm.addNewGroup(group, initStatus, dateTS); + } + + ArrayList events = sm.setStatus(group, service, hostname, metric, status, monHost, tsMon, summary, message, url); + + for (String event : events) { + out.collect(event); + LOG.info("sm-" + pID + ": event produced: " + item); + } + } + + public void flatMap2(String value, Collector out) throws IOException, ParseException { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(value); + // parse the json field "data" and read it as string + // this is the base64 string payload + String data = jRoot.getAsJsonObject().get("data").getAsString(); + // Decode from base64 + byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); + JsonElement jAttr = jRoot.getAsJsonObject().get("attributes"); + + Map attr = SyncParse.parseAttributes(jAttr); + // The sync dataset should have a type and report attribute and report should be the job's report + if (attr.containsKey("type") && attr.containsKey("report") && attr.get("report") == config.report) { + + String sType = attr.get("type"); + LOG.info("Accepted " + sType + " for report: " + attr.get("report")); + if (sType.equalsIgnoreCase("metric_profile")) { + // Update mps + ArrayList mpsList = SyncParse.parseMetricProfile(decoded64); + sm.mps = new MetricProfileManager(); + sm.mps.loadFromList(mpsList); + } else if (sType.equals("group_endpoints")) { + // Update egp + ArrayList egpList = SyncParse.parseGroupEndpoint(decoded64); + + String validMetricProfile = sm.mps.getProfiles().get(0); + ArrayList validServices = sm.mps.getProfileServices(validMetricProfile); + // Trim profile services + ArrayList egpTrim = new ArrayList(); + // Use optimized Endpoint Group Manager + for (GroupEndpoint egpItem : egpList) { + if (validServices.contains(egpItem.getService())) { + egpTrim.add(egpItem); + } + } + sm.egp = new EndpointGroupManagerV2(); + 
sm.egp.loadFromList(egpTrim); + } else if (sType.equals("downtimes") && attr.containsKey("partition_date")) { + String pDate = attr.get("partition_date"); + ArrayList<Downtime> downList = SyncParse.parseDowntimes(decoded64); + // Update downtime cache in status manager + sm.addDowntimeSet(pDate, downList); + } + } else { + LOG.info("Declined " + attr.get("type") + "for report: " + attr.get("report")); + } + + } + + } + + /** + * HbaseOutputFormat implements a custom output format for storing results + * in hbase + */ + private static class HBaseOutputFormat implements OutputFormat<String> { + + private String report = null; + private String master = null; + private String masterPort = null; + private String zkQuorum = null; + private String zkPort = null; + private String namespace = null; + private String tname = null; + private Connection connection = null; + private Table ht = null; + + private static final long serialVersionUID = 1L; + + // Setters + public void setMasterPort(String masterPort) { + this.masterPort = masterPort; + } + + public void setMaster(String master) { + this.master = master; + } + + public void setZkQuorum(String zkQuorum) { + this.zkQuorum = zkQuorum; + } + + public void setZkPort(String zkPort) { + this.zkPort = zkPort; + } + + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + public void setTableName(String tname) { + this.tname = tname; + } + + public void setReport(String report) { + this.report = report; + } + + @Override + public void configure(Configuration parameters) { + + } + + /** + * Structure initialization + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Create hadoop based configuration for hclient to use + org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create(); + // Modify configuration to job needs + config.setInt("timeout", 120000); + if (masterPort != null && !masterPort.isEmpty()) { + config.set("hbase.master", master + ":" + masterPort); + } else { + config.set("hbase.master", master + ":60000"); + } + + config.set("hbase.zookeeper.quorum", zkQuorum); + config.set("hbase.zookeeper.property.clientPort", (zkPort)); + // Create the connection + connection = ConnectionFactory.createConnection(config); + if (namespace != null) { + ht = connection.getTable(TableName.valueOf(namespace + ":" + tname)); + } else { + ht = connection.getTable(TableName.valueOf(tname)); + } + + } + + /** + * Extract json representation as string to be used as a field value + */ + private String extractJson(String field, JsonObject root) { + JsonElement el = root.get(field); + if (el != null && !(el.isJsonNull())) { + + return el.getAsString(); + + } + return ""; + } + + /** + * Accepts status event as json string and stores it in hbase table + * + * @param record A string with json representation of a status event + */ + @Override + public void writeRecord(String record) throws IOException { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonObject jRoot = jsonParser.parse(record).getAsJsonObject(); + // Get fields + + String rep = this.report; + String tp = extractJson("type", jRoot); + String dt = extractJson("date", jRoot); + String eGroup = extractJson("endpoint_group", jRoot); + String service = extractJson("service", jRoot); + String hostname = extractJson("hostname", jRoot); + String metric = extractJson("metric", jRoot); + String status = extractJson("status", jRoot); + String prevStatus = extractJson("prev_status", jRoot); + String prevTs =
extractJson("prev_ts", jRoot); + String tsm = extractJson("ts_monitored", jRoot); + String tsp = extractJson("ts_processed", jRoot); + + // Compile key + // Key is constructed based on + // report > metric_type > date(day) > endpoint group > service > + // hostname > metric + String key = rep + "|" + tp + "|" + dt + "|" + eGroup + "|" + service + "|" + hostname + "|" + metric + "|" + + tsm; + + // Prepare columns + Put put = new Put(Bytes.toBytes(key)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("report"), Bytes.toBytes(rep)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("type"), Bytes.toBytes(tp)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("endpoint_group"), Bytes.toBytes(eGroup)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("service"), Bytes.toBytes(service)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("hostname"), Bytes.toBytes(hostname)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("metric"), Bytes.toBytes(metric)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("status"), Bytes.toBytes(status)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("prev_status"), Bytes.toBytes(prevStatus)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("prev_ts"), Bytes.toBytes(prevTs)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("ts_monitored"), Bytes.toBytes(tsm)); + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("ts_processed"), Bytes.toBytes(tsp)); + + // Insert row in hbase + ht.put(put); + + } + + /** + * Closes hbase table and hbase connection + */ + @Override + public void close() throws IOException { + ht.close(); + connection.close(); + } + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingClient.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingClient.java new file mode 100644 index 00000000..4e6e1527 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingClient.java @@ -0,0 +1,334 @@ +package argo.streaming; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; +import org.mortbay.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; + +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; + +/** + * Simple http client for pulling and acknowledging messages from AMS service + * http API + */ +public class ArgoMessagingClient { + + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingClient.class); + // Http Client for contanting AMS service + private CloseableHttpClient httpClient = null; + // AMS endpoint (hostname:port or hostname) + private String 
endpoint = null; + // AMS project (/v1/projects/{project}) + private String project = null; + // AMS token (?key={token}) + private String token = null; + // AMS subscription (/v1/projects/{project}/subscriptions/{sub}) + private String sub = null; + // protocol (https,http) + private String proto = null; + // numer of message to be pulled; + private String maxMessages = ""; + // ssl verify or not + private boolean verify = true; + // proxy + private URI proxy = null; + + // Utility inner class for holding list of messages and acknowledgements + private class MsgAck { + String[] msgs; + String[] ackIds; + + private MsgAck(String[] msgs, String[] ackIds) { + this.msgs = msgs; + this.ackIds = ackIds; + } + + } + + public ArgoMessagingClient() { + this.httpClient = HttpClients.createDefault(); + this.proto = "https"; + this.token = "token"; + this.endpoint = "localhost"; + this.project = "test_project"; + this.sub = "test_sub"; + this.maxMessages = "100"; + this.proxy = null; + } + + public ArgoMessagingClient(String method, String token, String endpoint, String project, String sub, int batch, + boolean verify) throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + + this.proto = method; + this.token = token; + this.endpoint = endpoint; + this.project = project; + this.sub = sub; + this.maxMessages = String.valueOf(batch); + this.verify = verify; + + this.httpClient = buildHttpClient(); + + } + + /** + * Initializes Http Client (if not initialized during constructor) + * + * @return + */ + private CloseableHttpClient buildHttpClient() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + if (this.verify) { + return this.httpClient = HttpClients.createDefault(); + } else { + return this.httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + } + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Set AMS http client to use http proxy + */ + public void setProxy(String proxyURL) throws URISyntaxException { + // parse proxy url + this.proxy = URI.create(proxyURL); + } + + /** + * Set AMS http client to NOT use an http proxy + */ + public void unsetProxy() { + this.proxy = null; + } + + /** + * Create a configuration for using http proxy on each request + */ + private RequestConfig createProxyCfg() { + HttpHost proxy = new HttpHost(this.proxy.getHost(), this.proxy.getPort(), this.proxy.getScheme()); + RequestConfig config = RequestConfig.custom().setProxy(proxy).build(); + return config; + } + + public void logIssue(CloseableHttpResponse resp) throws UnsupportedOperationException, IOException { + InputStreamReader isRdr = new InputStreamReader(resp.getEntity().getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + int statusCode = resp.getStatusLine().getStatusCode(); + + // Parse error content from api response + StringBuilder result = new StringBuilder(); + String rLine; + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + isRdr.close(); + Log.warn("ApiStatusCode={}, ApiErrorMessage={}", statusCode, result); + + } + + /** + * Properly compose url for each AMS request + */ + 
public String composeURL(String method) { + return proto + "://" + endpoint + "/v1/projects/" + project + "/subscriptions/" + sub + ":" + method + "?key=" + + token; + } + + /** + * Executes a pull request against AMS api + */ + public MsgAck doPull() throws IOException, KeyManagementException, NoSuchAlgorithmException, KeyStoreException { + + ArrayList msgList = new ArrayList(); + ArrayList ackIdList = new ArrayList(); + + // Create the http post to pull + HttpPost postPull = new HttpPost(this.composeURL("pull")); + StringEntity postBody = new StringEntity( + "{\"maxMessages\":\"" + this.maxMessages + "\",\"returnImmediately\":\"true\"}"); + postBody.setContentType("application/json"); + postPull.setEntity(postBody); + + if (this.httpClient == null) { + this.httpClient = buildHttpClient(); + } + + // check for proxy + if (this.proxy != null) { + postPull.setConfig(createProxyCfg()); + } + + CloseableHttpResponse response = this.httpClient.execute(postPull); + String msg = ""; + String ackId = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + + int statusCode = response.getStatusLine().getStatusCode(); + + if (entity != null && statusCode == 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + Log.info("response: {}", result.toString()); + JsonElement jRoot = jsonParser.parse(result.toString()); + + JsonArray jRec = jRoot.getAsJsonObject().get("receivedMessages").getAsJsonArray(); + + // if has elements + for (JsonElement jMsgItem : jRec) { + JsonElement jMsg = jMsgItem.getAsJsonObject().get("message"); + JsonElement jAckId = jMsgItem.getAsJsonObject().get("ackId"); + msg = jMsg.toString(); + ackId = jAckId.toString(); + msgList.add(msg); + ackIdList.add(ackId); + } + + isRdr.close(); + + } else { + + logIssue(response); + + } + + response.close(); + + String[] msgArr = msgList.toArray(new String[0]); + String[] ackIdArr = ackIdList.toArray(new String[0]); + + // Return a Message array + return new MsgAck(msgArr, ackIdArr); + + } + + /** + * Executes a combination of Pull & Ack requests against AMS api + */ + public String[] consume() throws KeyManagementException, NoSuchAlgorithmException, KeyStoreException { + String[] msgs = new String[0]; + // Try first to pull a message + try { + + MsgAck msgAck = doPull(); + // get last ackid + String ackId = ""; + if (msgAck.ackIds.length > 0) { + ackId = msgAck.ackIds[msgAck.ackIds.length - 1]; + } + + if (ackId != "") { + // Do an ack for the received message + String ackRes = doAck(ackId); + if (ackRes == "") { + Log.info("Message Acknowledged ackid:" + ackId); + msgs = msgAck.msgs; + + } else { + Log.warn("No acknowledment for ackid:" + ackId + "-" + ackRes); + } + } + } catch (IOException e) { + LOG.error(e.getMessage()); + } + return msgs; + + } + + /** + * Executes an Acknowledge request against AMS api + */ + public String doAck(String ackId) throws IOException { + + // Create the http post to ack + HttpPost postAck = new HttpPost(this.composeURL("acknowledge")); + StringEntity postBody = new StringEntity("{\"ackIds\":[" + ackId + "]}"); + postBody.setContentType("application/json"); + postAck.setEntity(postBody); + + // check for proxy + if (this.proxy != null) { + postAck.setConfig(createProxyCfg()); + } + + 
CloseableHttpResponse response = httpClient.execute(postAck); + String resMsg = ""; + StringBuilder result = new StringBuilder(); + + HttpEntity entity = response.getEntity(); + int status = response.getStatusLine().getStatusCode(); + + if (status != 200) { + + InputStreamReader isRdr = new InputStreamReader(entity.getContent()); + BufferedReader bRdr = new BufferedReader(isRdr); + + String rLine; + + while ((rLine = bRdr.readLine()) != null) + result.append(rLine); + + resMsg = result.toString(); + isRdr.close(); + + } else { + // Log any api errors + logIssue(response); + } + response.close(); + // Return a resposeMessage + return resMsg; + + } + + /** + * Close AMS http client + */ + public void close() throws IOException { + this.httpClient.close(); + } +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingSource.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingSource.java new file mode 100644 index 00000000..5def0a1b --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/ArgoMessagingSource.java @@ -0,0 +1,136 @@ +package argo.streaming; + +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; + +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Custom source to connect to AMS service. Uses ArgoMessaging client + */ +public class ArgoMessagingSource extends RichSourceFunction { + + private static final long serialVersionUID = 1L; + + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoMessagingSource.class); + + private String endpoint = null; + private String port = null; + private String token = null; + private String project = null; + private String sub = null; + private int batch = 1; + private long interval = 100L; + private boolean verify = true; + private boolean useProxy = false; + private String proxyURL = ""; + private transient Object rateLck; // lock for waiting to establish rate + + private volatile boolean isRunning = true; + + private ArgoMessagingClient client = null; + + public ArgoMessagingSource(String endpoint, String port, String token, String project, String sub, int batch, Long interval) { + this.endpoint = endpoint; + this.port = port; + this.token = token; + this.project = project; + this.sub = sub; + this.interval = interval; + this.batch = batch; + this.verify = true; + + } + + /** + * Set verify to true or false. 
If set to false AMS client will be able to + * contact AMS endpoints that use self-signed certificates + */ + public void setVerify(boolean verify) { + this.verify = verify; + } + + /** + * Set proxy details for AMS client + */ + public void setProxy(String proxyURL) { + this.useProxy = true; + this.proxyURL = proxyURL; + } + + /** + * Unset proxy details for AMS client + */ + public void unsetProxy(String proxyURL) { + this.useProxy = false; + this.proxyURL = ""; + } + + @Override + public void cancel() { + isRunning = false; + + } + + @Override + public void run(SourceContext ctx) throws Exception { + // This is the main run logic + while (isRunning) { + String[] res = this.client.consume(); + if (res.length > 0) { + for (String msg : res) { + ctx.collect(msg); + } + + } + synchronized (rateLck) { + rateLck.wait(this.interval); + } + + } + + } + + /** + * AMS Source initialization + */ + @Override + public void open(Configuration parameters) throws Exception { + // init rate lock + rateLck = new Object(); + // init client + String fendpoint = this.endpoint; + if (this.port != null && !this.port.isEmpty()) { +// fendpoint = this.endpoint + ":" + port; + fendpoint = this.endpoint; + } + try { + client = new ArgoMessagingClient("https", this.token, fendpoint, this.project, this.sub, this.batch, this.verify); + if (this.useProxy) { + client.setProxy(this.proxyURL); + } + } catch (KeyManagementException e) { + e.printStackTrace(); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (KeyStoreException e) { + e.printStackTrace(); + } + } + + @Override + public void close() throws Exception { + if (this.client != null) { + client.close(); + } + synchronized (rateLck) { + rateLck.notify(); + } + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/MongoStatusOutput.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/MongoStatusOutput.java new file mode 100644 index 00000000..2c0002da --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/MongoStatusOutput.java @@ -0,0 +1,287 @@ +package argo.streaming; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; +import org.bson.conversions.Bson; + +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOptions; + +import status.StatusEvent; + + +/** + * MongoOutputFormat for storing status data to mongodb + */ +public class MongoStatusOutput implements OutputFormat { + + public enum MongoMethod { + INSERT, UPSERT + }; + + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String metricName; + private String endpointName; + private String serviceName; + private String egroupName; + private MongoMethod method; + private String report; + + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection metricCol; + private MongoCollection endpointCol; + private MongoCollection serviceCol; + private MongoCollection egroupCol; + + // constructor + public MongoStatusOutput(String uri, String metricName,String serviceName, String endpointName, String 
egroupName, String method , String report) { + + if (method.equalsIgnoreCase("upsert")) { + this.method = MongoMethod.UPSERT; + } else { + this.method = MongoMethod.INSERT; + } + + + this.report = report; + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.metricName = metricName; + this.serviceName = serviceName; + this.endpointName = endpointName; + this.egroupName = egroupName; + } + + // constructor + public MongoStatusOutput(String host, int port, String db, String metricName,String serviceName, String endpointName, String egroupName, MongoMethod method, + String report) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.metricName = metricName; + this.serviceName = serviceName; + this.endpointName = endpointName; + this.egroupName = egroupName; + this.method = method; + this.report = report; + } + + private void initMongo() { + this.mClient = new MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.metricCol = mDB.getCollection(metricName); + this.endpointCol = mDB.getCollection(endpointName); + this.serviceCol = mDB.getCollection(serviceName); + this.egroupCol = mDB.getCollection(egroupName); + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Prepare correct MongoDocument according to record values and selected StatusType. + * A different document is needed for storing Status Metric results than Endpoint, + * Service or Endpoint Group ones. + */ + private Document prepDoc(StatusEvent record) { + Document doc = new Document("report",this.report) + .append("endpoint_group", record.getGroup()); + + + if (record.getType().equalsIgnoreCase("service")) { + + doc.append("service",record.getService()); + + } else if (record.getType().equalsIgnoreCase("endpoint")) { + + doc.append("service", record.getService()) + .append("host", record.getHostname()); + + } else if (record.getType().equalsIgnoreCase("metric")) { + + doc.append("service", record.getService()) + .append("host", record.getHostname()) + .append("metric", record.getMetric()) + .append("message", record.getMessage()) + .append("summary", record.getSummary()) + .append("time_integer",record.getTimeInt()) + .append("previous_state",record.getPrevStatus()) + .append("previous_ts", record.getPrevTs()); + } + + + doc.append("status",record.getStatus()) + .append("timestamp",record.getTsMonitored()) + .append("date_integer",record.getDateInt()); + + return doc; + } + + /** + * Prepare correct Update filter according to record values and selected StatusType. + * A different update filter is needed for updating Status Metric results than Endpoint, + * Service or Endpoint Group ones. 
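* For example, a metric-level filter matches on report, date_integer, endpoint_group, service,
* host, metric and timestamp, whereas an endpoint_group-level filter only matches on report,
* date_integer, endpoint_group and timestamp.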
+ */ + private Bson prepFilter(StatusEvent record) { + + if (record.getType().equalsIgnoreCase("metric")) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("host", record.getHostname()), Filters.eq("metric", record.getMetric()), + Filters.eq("timestamp", record.getTsMonitored())); + + } else if (record.getType().equalsIgnoreCase("endpoint")) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("host", record.getHostname()), Filters.eq("timestamp", record.getTsMonitored())); + + } else if (record.getType().equalsIgnoreCase("service")) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("service", record.getService()), + Filters.eq("timestamp", record.getTsMonitored())); + + } else if (record.getType().equalsIgnoreCase("endpoint_group")) { + + return Filters.and(Filters.eq("report", this.report), Filters.eq("date_integer", record.getDateInt()), + Filters.eq("endpoint_group", record.getGroup()), Filters.eq("timestamp", record.getTsMonitored())); + + } + + return null; + + + } + + /** + * Extract json representation as string to be used as a field value + */ + private String extractJson(String field, JsonObject root) { + JsonElement el = root.get(field); + if (el != null && !(el.isJsonNull())) { + + return el.getAsString(); + + } + return ""; + } + + public StatusEvent jsonToStatusEvent(JsonObject jRoot) { + + String rep = this.report; + String tp = extractJson("type", jRoot); + String dt = extractJson("date", jRoot); + String eGroup = extractJson("endpoint_group", jRoot); + String service = extractJson("service", jRoot); + String hostname = extractJson("hostname", jRoot); + String metric = extractJson("metric", jRoot); + String status = extractJson("status", jRoot); + String prevStatus = extractJson("prev_status", jRoot); + String prevTs = extractJson("prev_ts", jRoot); + String tsm = extractJson("ts_monitored", jRoot); + String tsp = extractJson("ts_processed", jRoot); + String monHost = extractJson("monitor_host",jRoot); + String repeat = extractJson("repeat",jRoot); + String message = extractJson("message",jRoot); + String summary = extractJson("summary",jRoot); + String url=extractJson("url", jRoot); + return new StatusEvent(rep,tp,dt,eGroup,service,hostname,metric,status,monHost,tsm,tsp,prevStatus,prevTs,repeat,summary,message, url); + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(String recordJSON) throws IOException { + + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonObject jRoot = jsonParser.parse(recordJSON).getAsJsonObject(); + StatusEvent record = jsonToStatusEvent(jRoot); + + + // Mongo Document to be prepared according to StatusType of input + Document doc = prepDoc(record); + + if (this.method == MongoMethod.UPSERT) { + + // Filter for upsert to be prepared according to StatusType of input + Bson f = prepFilter(record); + UpdateOptions opts = new UpdateOptions().upsert(true); + if (record.getType().equalsIgnoreCase("metric")) { + metricCol.replaceOne(f, doc, opts); + } else if (record.getType().equalsIgnoreCase("endpoint")) { + endpointCol.replaceOne(f, doc, 
opts); + } else if (record.getType().equalsIgnoreCase("service")) { + serviceCol.replaceOne(f, doc, opts); + } else if (record.getType().equalsIgnoreCase("endpoint_group")) { + egroupCol.replaceOne(f, doc, opts); + } + + } else { + if (record.getType().equalsIgnoreCase("metric")) { + metricCol.insertOne(doc); + } else if (record.getType().equalsIgnoreCase("endpoint")) { + endpointCol.insertOne(doc); + } else if (record.getType().equalsIgnoreCase("service")) { + serviceCol.insertOne(doc); + } else if (record.getType().equalsIgnoreCase("endpoint_group")) { + egroupCol.insertOne(doc); + } + } + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + metricCol = null; + endpointCol = null; + serviceCol = null; + egroupCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/StatusConfig.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/StatusConfig.java new file mode 100644 index 00000000..ee320155 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/StatusConfig.java @@ -0,0 +1,82 @@ +package argo.streaming; + +import java.io.Serializable; + +import org.apache.flink.api.java.utils.ParameterTool; + +public class StatusConfig implements Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + public String runDate; + + // Ams parameters + public String amsHost; + public String amsPort; + public String amsToken; + public String amsProject; + public String amsSub; + + // Avro schema + public String avroSchema; + + public String report; + + // Sync files + public String aps; + public String mps; + public String egp; + public String ops; + public String downtime; + // Parameter used in alert timeouts for notifications + public long timeout; + // Parameter used for daily event generation (not used in notifications) + public boolean daily; + // Parameter used to initialize a status to a default value (OK optimistically, MISSING pessimistically) + public String initStatus; + + // Raw parameters + public final ParameterTool pt; + + public StatusConfig(ParameterTool pt){ + this.pt = pt; + this.amsHost = pt.getRequired("ams.endpoint"); + this.amsPort = pt.getRequired("ams.port"); + this.amsToken = pt.getRequired("ams.token"); + this.amsProject = pt.getRequired("ams.project"); + + this.aps = pt.get("sync.aps"); + this.mps = pt.get("sync.mps"); + this.egp = pt.get("sync.egp"); + this.ops = pt.get("sync.ops"); + this.runDate = pt.get("run.date"); + this.downtime = pt.get("sync.downtimes"); + this.report = pt.get("report"); + // Optional timeout parameter + if (pt.has("timeout")){ + this.timeout = pt.getLong("timeout"); + } else { + this.timeout = 86400000L; + } + + // optional cli parameter to configure default status + if (pt.has("init.status")) { + this.initStatus = pt.get("init.status"); + } else { + // by default, default initial status should be optimistically OK + this.initStatus = "OK"; + } + + // Optional set daily parameter + this.daily = pt.getBoolean("daily",false); + + } + + public ParameterTool getParameters(){ + return this.pt; + } + + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncData.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncData.java new file mode 100644 index 
00000000..9cedbaad --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncData.java @@ -0,0 +1,379 @@ +package argo.streaming; + +import java.io.BufferedInputStream; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; + +import org.apache.avro.Schema; +import org.apache.avro.file.DataFileStream; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricData; +import argo.avro.MetricProfile; +import argo.avro.Weight; + +/** + * Utility class that can target text or connector avro encoded files (metric data, + * metric profiles, topology, weights, downtimes etc). + */ +public class SyncData { + + /** + * Convert a string url (mostly hdfs://) to URI object + */ + private URI toURI(String url) throws URISyntaxException { + + return new URI(url); + + } + + /** + * Utility class that can target text or connector avro encoded files (metric data, + * metric profiles, topology, weights, downtimes etc). + */ + public String readText(String url) throws URISyntaxException, IOException { + + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedReader bfr = bfrHDFS(uri); + return readTextFile(bfr); + + } + + return ""; + + } + + /** + * Read a plain text file + */ + public String readTextFile(BufferedReader bfr) throws IOException { + String line = null; + String full = ""; + while ((line = bfr.readLine()) != null) { + full = full + line; + } + + return full; + } + + /** + * Read a list of GroupEndpoint Avro Objects from url + */ + public ArrayList readGroupEndpoint(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readGroupEndpointFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of GroupEndpoint Avro Objects from an InputStream + */ + public ArrayList readGroupEndpointFile(BufferedInputStream bis) throws IOException { + DatumReader datumReader = new SpecificDatumReader(GroupEndpoint.getClassSchema(),GroupEndpoint.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + GroupEndpoint cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of GroupGroup Avro Objects from a url + */ + public ArrayList readGroupGroup(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readGroupGroupFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of GroupGroup Avro Objects from an input stream + */ + public ArrayList readGroupGroupFile(BufferedInputStream bis) 
throws IOException { + DatumReader datumReader = new SpecificDatumReader(GroupGroup.getClassSchema(),GroupGroup.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + GroupGroup cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of Downtime Avro Objects from a url + */ + public ArrayList readDowntime(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readDowntimeFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of Downtime Avro Objects from an input stream + */ + public ArrayList readDowntimeFile(BufferedInputStream bis) throws IOException { + DatumReader datumReader = new SpecificDatumReader(Downtime.getClassSchema(),Downtime.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + Downtime cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of MetricProfile Avro Objects from a url + */ + public ArrayList readMetricProfile(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readMetricProfileFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of MetricProfile Avro Objects from an input stream + */ + public ArrayList readMetricProfileFile(BufferedInputStream bis) throws IOException { + DatumReader datumReader = new SpecificDatumReader(MetricProfile.getClassSchema(),MetricProfile.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + MetricProfile cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of MetricData Avro Objects from a url + */ + public ArrayList readMetricData(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readMetricDataFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of MetricData Avro Objects from an input stream + */ + public ArrayList readMetricDataFile(BufferedInputStream bis) throws IOException { + DatumReader datumReader = new SpecificDatumReader(MetricData.getClassSchema(),MetricData.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + MetricData cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of Weight Avro Objects from a url + */ + public ArrayList readWeight(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + 
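// hdfs:// is the only scheme handled here: open a buffered HDFS stream and decode the Avro records;
// any other scheme falls through to the empty-list return below.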
BufferedInputStream bis = bisHDFS(uri); + return readWeightFile(bis); + + } + + return new ArrayList(); + } + + /** + * Read a list of Weight Avro Objects from an input stream + */ + public ArrayList readWeightFile(BufferedInputStream bis) throws IOException { + DatumReader datumReader = new SpecificDatumReader(Weight.getClassSchema(),Weight.getClassSchema(),new SpecificData()); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + + ArrayList list = new ArrayList(); + + while (dataFileStream.hasNext()) { + // read the row + Weight cur = dataFileStream.next(); + list.add(cur); + } + + dataFileStream.close(); + + return list; + } + + /** + * Read a list of GenericRecord Avro Objects from a url + */ + public String readGenericAvro(String url) throws URISyntaxException, IOException { + URI uri; + + uri = toURI(url); + + String proto = uri.getScheme(); + if (proto.equalsIgnoreCase("hdfs")) { + BufferedInputStream bis = bisHDFS(uri); + return readGenericAvroFile(bis); + + } + + return ""; + } + + /** + * Read a list of GenericRecord Avro Objects from an input stream + */ + public String readGenericAvroFile(BufferedInputStream bis) throws IOException { + + DatumReader datumReader = new GenericDatumReader(); + DataFileStream dataFileStream = new DataFileStream(bis, datumReader); + Schema avroSchema = dataFileStream.getSchema(); + + GenericRecord avroRow = new GenericData.Record(avroSchema); + + String str = ""; + + while (dataFileStream.hasNext()) { + // read the row + avroRow = dataFileStream.next(avroRow); + str = str + avroRow.toString(); + } + + dataFileStream.close(); + + return str; + } + + /** + * Create a buffered reader from hdfs uri + */ + public BufferedReader bfrHDFS(URI uri) throws IOException { + Configuration conf = new Configuration(); + conf.set("fs.defaultFS", uri.getScheme() + "://" + uri.getHost() + ":" + uri.getPort()); + FileSystem fs; + + fs = FileSystem.get(conf); + BufferedReader bfr = new BufferedReader(new InputStreamReader(fs.open(new Path(uri.getPath())))); + return bfr; + + } + + /** + * Create a buffered input stream from hdfs uri + */ + public BufferedInputStream bisHDFS(URI uri) throws IOException { + Configuration conf = new Configuration(); + conf.set("fs.defaultFS", uri.getScheme() + "://" + uri.getHost() + ":" + uri.getPort()); + FileSystem fs; + + fs = FileSystem.get(conf); + BufferedInputStream bis = new BufferedInputStream(fs.open(new Path(uri.getPath()))); + return bis; + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncParse.java b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncParse.java new file mode 100644 index 00000000..ff2726c8 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/argo/streaming/SyncParse.java @@ -0,0 +1,100 @@ +package argo.streaming; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumReader; + +import com.google.gson.JsonElement; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.MetricProfile; + + +/** + * SyncParse is a utility class providing methods to parse specific connector data in avro format + */ +public class SyncParse { + + /** + * Parses a byte array and decodes avro 
GroupEndpoint objects + */ + public static ArrayList parseGroupEndpoint(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(GroupEndpoint.getClassSchema(),GroupEndpoint.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + GroupEndpoint cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + /** + * Parses a byte array and decodes avro MetricProfile objects + */ + public static ArrayList parseMetricProfile(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(MetricProfile.getClassSchema(),MetricProfile.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + MetricProfile cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + /** + * Parses a byte array and decodes avro Downtime objects + */ + public static ArrayList parseDowntimes(byte[] avroBytes) throws IOException{ + + ArrayList result = new ArrayList(); + + DatumReader avroReader = new SpecificDatumReader(Downtime.getClassSchema(),Downtime.getClassSchema(),new SpecificData()); + BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(avroBytes, null); + + while (!decoder.isEnd()){ + Downtime cur = avroReader.read(null, decoder); + result.add(cur); + } + + return result; + } + + /** + * Parses attributes from a json attribute element + */ + public static Map parseAttributes(JsonElement jAttr) throws IOException{ + + Map result = new HashMap(); + if (jAttr!=null){ + Set> jItems = jAttr.getAsJsonObject().entrySet(); + + for (Entry jItem : jItems){ + result.put(jItem.getKey(), jItem.getValue().getAsString()); + } + } + + return result; + } + + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/ops/ConfigManager.java b/flink_jobs/old-models/stream_status/src/main/java/ops/ConfigManager.java new file mode 100644 index 00000000..511529fe --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/ops/ConfigManager.java @@ -0,0 +1,188 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.List; +import java.util.TreeMap; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + + +public class ConfigManager { + + private static final Logger LOG = Logger.getLogger(ConfigManager.class.getName()); + + public String id; // report uuid reference + public String report; + public String tenant; + public String egroup; // endpoint group + public String ggroup; // group of groups + public String weight; // weight factor type + public TreeMap egroupTags; + public TreeMap ggroupTags; + public TreeMap mdataTags; + + public ConfigManager() { + this.report = null; + this.id = null; + this.tenant = null; + this.egroup = null; + this.ggroup = null; + this.weight = null; + this.egroupTags = new TreeMap(); + this.ggroupTags = new TreeMap(); + this.mdataTags = new TreeMap(); + + } + + public void clear() { + this.id = null; + this.report = null; + this.tenant = null; + this.egroup = null; + 
this.ggroup = null; + this.weight = null; + this.egroupTags.clear(); + this.ggroupTags.clear(); + this.mdataTags.clear(); + + } + + public String getReportID() { + return id; + } + + public String getReport() { + return report; + } + + public String getTenant() { + return tenant; + } + + + public String getEgroup() { + return egroup; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(br); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + + this.weight = jObj.get("weight").getAsString(); + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("filter_tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value = jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + + /** + * Loads Report config information from a config json string + * + */ + public void loadJsonString(List confJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(confJson.get(0)); + JsonObject jObj = jElement.getAsJsonObject(); + // Get the simple fields + this.id = jObj.get("id").getAsString(); + this.tenant = jObj.get("tenant").getAsString(); + this.report = jObj.get("info").getAsJsonObject().get("name").getAsString(); + // get topology schema names + JsonObject topoGroup = jObj.get("topology_schema").getAsJsonObject().getAsJsonObject("group"); + this.ggroup = topoGroup.get("type").getAsString(); + this.egroup = topoGroup.get("group").getAsJsonObject().get("type").getAsString(); + this.weight = jObj.get("weight").getAsString(); + // Get compound fields + JsonArray jTags = jObj.getAsJsonArray("tags"); + + // Iterate tags + if (jTags != null) { + for (JsonElement tag : jTags) { + JsonObject jTag = tag.getAsJsonObject(); + String name = jTag.get("name").getAsString(); + String value = jTag.get("value").getAsString(); + String ctx = jTag.get("context").getAsString(); + if (ctx.equalsIgnoreCase("group_of_groups")){ + this.ggroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("endpoint_groups")){ + 
this.egroupTags.put(name, value); + } else if (ctx.equalsIgnoreCase("metric_data")) { + this.mdataTags.put(name, value); + } + + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/ops/OpsManager.java b/flink_jobs/old-models/stream_status/src/main/java/ops/OpsManager.java new file mode 100644 index 00000000..fded09ee --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/ops/OpsManager.java @@ -0,0 +1,312 @@ +package ops; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class OpsManager { + + private static final Logger LOG = Logger.getLogger(OpsManager.class.getName()); + + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; + + private int[][][] truthTable; + + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; + + private boolean order; + + public OpsManager() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + + this.order = false; + + } + + public OpsManager(boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; + + this.truthTable = null; + } + + public String getDefaultDown() { + return this.defaultDownState; + } + + public String getDefaultUnknown() { + return this.defaultUnknownState; + } + + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } + + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } + + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } + + public int opInt(String op, String a, String b) { + + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } + + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } + + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } + + public String getStrStatus(int status) { + return this.revStates.get(status); + } + + public int getIntStatus(String status) { + return 
this.states.get(status); + } + + public String getStrOperation(int op) { + return this.revOps.get(op); + } + + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(String opsJson) throws JsonParseException { + // Clear data + this.clear(); + + try { + + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson); + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + 
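// The "defaults" object supplies the fallback states (missing, down, unknown) that are later
// exposed through getDefaultMissing(), getDefaultDown() and getDefaultUnknown().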
this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } catch (JsonParseException ex) { + LOG.error("Not valid json contents"); + throw ex; + } + + } + + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/status/StatusEvent.java b/flink_jobs/old-models/stream_status/src/main/java/status/StatusEvent.java new file mode 100644 index 00000000..2be08936 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/status/StatusEvent.java @@ -0,0 +1,271 @@ +package status; + +import com.google.gson.annotations.SerializedName; + +public class StatusEvent { + + private String report; + private String type; + private @SerializedName("date") + String dt; + private @SerializedName("endpoint_group") + String group; + private String service; + private String hostname; + private String metric; + private @SerializedName("monitoring_host") + String monHost; + private @SerializedName("ts_monitored") + String tsMonitored; + private @SerializedName("ts_processed") + String tsProcessed; + private String repeat; + private String summary; + private String message; + private String url; + + private String status; + private @SerializedName("prev_status") + String prevStatus; + private @SerializedName("prev_ts") + String prevTs; + + // Record status changes from other layers + // Arrays include 4 store elements in the following order [status, previous_status, timestamp, previous_timestamp] + private @SerializedName("status_egroup") + String statusEgroup[]; + private @SerializedName("status_service") + String statusService[]; + private @SerializedName("status_endpoint") + String 
statusEndpoint[]; + private @SerializedName("status_metric") + String statusMetric[]; + + public StatusEvent() { + this.report = ""; + this.type = ""; + this.group = ""; + this.dt = ""; + this.service = ""; + this.hostname = ""; + this.metric = ""; + this.status = ""; + this.monHost = ""; + this.tsMonitored = ""; + this.tsProcessed = ""; + this.prevStatus = ""; + this.prevTs = ""; + this.repeat = ""; + this.summary = ""; + this.message = ""; + this.url = ""; + this.statusEgroup = new String[0]; + this.statusService = new String[0]; + this.statusEndpoint = new String[0]; + this.statusMetric = new String[0]; + + } + + public StatusEvent(String report, String type, String dt, String group, String service, String hostname, String metric, String status, String monHost, String tsMonitored, String tsProcessed, String prevStatus, String prevTs, String repeat, String summary, String message, String url) { + this.report = report; + this.type = type; + this.group = group; + this.dt = dt; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monHost = monHost; + this.tsMonitored = tsMonitored; + this.tsProcessed = tsProcessed; + this.prevStatus = prevStatus; + this.prevTs = prevTs; + this.repeat = repeat; + this.summary = summary; + this.message = message; + this.url = url; + this.statusEgroup = null; + this.statusService = null; + this.statusEndpoint = null; + this.statusMetric = null; + + } + + public String[] getStatusEgroup() { + return this.statusEgroup; + } + + public String[] getStatusService() { + return this.statusService; + } + + public String[] getStatusEndpoint() { + return this.statusEndpoint; + } + + public String[] getStatusMetric() { + return this.statusMetric; + } + + public void setStatusEgroup(String[] statusEgroup) { + this.statusEgroup = statusEgroup; + } + + public void setStatusService(String[] statusService) { + this.statusService = statusService; + } + + public void setStatusEndpoint(String[] statusEndpoint) { + this.statusEndpoint = statusEndpoint; + } + + public void setStatusMetric(String[] statusMetric) { + this.statusMetric = statusMetric; + } + + public String getReport() { + return report; + } + + public String getType() { + return type; + } + + public String getDt() { + return dt; + } + + public String getGroup() { + return group; + } + + public String getService() { + return service; + } + + public String getHostname() { + return hostname; + } + + public String getMetric() { + return metric; + } + + public String getMonHost() { + return monHost; + } + + public String getTsMonitored() { + return tsMonitored; + } + + public String getTsProcessed() { + return tsProcessed; + } + + public String getRepeat() { + return repeat; + } + + public String getSummary() { + return this.summary; + } + + public String getMessage() { + return this.message; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + + public void setReport(String report) { + this.report = report; + } + + public void setType(String type) { + this.type = type; + } + + public void setDt(String dt) { + this.dt = dt; + } + + public void setGroup(String group) { + this.group = group; + } + + public void setService(String service) { + this.service = service; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + public void setMonHost(String monHost) { + this.monHost = monHost; + } + + public void 
setTsMonitored(String tsMonitored) { + this.tsMonitored = tsMonitored; + } + + public void setTsProcessed(String tsProcessed) { + this.tsProcessed = tsProcessed; + } + + public String getPrevStatus() { + return prevStatus; + } + + public String getPrevTs() { + return prevTs; + } + + public String getStatus() { + return status; + } + + public void setPrevStatus(String prevStatus) { + this.prevStatus = prevStatus; + } + + public void setPrevTs(String prevTs) { + this.prevTs = prevTs; + } + + public void setRepeat(String repeat) { + this.repeat = repeat; + } + + public void setStatus(String status) { + this.status = status; + } + + public void setSummary(String summary) { + this.summary = summary; + } + + public void setMessage(String message) { + this.message = message; + } + + public int getDateInt() { + return Integer.parseInt(this.dt); + } + + public int getTimeInt() { + String timePart = this.tsMonitored.replaceAll(":|Z", "").split("T")[1]; + return Integer.parseInt(timePart); + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/status/StatusManager.java b/flink_jobs/old-models/stream_status/src/main/java/status/StatusManager.java new file mode 100644 index 00000000..f5e8c154 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/status/StatusManager.java @@ -0,0 +1,996 @@ +package status; + +import java.io.File; +import java.io.IOException; +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.TimeZone; + +import sync.AggregationProfileManager; +import sync.DowntimeCache; +import sync.DowntimeManager; +import sync.EndpointGroupManagerV2; +import sync.EndpointGroupManagerV2.EndpointItem; +import sync.MetricProfileManager; +import ops.OpsManager; + +import com.google.gson.Gson; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.MetricProfile; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.codec.binary.Base64; + + +/** + * Status Manager implements a live structure containing a topology of entities + * and the related statuses for each one + */ +public class StatusManager { + + // Initialize logger + static Logger LOG = LoggerFactory.getLogger(StatusManager.class); + + // Name of the report used + private String report; + + // Sync file structures necessary for status computation + public EndpointGroupManagerV2 egp = new EndpointGroupManagerV2(); + public MetricProfileManager mps = new MetricProfileManager(); + AggregationProfileManager aps = new AggregationProfileManager(); + OpsManager ops = new OpsManager(); + private Long timeout = 86400000L; + + // Add downtime manager cache - 5 slots are enough for status manager case + private DowntimeCache dc = new DowntimeCache(5); + + // Names of valid profiles and services used + String validMetricProfile; + String validAggProfile; + ArrayList validServices = new ArrayList(); + + // Structure to hold topology entities and related statuses + Map groups = new HashMap(); + + // Flag used in initial event generation + Boolean firstGen = true; + + // Timestamp of the latest processed event used as a daily event generation + // trigger + String tsLatest; + + 
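// Note: the groups map holds one StatusNode tree per endpoint group, nested as
// group -> service -> endpoint -> metric (built by addGroup/addService/addEndpoint/addMetrics below);
// each node keeps its latest status and timestamp in a StatusItem.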
public void setReport(String report) { + this.report = report; + } + + public String getReport() { + return this.report; + } + + public void setTimeout(Long timeout) { + this.timeout = timeout; + } + + public Long getTimeout() { + return this.timeout; + } + + // Get Operation Manager + public OpsManager getOps() { + return this.ops; + } + + /** + * Status Item is a simple structure holding the latest status for an entity + * along with the timestamp of the event + */ + public class StatusItem { + int status; + Date timestamp; + Date genTs; + } + + + public void addDowntimeSet(String dayStamp, ArrayList downList) { + this.dc.addFeed(dayStamp, downList); + } + + /** + * Status Node represents status information for entity in the topology tree An + * entity might contain other entities with status information + */ + public class StatusNode { + // Type of entity: endpoint_group,service,endpoint or metric + String type; + // Status information with timestamp + StatusItem item; + // A list of entities contained as children + Map children = new HashMap(); + // Reference to the parent node + StatusNode parent = null; + + /** + * Creates a new status node + * + * @param type + * A string containing the node type + * (endpoint_group,service,endpoint,metric) + * @param defStatus + * Default status value + * @param defTs + * Default timestamp + */ + public StatusNode(String type, int defStatus, Date defTs) { + this.type = type; + this.item = new StatusItem(); + this.item.status = defStatus; + this.item.timestamp = defTs; + this.item.genTs = defTs; + this.parent = null; + } + + /** + * Creates a new status node given a parent + * + * @param type + * A string containing the node type + * (endpoint_group,service,endpoint,metric) + * @param defStatus + * Default status value + * @param defTs + * Default timestamp + * @param parent + * Reference to the parent status node + */ + public StatusNode(String type, int defStatus, Date defTs, StatusNode parent) { + this.type = type; + this.item = new StatusItem(); + this.item.status = defStatus; + this.item.timestamp = defTs; + this.parent = parent; + } + + } + + /** + * Checks if this status manager handles the specific endpoint group + */ + public boolean hasGroup(String group) { + return this.groups.containsKey(group); + } + + /** + * Set the latest processed timestamp value + */ + public void setTsLatest(String ts) { + this.tsLatest = ts; + } + + /** + * Get the latest processed timestamp value + */ + public String getTsLatest() { + return this.tsLatest; + } + + /** + * Disable flag for initial event generation + */ + public void disableFirstGen() { + this.firstGen = false; + } + + /** + * Check if day has changed between two sequential timestamps + * + * @param tsOld + * Previous timestamp + * @param tsNew + * Newest timestamp + */ + public boolean hasDayChanged(String tsOld, String tsNew) { + if (tsOld == null) + return false; + + + String dtOld = tsOld.split("T")[0]; + String dtNew = tsNew.split("T")[0]; + + if (dtOld.compareToIgnoreCase(dtNew) != 0) { + return true; + } + + return false; + } + + /** + * Get firstGen parameter flag to check if initial event generation is needed + */ + public boolean getFirstGen() { + return this.firstGen; + } + + /** + * Get today's datetime at the beginning of day + * + * @return Date at the beginning of day + */ + public Date getToday() { + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + return cal.getTime(); + } + + /** + * Convert 
zulu timestamp in date object + * + * @param zulu + * String representing a zulu timestamp + * @return Date object + */ + public Date setDate(String zulu) throws ParseException { + String[] parts = zulu.split("T"); + return fromZulu(parts[0] + "T00:00:00Z"); + } + + /** + * Compare profiles for validity and extract valid services + */ + public void setValidProfileServices() { + // Get services from first profile + this.validMetricProfile = this.mps.getProfiles().get(0); + this.validAggProfile = this.aps.getAvProfiles().get(0); + this.validServices = this.mps.getProfileServices(this.validMetricProfile); + } + + /** + * Load all initial Profiles from object lists + * + * @param egpAvro + * endpoint group object list + * @param mpsAvro + * metric profile object list + * @param apsJson + * aggregation profile contents + * @param opsJson + * operation profile contents + */ + public void loadAll(String runDate, ArrayList downList, ArrayList egpList, ArrayList mpsList, String apsJson, + String opsJson) throws IOException { + aps.loadJsonString(apsJson); + ops.loadJsonString(opsJson); + mps.loadFromList(mpsList); + + // First downtime loaded in cache + dc.addFeed(runDate, downList); + + + setValidProfileServices(); + // Trim endpoint group list based on metric profile information (remove unwanted + // services) + + ArrayList egpTrim = new ArrayList(); + + for (GroupEndpoint egpItem : egpList) { + if (validServices.contains(egpItem.getService())) { + egpTrim.add(egpItem); + } + } + + egp.loadFromList(egpTrim); + + } + + /** + * Load all initial Profiles directly from files + * + * @param egpAvro + * endpoint group topology location + * @param mpsAvro + * metric profile location + * @param apsJson + * aggregation profile location + * @param opsJson + * operation profile location + */ + public void loadAllFiles(String dayStamp, File downAvro, File egpAvro, File mpsAvro, File apsJson, File opsJson) throws IOException { + dc.addFileFeed(dayStamp, downAvro); + egp.loadAvro(egpAvro); + mps.loadAvro(mpsAvro); + aps.loadJson(apsJson); + ops.loadJson(opsJson); + + setValidProfileServices(); + } + + /** + * Construct status topology with initial status value and timestamp + * + * @param defStatus + * Initial status to be used + * @param defTs + * Initial timestamp to be used + */ + public void addNewGroup(String group, int defStatus, Date defTs) { + // Get all the available group's hosts + Iterator hostIter = egp.getGroupIter(group); + // for each host in specific group iterate + while (hostIter.hasNext()) { + EndpointItem host = hostIter.next(); + String service = host.getService(); + String hostname = host.getHostname(); + + if (this.validServices.contains(service)) { + // Add host to groups + addGroup(group, service, hostname, defStatus, defTs); + } + + } + } + + /** + * Add a new endpoint group node to the status topology using metric data + * information + * + * @param group + * Name of the endpoint group + * @param service + * Name of the service flavor + * @param hostname + * Name of the endpoint + * @param defStatus + * Default status to be initialized to + * @param defTs + * Default timestamp to be initialized to + */ + public void addGroup(String group, String service, String hostname, int defStatus, Date defTs) { + // Check if group exists + if (!this.groups.containsKey(group)) { + StatusNode groupNode = new StatusNode("group", defStatus, defTs); + this.groups.put(group, groupNode); + // Add to the new node + addService(groupNode, service, hostname, defStatus, defTs); + return; + } + + // Find 
group node and continue adding service under there + addService(this.groups.get(group), service, hostname, defStatus, defTs); + + } + + /** + * Add a new service node to the status topology using metric data information + * + * @param groupNode + * Reference to the parent node + * @param service + * Name of the service flavor + * @param hostname + * Name of the endpoint + * @param defStatus + * Default status to be initialized to + * @param defTs + * Default timestamp to be initialized to + */ + public void addService(StatusNode groupNode, String service, String hostname, int defStatus, Date defTs) { + if (!groupNode.children.containsKey(service)) { + StatusNode serviceNode = new StatusNode("service", defStatus, defTs, groupNode); + groupNode.children.put(service, serviceNode); + // Add to the new node + addEndpoint(serviceNode, service, hostname, defStatus, defTs); + return; + } + + // Find service node and continue adding endpoint under there + addEndpoint(groupNode.children.get(service), service, hostname, defStatus, defTs); + } + + /** + * Add a new endpoint node to the status topology using metric data information + * + * @param serviceNode + * Reference to the parent node + * @param service + * Name of the service flavor + * @param hostname + * Name of the endpoint + * @param defStatus + * Default status to be initialized to + * @param defTs + * Default timestamp to be initialized to + */ + public void addEndpoint(StatusNode serviceNode, String service, String hostname, int defStatus, Date defTs) { + if (!serviceNode.children.containsKey(hostname)) { + StatusNode endpointNode = new StatusNode("endpoint", defStatus, defTs, serviceNode); + serviceNode.children.put(hostname, endpointNode); + // Add to the new node + addMetrics(endpointNode, service, hostname, defStatus, defTs); + return; + } + + // Find endpoint node and continue adding metrics under there + addMetrics(serviceNode.children.get(hostname), service, hostname, defStatus, defTs); + } + + /** + * Add a new metrics node to the status topology using metric data information + * + * @param endpointNode + * Reference to the parent node + * @param service + * Name of the service flavor + * @param hostname + * Name of the endpoint + * @param defStatus + * Default status to be initialized to + * @param defTs + * Default timestamp to be initialized to + */ + public void addMetrics(StatusNode endpointNode, String service, String hostname, int defStatus, Date defTs) { + ArrayList metrics = this.mps.getProfileServiceMetrics(this.validMetricProfile, service); + + // Check if metrics = null + if (metrics == null) { + String msg = endpointNode + "/" + service + "/" + hostname + " " + this.validMetricProfile; + throw new RuntimeException(msg); + } + + // For all available metrics create leaf metric nodes + for (String metric : metrics) { + StatusNode metricNode = new StatusNode("metric", defStatus, defTs, endpointNode); + metricNode.children = null; + endpointNode.children.put(metric, metricNode); + } + } + + /** + * Convert a timestamp string to date object + * + * @param zulu + * String with timestamp in zulu format + * @return Date object + */ + public Date fromZulu(String zulu) throws ParseException { + DateFormat utcFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + utcFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + Date date = utcFormat.parse(zulu); + return date; + } + + /** + * Convert a date object to a string timestamp in zulu format + * + * @param ts + * Date object to be converted + * @return String with timestamp in 
zulu format + */ + public String toZulu(Date ts) throws ParseException { + DateFormat utcFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); + utcFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + return utcFormat.format(ts); + } + + /** + * For all entities in the topology generate status events + * + * @param tsStr + * String containing timestamp of status generation + * @return List of generated events in string json format + */ + public ArrayList dumpStatus(String tsStr) throws ParseException { + // Convert timestamp to date object + Date ts = fromZulu(tsStr); + // Initialize event list + ArrayList results = new ArrayList(); + + StatusEvent evtMetric = new StatusEvent(); + StatusEvent evtEndpoint = new StatusEvent(); + StatusEvent evtService = new StatusEvent(); + StatusEvent evtEgroup = new StatusEvent(); + + + String[] statusMetric = new String[4]; + String[] statusEndpoint = new String[4]; + String[] statusService = new String[4]; + String[] statusEgroup = new String[4]; + + + // For each endpoint group in topology + for (String groupName : groups.keySet()) { + StatusNode groupNode = groups.get(groupName); + String groupStatus = ops.getStrStatus(groupNode.item.status); + Date groupTs = groupNode.item.timestamp; + + // For each service in the specific endpoint group + for (String serviceName : groupNode.children.keySet()) { + StatusNode serviceNode = groupNode.children.get(serviceName); + String serviceStatus = ops.getStrStatus(serviceNode.item.status); + Date serviceTs = serviceNode.item.timestamp; + + // For each endpoint in the specific service + for (String endpointName : serviceNode.children.keySet()) { + StatusNode endpointNode = serviceNode.children.get(endpointName); + String endpointStatus = ops.getStrStatus(endpointNode.item.status); + Date endpointTs = endpointNode.item.timestamp; + + // For each metric in the specific service endpoint + for (String metricName : endpointNode.children.keySet()) { + StatusNode metricNode = endpointNode.children.get(metricName); + String metricStatus = ops.getStrStatus(metricNode.item.status); + Date metricTs = metricNode.item.timestamp; + // Generate metric status event + evtMetric = genEvent("metric", groupName, serviceName, endpointName, metricName, metricStatus, + "", metricTs, metricStatus, metricTs, true,"","", ""); + + statusMetric = new String[]{evtMetric.getStatus(),evtMetric.getPrevStatus(),evtMetric.getTsProcessed(),evtMetric.getPrevTs()}; + evtMetric.setStatusMetric(statusMetric); + results.add(eventToString(evtMetric)); + + + } + // Generate endpoint status event + evtEndpoint = genEvent("endpoint", groupName, serviceName, endpointName, "", endpointStatus, "", ts, + endpointStatus, endpointTs, true,"","", ""); + + statusEndpoint = new String[] {evtEndpoint.getStatus(),evtEndpoint.getPrevStatus(),evtEndpoint.getTsMonitored(),evtEndpoint.getPrevTs()}; + evtEndpoint.setStatusMetric(statusMetric); + evtEndpoint.setStatusEndpoint(statusEndpoint); + + results.add(eventToString(evtEndpoint)); + } + // Generate service status event + evtService = genEvent("service", groupName, serviceName, "", "", serviceStatus, "", ts, serviceStatus, + serviceTs, true,"","", ""); + + statusService = new String[] {evtService.getStatus(),evtService.getPrevStatus(),evtService.getTsMonitored(),evtService.getPrevTs()}; + evtService.setStatusMetric(statusMetric); + evtService.setStatusEndpoint(statusEndpoint); + evtService.setStatusService(statusService); + + results.add(eventToString(evtService)); + } + // Generate endpoint group status event + 
evtEgroup = genEvent("grpoup", groupName, "", "", "", groupStatus, "", ts, groupStatus, groupTs, true,"","", ""); + statusEgroup = new String[] {evtEgroup.getStatus(),evtEgroup.getPrevStatus(),evtEgroup.getTsMonitored(),evtEgroup.getPrevTs()}; + evtEgroup.setStatusMetric(statusMetric); + evtEgroup.setStatusEndpoint(statusEndpoint); + evtEgroup.setStatusService(statusService); + evtEgroup.setStatusEgroup(statusEgroup); + + results.add(eventToString(evtEgroup)); + + } + + return results; + } + + public boolean hasTimeDiff(Date d1, Date d2, long timeout) { + if (d2 == null || d1 == null) { + return false; + } + + Long diff = d1.getTime() - d2.getTime(); + + if (diff >= timeout) { + LOG.debug("Will regenerate event -time passed (hours):" + diff/3600000); + return true; + } + + return false; + + } + + public boolean hasDowntime(String timestamp, String hostname, String service ) { + String dayStamp = timestamp.split("T")[0]; + ArrayList period = this.dc.getDowntimePeriod(dayStamp, hostname, service); + // if no period was found return immediately fals + if (period == null) return false; + + // else check if ts lower than period's start time (first element in array list) + if (timestamp.compareTo(period.get(0)) <0 ) return false; + // else check if ts higher than period's end time (second element in array list) + if (timestamp.compareTo(period.get(1)) > 0) return false; + + // else everything is ok and timestamp belongs inside element's downtime period + return true; + } + + /** + * setStatus accepts an incoming metric event and checks which entities are + * affected (changes in status). For each affected entity generates a status + * event + * + * @param service + * Name of the service in the metric event + * @param hostname + * Name of the hostname in the metric event + * @param metric + * Name of the metric in the metric event + * @param statusStr + * Status value in string format + * @param monHost + * Name of the monitoring host that generated the event + * @param tsStr + * Timestamp value in string format + * @return List of generated events in string json format + */ + public ArrayList setStatus(String group, String service, String hostname, String metric, String statusStr, String monHost, + String tsStr, String summary, String message, String url) throws ParseException { + ArrayList results = new ArrayList(); + // prepare status events might come up + StatusEvent evtEgroup = new StatusEvent(); + StatusEvent evtService = new StatusEvent(); + StatusEvent evtEndpoint = new StatusEvent(); + StatusEvent evtMetric = new StatusEvent(); + + int status = ops.getIntStatus(statusStr); + Date ts = fromZulu(tsStr); + + + + + // Set StatusNodes + StatusNode groupNode = null; + StatusNode serviceNode = null; + StatusNode endpointNode = null; + StatusNode metricNode = null; + + String[] statusMetric = new String[4]; + String[] statusEndpoint = new String[4]; + String[] statusService = new String[4]; + String[] statusEgroup = new String[4]; + + + boolean updMetric = false; + boolean updEndpoint = false; + boolean updService = false; + + + Date oldGroupTS; + Date oldServiceTS; + Date oldEndpointTS; + Date oldMetricTS; + + int oldGroupStatus; + int oldServiceStatus; + int oldEndpointStatus; + int oldMetricStatus; + + // Open groups + groupNode = this.groups.get(group); + if (groupNode != null) { + // check if ts is behind groupNode ts + if (groupNode.item.timestamp.compareTo(ts) > 0) + return results; + // update ts + oldGroupTS = groupNode.item.timestamp; + oldGroupStatus = groupNode.item.status; + 
groupNode.item.timestamp = ts; + + // Open services + serviceNode = groupNode.children.get(service); + + if (serviceNode != null) { + // check if ts is behind groupNode ts + if (serviceNode.item.timestamp.compareTo(ts) > 0) + return results; + // update ts + oldServiceTS = serviceNode.item.timestamp; + oldServiceStatus = serviceNode.item.status; + serviceNode.item.timestamp = ts; + + // Open endpoints + endpointNode = serviceNode.children.get(hostname); + + if (endpointNode != null) { + // check if ts is behind groupNode ts + if (endpointNode.item.timestamp.compareTo(ts) > 0) + return results; + // update ts + oldEndpointTS = endpointNode.item.timestamp; + oldEndpointStatus = endpointNode.item.status; + endpointNode.item.timestamp = ts; + + // Open metrics + metricNode = endpointNode.children.get(metric); + + if (metricNode != null) { + + // check if ts is after previous timestamp + if (metricNode.item.timestamp.compareTo(ts) <= 0) { + // update status + boolean repeat = hasTimeDiff(ts,metricNode.item.genTs,this.timeout); + oldMetricTS = metricNode.item.timestamp; + oldMetricStatus = metricNode.item.status; + if (metricNode.item.status != status || repeat ) { + // generate event + evtMetric = genEvent("metric", group, service, hostname, metric, ops.getStrStatus(status), + monHost, ts, ops.getStrStatus(oldMetricStatus), oldMetricTS, repeat, summary, message, url); + + // Create metric status level object + statusMetric = new String[] {evtMetric.getStatus(),evtMetric.getPrevStatus(),evtMetric.getTsMonitored(),evtMetric.getPrevTs()}; + evtMetric.setStatusMetric(statusMetric); + + + results.add(eventToString(evtMetric)); + + + + metricNode.item.status = status; + metricNode.item.timestamp = ts; + metricNode.item.genTs = ts; + updMetric = true; + } + + + } + + } + // If metric indeed updated -> aggregate endpoint + if (updMetric) { + // calculate endpoint new status + int endpNewStatus = aggregate("", endpointNode, ts); + // check if status changed + boolean repeat = hasTimeDiff(ts,endpointNode.item.genTs,this.timeout); + if (true) { + + // generate event + evtEndpoint = genEvent("endpoint", group, service, hostname, metric, + ops.getStrStatus(endpNewStatus), monHost, ts, + ops.getStrStatus(oldEndpointStatus), oldEndpointTS,repeat,summary,message, url); + + // Create metric,endpoint status level object + statusEndpoint = new String[] {evtEndpoint.getStatus(),evtEndpoint.getPrevStatus(), evtEndpoint.getTsMonitored(), evtEndpoint.getPrevTs()}; + + evtEndpoint.setStatusMetric(statusMetric); + evtEndpoint.setStatusEndpoint(statusEndpoint); + results.add(eventToString(evtEndpoint)); + + endpointNode.item.status = endpNewStatus; + endpointNode.item.genTs = ts; + updEndpoint = true; + } + + } + } + // if endpoint indeed updated -> aggregate service + if (updEndpoint) { + // calculate service new status + int servNewStatus = aggregate(service, serviceNode, ts); + // check if status changed + boolean repeat = hasTimeDiff(ts,groupNode.item.genTs,this.timeout); + if (true) { + + // generate event + evtService = genEvent("service", group, service, hostname, metric, ops.getStrStatus(servNewStatus), + monHost, ts, ops.getStrStatus(oldServiceStatus), oldServiceTS,repeat,summary,message, url); + + + // Create metric, endpoint, service status metric objects + statusService = new String[] {evtService.getStatus(),evtService.getPrevStatus(), evtService.getTsMonitored(), evtService.getPrevTs()}; + + evtService.setStatusMetric(statusMetric); + evtService.setStatusEndpoint(statusEndpoint); + 
evtService.setStatusService(statusService); + + + results.add(eventToString(evtService)); + serviceNode.item.status = servNewStatus; + serviceNode.item.genTs=ts; + updService = true; + + } + + } + } + // if service indeed updated -> aggregate group + if (updService) { + // calculate group new status + int groupNewStatus = aggregate(group, groupNode, ts); + // check if status changed + boolean repeat = hasTimeDiff(ts,groupNode.item.genTs,this.timeout); + if (true){ + + // generate event + + evtEgroup = genEvent("endpoint_group", group, service, hostname, metric, ops.getStrStatus(groupNewStatus), + monHost, ts, ops.getStrStatus(oldGroupStatus), oldGroupTS,repeat,summary,message,""); + + // Create metric, endpoint, service, egroup status metric objects + statusEgroup = new String[] {evtEgroup.getStatus(),evtEgroup.getPrevStatus(), evtEgroup.getTsMonitored(), evtEgroup.getPrevTs()}; + + + evtEgroup.setStatusMetric(statusMetric); + evtEgroup.setStatusEndpoint(statusEndpoint); + evtEgroup.setStatusService(statusService); + evtEgroup.setStatusEgroup(statusEgroup); + + results.add(eventToString(evtEgroup)); + + groupNode.item.status = groupNewStatus; + groupNode.item.genTs = ts; + + } + } + } + // If service host combination has downtime clear result set + if (hasDowntime(tsStr,hostname,service)){ + LOG.info("Downtime encountered for group:{},service:{},host:{} - events will be discarded",group,service,hostname); + results.clear(); + } + + + return results; + + } + + + /** + * Generates a status event + * + * @param type + * Name of event type + * @param group + * Name of the endpoint group in the metric event + * @param service + * Name of the service in the metric event + * @param hostname + * Name of the hostname in the metric event + * @param metric + * Name of the metric in the metric event + * @param statusStr + * Status value in string format + * @param monHost + * Name of the monitoring host that affected the event + * @param tsStr + * Timestamp value in string format + * @return A string containing the event in json format + */ + private StatusEvent genEvent(String type, String group, String service, String hostname, String metric, String status, + String monHost, Date ts, String prevStatus, Date prevTs, boolean repeat, String summary, String message, String url) throws ParseException { + String tsStr = toZulu(ts); + String dt = tsStr.split("T")[0].replaceAll("-", ""); + String tsProc = toZulu(new Date()); + + if (summary==null) { + summary=""; + } + if (message==null) { + message=""; + } + + StatusEvent evnt = new StatusEvent(this.report, type, dt, group, service, hostname, metric, status, monHost, + toZulu(ts), tsProc, prevStatus, toZulu(prevTs), new Boolean(repeat).toString(),summary, message,url ); + + + return evnt; + } + + /** + * Accepts a StatusEvent object and returns a json string representation of it + * + * @param evnt + * @return A json string representation of a Status Event + */ + private String eventToString(StatusEvent evnt) { + Gson gson = new Gson(); + String evntJson = gson.toJson(evnt); + LOG.debug("Event Generated: " + evntJson); + return evntJson; + } + + /** + * Aggregate status values according to profiles + * + * @param node + * Status node used to aggregate its children + * @param ts + * Timestamp of the aggregation event + * @return Status value in integer format + */ + public int aggregate(String itemName, StatusNode node, Date ts) { + + // get aggregation profile used (1st one in the list) + String aggProfile = aps.getAvProfiles().get(0); + + + + + + // 
Iterate on children nodes + Iterator> valIter = node.children.entrySet().iterator(); + Entry item = valIter.next(); + StatusNode a = item.getValue(); + StatusNode b = null; + int res = a.item.status; + + + if (node.type.equals("group")) { + + // Create a hashmap for the aggregation groups + Map aGroups = new HashMap(); + // If aggregation target is group then each hashmap item key is the service name + String serviceName = item.getKey(); + String groupName = aps.getGroupByService(aggProfile, serviceName); + // aggregation hashmap is empty so insert the first item + aGroups.put(groupName, a.item.status); + // Iterate over rest of the service items + while (valIter.hasNext()) { + // next item in iteration + item = valIter.next(); + // get the service name from key + serviceName = item.getKey(); + // get the item status information + b = item.getValue(); + // get the aggregation group name based on service name + groupName = aps.getGroupByService(aggProfile, serviceName); + // Now that aggregation hashmap is surely not empty check if groupname exists + if (aGroups.containsKey(groupName)) { + // aggregate the existing value with the new one + // get the appropriate aggregation operation for this service group + int gOp = ops.getIntOperation(aps.getProfileGroupServiceOp(aggProfile, groupName, serviceName)); + // get the existing value from the hashmap + res = aGroups.get(groupName).intValue(); + // calculate the new value + res = ops.opInt(gOp, res, b.item.status); + aGroups.put(groupName, res); + + } + } + + // after completing the individual group aggregations aggregate the total value + int totalOp = ops.getIntOperation(aps.getTotalOp(aggProfile)); + // iterate over the group aggregations + Iterator> aggIter = aGroups.entrySet().iterator(); + res = aggIter.next().getValue(); + // second value to be aggregated in each iteration + int bItem; + while (aggIter.hasNext()) { + bItem = aggIter.next().getValue(); + res = ops.opInt(totalOp, res, bItem); + } + + } else { + + + + + + // aggregate according to rest of the types + while (valIter.hasNext()) { + b = valIter.next().getValue(); + if (node.type.equals("endpoint")) { + int mOp = ops.getIntOperation(aps.getMetricOp(aggProfile)); + res = ops.opInt(mOp, res, b.item.status); + } else if (node.type.equals("service")) { + + String groupName = aps.getGroupByService(aggProfile, itemName); + int eOp = ops.getIntOperation(aps.getProfileGroupServiceOp(aggProfile, groupName, itemName)); + res = ops.opInt(eOp, res, b.item.status); + } else if (node.type.equals("group")) { + //res = ops.opInt(sOp, res, b.item.status); + } + } + } + + return res; + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/AggregationProfileManager.java b/flink_jobs/old-models/stream_status/src/main/java/sync/AggregationProfileManager.java new file mode 100644 index 00000000..15a143c4 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/AggregationProfileManager.java @@ -0,0 +1,345 @@ +package sync; + + + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; 
+import com.google.gson.JsonParser; + + + +public class AggregationProfileManager { + + private HashMap list; + private static final Logger LOG = Logger.getLogger(AggregationProfileManager.class.getName()); + + public AggregationProfileManager() { + + this.list = new HashMap(); + + } + + private class AvProfileItem { + + private String name; + private String namespace; + private String metricProfile; + private String metricOp; + private String groupType; + private String op; + + private HashMap groups; + private HashMap serviceIndex; + + AvProfileItem() { + this.groups = new HashMap(); + this.serviceIndex = new HashMap(); + } + + private class ServGroupItem { + + String op; + HashMap services; + + ServGroupItem(String op) { + this.op = op; + this.services = new HashMap(); + } + } + + // ServGroupItem Declaration Ends Here + + public void insertGroup(String group, String op) { + if (!this.groups.containsKey(group)) { + this.groups.put(group, new ServGroupItem(op)); + } + } + + public void insertService(String group, String service, String op) { + if (this.groups.containsKey(group)) { + this.groups.get(group).services.put(service, op); + this.serviceIndex.put(service, group); + } + } + } + + // AvProfileItem Declaration Ends Here + + public void clearProfiles() { + this.list.clear(); + } + + public String getTotalOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).op; + } + + return ""; + } + + public String getMetricOp(String avProfile) { + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricOp; + } + + return ""; + } + + // Return the available Group Names of a profile + public ArrayList getProfileGroups(String avProfile) { + + if (this.list.containsKey(avProfile)) { + ArrayList result = new ArrayList(); + Iterator groupIterator = this.list.get(avProfile).groups.keySet().iterator(); + + while (groupIterator.hasNext()) { + result.add(groupIterator.next()); + } + + return result; + } + + return null; + } + + // Return the available group operation + public String getProfileGroupOp(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + return this.list.get(avProfile).groups.get(groupName).op; + } + } + + return null; + } + + public ArrayList getProfileGroupServices(String avProfile, String groupName) { + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + ArrayList result = new ArrayList(); + Iterator srvIterator = this.list.get(avProfile).groups.get(groupName).services.keySet() + .iterator(); + + while (srvIterator.hasNext()) { + result.add(srvIterator.next()); + } + + return result; + } + } + + return null; + } + + public String getProfileGroupServiceOp(String avProfile, String groupName, String service) { + + if (this.list.containsKey(avProfile)) { + if (this.list.get(avProfile).groups.containsKey(groupName)) { + if (this.list.get(avProfile).groups.get(groupName).services.containsKey(service)) { + return this.list.get(avProfile).groups.get(groupName).services.get(service); + } + } + } + + return null; + } + + public ArrayList getAvProfiles() { + + if (this.list.size() > 0) { + ArrayList result = new ArrayList(); + Iterator avpIterator = this.list.keySet().iterator(); + while (avpIterator.hasNext()) { + result.add(avpIterator.next()); + } + + return result; + + } + + return null; + } + + public String getProfileNamespace(String avProfile) { + + if 
(this.list.containsKey(avProfile)) { + return this.list.get(avProfile).namespace; + } + + return null; + } + + public String getProfileMetricProfile(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).metricProfile; + } + + return null; + } + + public String getProfileGroupType(String avProfile) { + + if (this.list.containsKey(avProfile)) { + return this.list.get(avProfile).groupType; + } + + return null; + } + + public String getGroupByService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + return this.list.get(avProfile).serviceIndex.get(service); + + } + return null; + + } + + public boolean checkService(String avProfile, String service) { + + if (this.list.containsKey(avProfile)) { + + if (this.list.get(avProfile).serviceIndex.containsKey(service)) { + return true; + } + + } + return false; + + } + + public void loadJson(File jsonFile) throws IOException { + + BufferedReader br = null; + try { + + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(br); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void loadJsonString(String apsJson) throws IOException { + + + try { + + + + JsonParser jsonParser = new JsonParser(); + JsonElement jRootElement = jsonParser.parse(apsJson); + JsonObject jRootObj = jRootElement.getAsJsonObject(); + + // Create new entry for this availability profile + AvProfileItem tmpAvp = new AvProfileItem(); + + JsonArray apGroups = jRootObj.getAsJsonArray("groups"); + + tmpAvp.name = jRootObj.get("name").getAsString(); + tmpAvp.namespace = jRootObj.get("namespace").getAsString(); + tmpAvp.metricProfile = jRootObj.get("metric_profile").getAsJsonObject().get("name").getAsString(); + tmpAvp.metricOp = jRootObj.get("metric_operation").getAsString(); + tmpAvp.groupType = jRootObj.get("endpoint_group").getAsString(); + tmpAvp.op = 
jRootObj.get("profile_operation").getAsString(); + + for ( JsonElement item : apGroups) { + // service name + JsonObject itemObj = item.getAsJsonObject(); + String itemName = itemObj.get("name").getAsString(); + String itemOp = itemObj.get("operation").getAsString(); + JsonArray itemServices = itemObj.get("services").getAsJsonArray(); + tmpAvp.insertGroup(itemName, itemOp); + + for (JsonElement subItem : itemServices) { + JsonObject subObj = subItem.getAsJsonObject(); + String serviceName = subObj.get("name").getAsString(); + String serviceOp = subObj.get("operation").getAsString(); + tmpAvp.insertService(itemName, serviceName,serviceOp); + } + + } + + + // Add profile to the list + this.list.put(tmpAvp.name, tmpAvp); + + + + } catch (JsonParseException ex) { + LOG.error("Contents are not valid json"); + throw ex; + } + + } + + + + +} + diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeCache.java b/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeCache.java new file mode 100644 index 00000000..833ff550 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeCache.java @@ -0,0 +1,122 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.TreeMap; +import argo.avro.Downtime; + +/** + * Downtime Cache holds multiple DowntimeManagers and it's used to keep track of + * downtime feeds during streaming status computation (Downtime data sets for + * later days might come earlier so must be cached. + */ +public class DowntimeCache { + + // cache holding + private TreeMap cache; + private int maxItems; + + public DowntimeCache() { + this.maxItems = 1; + this.cache = new TreeMap(); + + } + + public DowntimeCache(int maxItems) { + this.maxItems = maxItems; + this.cache = new TreeMap(); + } + + /** + * Returns the max number of items that current cache holds + */ + public int getMaxItems() { + return this.maxItems; + } + + /** + * Get for a specific day the corresponding DowntimeManager + */ + public DowntimeManager getDowntimeManager(String dayStamp) { + return cache.get(dayStamp); + } + + /** + * Load downtime dataset from avro file and store it in downtime cache for a + * specific day + */ + public void addFileFeed(String dayStamp, File avroFile) throws IOException { + // check first if the item already exists in the cache so just update it + if (cache.containsKey(dayStamp)) { + cache.get(dayStamp).loadAvro(avroFile); + return; + } + + // check if item has daystamp older than the oldest item in cache + if (cache.size() > 0 && dayStamp.compareTo(cache.firstKey()) < 1) { + return; + } + + DowntimeManager downMgr = new DowntimeManager(); + downMgr.loadAvro(avroFile); + cache.put(dayStamp, downMgr); + + // check if item insertion grew cache outside its limits + if (cache.size() > maxItems) { + // remove oldest + cache.remove(cache.firstKey()); + } + } + + /** + * Load downtime dataset from downtime object list and store it in downtime cache for a + * specific day + */ + public void addFeed(String dayStamp, List downList) { + + // check first if the item already exists in the cache so just update it + if (cache.containsKey(dayStamp)) { + cache.get(dayStamp).loadFromList(downList); + return; + } + + // check if item has daystamp older than the oldest item in cache + if (cache.size() > 0 && dayStamp.compareTo(cache.firstKey()) < 1) { + return; + } + + DowntimeManager downMgr = new DowntimeManager(); + downMgr.loadFromList(downList); + cache.put(dayStamp, 
downMgr); + + // check if item insertion grew cache outside its limits + if (cache.size() > maxItems) { + // remove oldest + cache.remove(cache.firstKey()); + } + } + + /** + * Check if downtime period exists for a specific endpoint (service, hostname, timestamp) + */ + public ArrayList getDowntimePeriod(String dayStamp, String hostname, String service) { + // If downtime manager with data exists for specific day + if (cache.containsKey(dayStamp)) { + // return the downtime period from downtime manager of specific day + return cache.get(dayStamp).getPeriod(hostname, service); + } + + return null; + } + + public void clear() { + this.cache.clear(); + } + + public String toString() { + return this.cache.toString(); + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeManager.java b/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeManager.java new file mode 100644 index 00000000..37f3145a --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/DowntimeManager.java @@ -0,0 +1,196 @@ +package sync; + +import org.apache.log4j.Logger; + +import argo.avro.Downtime; + + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; + +import org.apache.avro.Schema; + +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; + +/** + * DowntimeManager manages supplementary downtime information that is needed in computation of a/r scores for endpoint groups + * Information can be loaded either directly from an avro file or from a list of avro objects + */ +public class DowntimeManager { + + /** + * List of Downtime information items + */ + private ArrayList list; + private static final Logger LOG = Logger.getLogger(DowntimeManager.class.getName()); + + /** + * Inner class that holds information about a downtime item which is a actually a 4-tuple (hostname,service,startTime,endTime) + */ + private class DowntimeItem { + String hostname; // name of host + String service; // name of service + String startTime; // declare start time of downtime + String endTime; // declare end time of downtime + + + + public DowntimeItem(String hostname, String service, String startTime, String endTime) { + this.hostname = hostname; + this.service = service; + this.startTime = startTime; + this.endTime = endTime; + } + + public String toString(){ + return String.format("(%s,%s,%s,%s)",hostname,service,startTime,endTime); + } + + } + + public DowntimeManager() { + this.list = new ArrayList(); + } + /** + * Inserts new downtime information to Donwtime Manager (hostname,service,startTime,endTime) + */ + public int insert(String hostname, String service, String startTime, String endTime) { + DowntimeItem tmpItem = new DowntimeItem(hostname, service, startTime, endTime); + this.list.add(tmpItem); + return 0; // All good + } + + /** + * Returns the downtime period (if any) for a specific service endpoint: (hostname,service) + */ + public ArrayList getPeriod(String hostname, String service) { + + ArrayList period = new ArrayList(); + + for (DowntimeItem item : this.list) { + + if (item.hostname.equals(hostname)) { + if (item.service.equals(service)) { + period.add(item.startTime); + period.add(item.endTime); + return period; + } + 
} + } + + return null; + + } + + /** + * Loads downtime information from an avro file + *

+ * This method loads downtime information contained in an .avro file with a
+ * specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ *   1. start_time: string
+ *   2. end_time: string
+ *   3. service: string
+ *   4. hostname: string
+ *   5. [optional] tags: hashmap (contains a map of arbitrary key values)
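+ *
+ * For illustration, a hypothetical row (invented values, not taken from a
+ * real downtime feed) could look like:
+ *   start_time: "2019-05-07T00:00:00Z", end_time: "2019-05-07T23:59:00Z",
+ *   service: "SRM", hostname: "se01.example.org"
+ * Each such row is stored via insert(hostname, service, startTime, endTime).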
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String hostname = avroRow.get("hostname").toString(); + String service = avroRow.get("service").toString(); + String startTime = avroRow.get("start_time").toString(); + String endTime = avroRow.get("end_time").toString(); + + // Insert data to list + this.insert(hostname, service, startTime, endTime); + + } // end of avro rows + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + /** + * Loads downtime information from a list of downtime objects + * + */ + public void loadFromList( List dnt) { + // IF no downtimes collected return + if (dnt==null) return; + + // For each downtime object in list + for (Downtime item : dnt){ + String hostname = item.getHostname(); + String service = item.getService(); + String startTime = item.getStartTime(); + String endTime = item.getEndTime(); + // Insert data to list + this.insert(hostname,service,startTime,endTime); + } + + + } + + public String toString () { + return Arrays.toString(list.toArray()); + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManager.java b/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManager.java new file mode 100644 index 00000000..66ccb47d --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManager.java @@ -0,0 +1,273 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; +import argo.avro.MetricProfile; + +public class EndpointGroupManager { + + private static final Logger LOG = Logger.getLogger(EndpointGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + public class 
EndpointItem { + String type; // type of group + String group; // name of the group + String service; // type of the service + String hostname; // name of host + HashMap tags; // Tag list + + public EndpointItem() { + // Initializations + this.type = ""; + this.group = ""; + this.service = ""; + this.hostname = ""; + this.tags = new HashMap(); + } + + public EndpointItem(String type, String group, String service, String hostname, HashMap tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + + } + + public String getType() { return type; } + public String getGroup() { return group; } + public String getService() { return service; } + public String getHostname() { return hostname; } + + } + + public Iterator getIterator() { + return this.list.iterator(); + } + + public EndpointGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + + } + + public int insert(String type, String group, String service, String hostname, HashMap tags) { + EndpointItem new_item = new EndpointItem(type, group, service, hostname, tags); + this.list.add(new_item); + return 0; // All good + } + + public boolean checkEndpoint(String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.hostname.equals(hostname) && item.service.equals(service)) { + return true; + } + } + + return false; + } + + public ArrayList getGroup(String type, String hostname, String service) { + + ArrayList results = new ArrayList(); + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + results.add(item.group); + } + } + + return results; + } + + public HashMap getGroupTags(String type, String hostname, String service) { + + for (EndpointItem item : fList) { + if (item.type.equals(type) && item.hostname.equals(hostname) && item.service.equals(service)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public void unfilter() { + this.fList.clear(); + for (EndpointItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (EndpointItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + // First Check binary tags as Y/N 0/1 + + if (fTagItem.getValue().equalsIgnoreCase("y") || fTagItem.getValue().equalsIgnoreCase("n")) { + String binValue = ""; + if (fTagItem.getValue().equalsIgnoreCase("y")) + binValue = "1"; + if (fTagItem.getValue().equalsIgnoreCase("n")) + binValue = "0"; + + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(binValue) == false) { + trim = true; + } + } else if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + /** + * Loads endpoint grouping information from an avro file + *

+ * This method loads endpoint grouping information contained in an .avro
+ * file with a specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ *   1. type: string (describes the type of grouping)
+ *   2. group: string
+ *   3. service: string
+ *   4. hostname: string
+ *   5. tags: hashmap (contains a map of arbitrary key values)
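+ *
+ * For illustration, a hypothetical row (invented values) could look like:
+ *   type: "SITES", group: "SITE-A", service: "CREAM-CE",
+ *   hostname: "ce01.example.org", tags: {"production": "1"}
+ * Each such row is stored via insert(type, group, service, hostname, tags).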
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab Avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String service = avroRow.get("service").toString(); + String hostname = avroRow.get("hostname").toString(); + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + + } // end of avro rows + + this.unfilter(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + public ArrayList getList(){ + return this.list; + } + + /** + * Loads information from a list of EndpointGroup objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List egp) { + + // For each endpoint group record + for (GroupEndpoint item : egp){ + String type = item.getType(); + String group = item.getGroup(); + String service = item.getService(); + String hostname = item.getHostname(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + } + + this.unfilter(); + + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManagerV2.java b/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManagerV2.java new file mode 100644 index 00000000..c0a406a0 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/EndpointGroupManagerV2.java @@ -0,0 +1,267 @@ +package sync; + + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + + +import org.apache.avro.Schema; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; + +public class EndpointGroupManagerV2 { + + private static final Logger LOG = Logger.getLogger(EndpointGroupManager.class.getName()); + + private Map> list; + private Map> groupIndex; + + 
private String defaultType = null; + + public class EndpointItem { + String type; // type of group + String group; // name of the group + String service; // type of the service + String hostname; // name of host + HashMap tags; // Tag list + + public EndpointItem() { + // Initializations + this.type = ""; + this.group = ""; + this.service = ""; + this.hostname = ""; + this.tags = new HashMap(); + } + + public EndpointItem(String type, String group, String service, String hostname, HashMap tags) { + this.type = type; + this.group = group; + this.service = service; + this.hostname = hostname; + this.tags = tags; + + } + + public String getType() { return type; } + public String getGroup() { return group; } + public String getService() { return service; } + public String getHostname() { return hostname; } + + } + + public Map> getList(){ + return this.list; + } + + public EndpointGroupManagerV2() { + this.list = new HashMap>(); + this.groupIndex = new HashMap>(); + + + } + + + public int insert(String type, String group, String service, String hostname, HashMap tags) { + EndpointItem itemNew = new EndpointItem(type, group, service, hostname, tags); + String key = type + "|" + hostname + "|" + service; + if (!list.containsKey(key)){ + Map subList = new HashMap(); + subList.put(group, itemNew); + list.put(key,subList); + + } else { + Map subList = list.get(key); + subList.put(group, itemNew); + } + // Add item to the secondary group index + if (!groupIndex.containsKey(group)){ + groupIndex.put(group, new ArrayList(Arrays.asList(itemNew))); + } else { + groupIndex.get(group).add(itemNew); + } + + return 0; // All good + } + + public boolean checkEndpoint(String hostname, String service) { + + String key = defaultType + "|" + hostname + "|" + service; + return list.containsKey(key); + } + + public ArrayList getGroupFull(String type, String hostname, String service) { + + String key = type + "|" + hostname + "|" + service; + Map sublist = list.get(key); + if (sublist != null) { + return new ArrayList(list.get(key).keySet()); + } + + return new ArrayList(); + + } + + public Iterator getGroupIter(String group) { + ArrayList list = groupIndex.get(group); + if (list!=null){ + return list.iterator(); + } + + return null; + } + + public ArrayList getGroup(String hostname, String service) { + + + String key = defaultType + "|" + hostname + "|" + service; + Map sublist = list.get(key); + if (sublist != null) { + return new ArrayList(list.get(key).keySet()); + } + + return new ArrayList(); + + } + + public String getTagUrl(String group, String hostname, String service) { + + String key = defaultType + "|" + hostname + "|" + service; + Map sublist = list.get(key); + + EndpointItem item=sublist.get(group); + String url =""; + if(item.tags.get("info.URL")!=null){ + url=item.tags.get("info.URL"); + } + + return url; + + } + + + /** + * Loads endpoint grouping information from an avro file + *

+ * This method loads endpoint grouping information contained in an .avro
+ * file with a specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ *   1. type: string (describes the type of grouping)
+ *   2. group: string
+ *   3. service: string
+ *   4. hostname: string
+ *   5. tags: hashmap (contains a map of arbitrary key values)
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab Avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String service = avroRow.get("service").toString(); + String hostname = avroRow.get("hostname").toString(); + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + defaultType=type; + + } // end of avro rows + + + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + + /** + * Loads information from a list of EndpointGroup objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List egp) { + + // For each endpoint group record + for (GroupEndpoint item : egp){ + String type = item.getType(); + String group = item.getGroup(); + String service = item.getService(); + String hostname = item.getHostname(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, service, hostname, tagMap); + defaultType=type; + } + + + + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/GroupGroupManager.java b/flink_jobs/old-models/stream_status/src/main/java/sync/GroupGroupManager.java new file mode 100644 index 00000000..f27f3d67 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/GroupGroupManager.java @@ -0,0 +1,232 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; + +public class GroupGroupManager { + + static Logger log = Logger.getLogger(GroupGroupManager.class.getName()); + + private ArrayList list; + private ArrayList fList; + + private class GroupItem { + String type; // type of group + String group; 
// name of the group + String subgroup; // name of sub-group + HashMap tags; // Tag list + + public GroupItem() { + // Initializations + this.type = ""; + this.group = ""; + this.subgroup = ""; + this.tags = new HashMap(); + } + + public GroupItem(String type, String group, String subgroup, HashMap tags) { + this.type = type; + this.group = group; + this.subgroup = subgroup; + this.tags = tags; + + } + + } + + public GroupGroupManager() { + this.list = new ArrayList(); + this.fList = new ArrayList(); + } + + public int insert(String type, String group, String subgroup, HashMap tags) { + GroupItem new_item = new GroupItem(type, group, subgroup, tags); + this.list.add(new_item); + return 0; // All good + } + + public HashMap getGroupTags(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.tags; + } + } + + return null; + } + + public int count() { + return this.fList.size(); + } + + public String getGroup(String type, String subgroup) { + for (GroupItem item : this.fList) { + if (item.type.equals(type) && item.subgroup.equals(subgroup)) { + return item.group; + } + } + + return null; + } + + public void unfilter() { + this.fList.clear(); + for (GroupItem item : this.list) { + this.fList.add(item); + } + } + + public void filter(TreeMap fTags) { + this.fList.clear(); + boolean trim; + for (GroupItem item : this.list) { + trim = false; + HashMap itemTags = item.tags; + for (Entry fTagItem : fTags.entrySet()) { + + if (itemTags.containsKey(fTagItem.getKey())) { + if (itemTags.get(fTagItem.getKey()).equalsIgnoreCase(fTagItem.getValue()) == false) { + trim = true; + } + + } + } + + if (trim == false) { + fList.add(item); + } + } + } + + public boolean checkSubGroup(String subgroup) { + for (GroupItem item : fList) { + if (item.subgroup.equals(subgroup)) { + return true; + } + } + + return false; + } + + /** + * Loads groups of groups information from an avro file + *

+ * This method loads groups of groups information contained in an .avro file
+ * with a specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ *   1. type: string (describes the type of grouping)
+ *   2. group: string
+ *   3. subgroup: string
+ *   4. tags: hashmap (contains a map of arbitrary key values)
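+ *
+ * For illustration, a hypothetical row (invented values) could look like:
+ *   type: "NGI", group: "NGI-A", subgroup: "SITE-A",
+ *   tags: {"certification": "Certified"}
+ * Each such row is stored via insert(type, group, subgroup, tags).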
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + HashMap tags = (HashMap) avroRow.get("tags"); + if (tags != null) { + for (Object item : tags.keySet()) { + tagMap.put(item.toString(), tags.get(item).toString()); + } + } + + // Grab 1st level mandatory fields + String type = avroRow.get("type").toString(); + String group = avroRow.get("group").toString(); + String subgroup = avroRow.get("subgroup").toString(); + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + + } // end of avro rows + + this.unfilter(); + + dataFileReader.close(); + + } catch (IOException ex) { + log.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + + /** + * Loads group of group information from a list of GroupGroup objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List ggp) { + + // For each group of groups record + for (GroupGroup item : ggp){ + String type = item.getType(); + String group = item.getGroup(); + String subgroup = item.getSubgroup(); + + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(type, group, subgroup, tagMap); + } + + this.unfilter(); + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/main/java/sync/MetricProfileManager.java b/flink_jobs/old-models/stream_status/src/main/java/sync/MetricProfileManager.java new file mode 100644 index 00000000..12ea8143 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/java/sync/MetricProfileManager.java @@ -0,0 +1,265 @@ +package sync; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.avro.Schema; +import org.apache.avro.Schema.Field; +import org.apache.avro.file.DataFileReader; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.apache.avro.util.Utf8; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import argo.avro.MetricProfile; + +public class MetricProfileManager { + + private static final Logger LOG = Logger.getLogger(MetricProfileManager.class.getName()); + + private ArrayList list; + private Map>> index; + + private class ProfileItem { + String profile; // Name of the profile + String service; // Name of the service type + String metric; // Name of the metric + HashMap tags; // Tag list + + public 
ProfileItem() { + // Initializations + this.profile = ""; + this.service = ""; + this.metric = ""; + this.tags = new HashMap(); + } + + public ProfileItem(String profile, String service, String metric, HashMap tags) { + this.profile = profile; + this.service = service; + this.metric = metric; + this.tags = tags; + } + } + + public MetricProfileManager() { + this.list = new ArrayList(); + this.index = new HashMap>>(); + } + + // Clear all profile data (both list and indexes) + public void clear() { + this.list = new ArrayList(); + this.index = new HashMap>>(); + } + + // Indexed List Functions + public int indexInsertProfile(String profile) { + if (!index.containsKey(profile)) { + index.put(profile, new HashMap>()); + return 0; + } + return -1; + } + + public void insert(String profile, String service, String metric, HashMap tags) { + ProfileItem tmpProfile = new ProfileItem(profile, service, metric, tags); + this.list.add(tmpProfile); + this.indexInsertMetric(profile, service, metric); + } + + public int indexInsertService(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return -1; + } else { + index.get(profile).put(service, new ArrayList()); + return 0; + } + + } + + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + return 0; + + } + + public int indexInsertMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) { + // Metric exists so no insertion + return -1; + } + // Metric doesn't exist and must be added + index.get(profile).get(service).add(metric); + return 0; + } else { + // Create the service and the metric + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + } + + } + // No profile - service - metric so add them all + index.put(profile, new HashMap>()); + index.get(profile).put(service, new ArrayList()); + index.get(profile).get(service).add(metric); + return 0; + + } + + // Getter Functions + + public ArrayList getProfileServices(String profile) { + if (index.containsKey(profile)) { + ArrayList ans = new ArrayList(); + ans.addAll(index.get(profile).keySet()); + return ans; + } + return null; + + } + + public ArrayList getProfiles() { + if (index.size() > 0) { + ArrayList ans = new ArrayList(); + ans.addAll(index.keySet()); + return ans; + } + return null; + } + + public ArrayList getProfileServiceMetrics(String profile, String service) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + return index.get(profile).get(service); + } + } + return null; + } + + public boolean checkProfileServiceMetric(String profile, String service, String metric) { + if (index.containsKey(profile)) { + if (index.get(profile).containsKey(service)) { + if (index.get(profile).get(service).contains(metric)) + return true; + } + } + + return false; + } + + /** + * Loads metric profile information from an avro file + *

+ * This method loads metric profile information contained in an .avro file
+ * with a specific avro schema.
+ *
+ * The following fields are expected to be found in each avro row:
+ *   1. profile: string
+ *   2. service: string
+ *   3. metric: string
+ *   4. [optional] tags: hashmap (contains a map of arbitrary key values)
      + * + * @param avroFile + * a File object of the avro file that will be opened + * @throws IOException + * if there is an error during opening of the avro file + */ + @SuppressWarnings("unchecked") + public void loadAvro(File avroFile) throws IOException { + + // Prepare Avro File Readers + DatumReader datumReader = new GenericDatumReader(); + DataFileReader dataFileReader = null; + try { + dataFileReader = new DataFileReader(avroFile, datumReader); + + // Grab avro schema + Schema avroSchema = dataFileReader.getSchema(); + + // Generate 1st level generic record reader (rows) + GenericRecord avroRow = new GenericData.Record(avroSchema); + + // For all rows in file repeat + while (dataFileReader.hasNext()) { + // read the row + avroRow = dataFileReader.next(avroRow); + HashMap tagMap = new HashMap(); + + // Generate 2nd level generic record reader (tags) + + HashMap tags = (HashMap) (avroRow.get("tags")); + + if (tags != null) { + for (Utf8 item : tags.keySet()) { + tagMap.put(item.toString(), String.valueOf(tags.get(item))); + } + } + + // Grab 1st level mandatory fields + String profile = avroRow.get("profile").toString(); + String service = avroRow.get("service").toString(); + String metric = avroRow.get("metric").toString(); + + // Insert data to list + this.insert(profile, service, metric, tagMap); + + } // end of avro rows + + dataFileReader.close(); + + } catch (IOException ex) { + LOG.error("Could not open avro file:" + avroFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(dataFileReader); + } + + } + + /** + * Loads metric profile information from a list of MetricProfile objects + * + */ + @SuppressWarnings("unchecked") + public void loadFromList( List mps) { + + // For each metric profile object in list + for (MetricProfile item : mps){ + String profile = item.getProfile(); + String service = item.getService(); + String metric = item.getMetric(); + HashMap tagMap = new HashMap(); + HashMap tags = (HashMap) item.getTags(); + + if (tags != null) { + for (String key : tags.keySet()) { + tagMap.put(key, tags.get(key)); + } + } + + // Insert data to list + this.insert(profile, service, metric, tagMap); + } + + + } + + +} diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_03.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_03.avro new file mode 100644 index 00000000..ff0cd90a Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_03.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_v2.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_v2.avro new file mode 100644 index 00000000..b31d809c Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/downtimes_v2.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/group_endpoints_v2.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/group_endpoints_v2.avro new file mode 100644 index 00000000..68b4dbf9 Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/group_endpoints_v2.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/group_groups_v2.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/group_groups_v2.avro new file mode 100644 index 00000000..d4e82bfe Binary files /dev/null and 
b/flink_jobs/old-models/stream_status/src/main/resources/avro/group_groups_v2.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_2017_03_02.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_2017_03_02.avro new file mode 100644 index 00000000..d922a8f0 Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_2017_03_02.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_v2.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_v2.avro new file mode 100644 index 00000000..fac926ca Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/poem_sync_v2.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/avro/weights_v2.avro b/flink_jobs/old-models/stream_status/src/main/resources/avro/weights_v2.avro new file mode 100644 index 00000000..7b45958b Binary files /dev/null and b/flink_jobs/old-models/stream_status/src/main/resources/avro/weights_v2.avro differ diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_01.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_01.base64 new file mode 100644 index 00000000..f4d87374 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_01.base64 @@ -0,0 +1 @@ +Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaFHdtcy5panMuc2kGV01TKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaGndtczIuYXJuZXMuc2kGV01TKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQy
Mzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51awxBUkMtQ0UoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFoubGNnbW9uMDIucGh5LmJyaXMuYWMudWsUZ0xpdGUtQVBFTCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWipsY2dhcmcucGh5LmJyaXMuYWMudWsSZW1pLkFSR1VTKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaNGxjZ25ldG1vbjAyLnBoeS5icmlzLmFjLnVrLm5ldC5wZXJmU09OQVIuQmFuZHdpZHRoKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaMGxjZ25ldG1vbi5waHkuYnJpcy5hYy51aypuZXQucGVyZlNPTkFSLkxhdGVuY3koMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsMZ0xFeGVjKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWskb3JnLm5vcmR1Z3JpZC5hcmV4KDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaMGxjZ2JkaWkwMi5waHkuYnJpcy5hYy51axJTaXRlLUJESUkoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51awxnTEV4ZWMoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaKmxjZ3NxMS5waHkuYnJpcy5hYy51aypvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFoqbGNnc3EyLnBoeS5icmlzLmFjLnVrKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrDFhSb290RCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrHGdsb2J1cy1HUklERlRQKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaLGxjZ2NlMDIucGh5LmJyaXMuYWMudWs8b3JnLm9wZW5zY2llbmNlZ3JpZC5odGNvbmRvcmNlKDIwMTgtMDUtMDlUMDA6MDA6MDBaKDIwMTgtMDUtMDlUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsIQVBFTCgyMDE4LTA1LTA5VDAwOjAwOjAwWigyMDE4LTA1LTA5VDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFosbGNnc2UwMS5waHkuYnJpcy5hYy51awx3ZWJkYXYoMjAxOC0wNS0wOVQwMDowMDowMFooMjAxOC0wNS0wOVQyMzo1OTowMFo= \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_02.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_02.base64 new file mode 100644 index 00000000..bfdba2b0 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_02.base64 @@ -0,0 +1 @@ 
+Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaFHdtcy5panMuc2kGV01TKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaGndtczIuYXJuZXMuc2kGV01TKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51awxBUkMtQ0UoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFoubGNnbW9uMDIucGh5LmJyaXMuYWMudWsUZ0xpdGUtQVBFTCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWipsY2dhcmcucGh5LmJyaXMuYWMudWsSZW1pLkFSR1VTKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaNGxjZ25ldG1vbjAyLnBoeS5icmlzLmFjLnVrLm5ldC5wZXJmU09OQVIuQmFuZHdpZHRoKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaMGxjZ25ldG1vbi5waHkuYnJpcy5hYy51aypuZXQucGVyZlNPTkFSLkxhdGVuY3koMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsMZ0xFeGVjKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWskb3JnLm5vcmR1Z3JpZC5hcmV4KDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaMGxjZ2JkaWkwMi5waHkuYnJpcy5hYy51axJTaXRlLUJESUkoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51awxnTEV4ZWMoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaKmxjZ3NxMS5waHkuYnJpcy5hYy51aypvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFoqbGNnc3EyLnBoeS5icmlzLmFjLnVrKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrDFhSb290RCgyMDE4LTA1LT
EwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrHGdsb2J1cy1HUklERlRQKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaLGxjZ2NlMDIucGh5LmJyaXMuYWMudWs8b3JnLm9wZW5zY2llbmNlZ3JpZC5odGNvbmRvcmNlKDIwMTgtMDUtMTBUMDA6MDA6MDBaKDIwMTgtMDUtMTBUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsIQVBFTCgyMDE4LTA1LTEwVDAwOjAwOjAwWigyMDE4LTA1LTEwVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFosbGNnc2UwMS5waHkuYnJpcy5hYy51awx3ZWJkYXYoMjAxOC0wNS0xMFQwMDowMDowMFooMjAxOC0wNS0xMFQyMzo1OTowMFoic2l0ZS5ocGMudXRmc20uY2wSU2l0ZS1CRElJKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaImNlMDIuaHBjLnV0ZnNtLmNsEENSRUFNLUNFKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaImNlMDEuaHBjLnV0ZnNtLmNsEENSRUFNLUNFKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaHnNlLmhwYy51dGZzbS5jbAZTUk0oMjAxOC0wNS0xMFQwMjowMDowMFooMjAxOC0wNS0xMFQwODowMDowMFoiYXBlbC5ocGMudXRmc20uY2wUZ0xpdGUtQVBFTCgyMDE4LTA1LTEwVDAyOjAwOjAwWigyMDE4LTA1LTEwVDA4OjAwOjAwWiBwc2IuaHBjLnV0ZnNtLmNsLm5ldC5wZXJmU09OQVIuQmFuZHdpZHRoKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaIHBzbC5ocGMudXRmc20uY2wqbmV0LnBlcmZTT05BUi5MYXRlbmN5KDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaJGFyZ3VzLmhwYy51dGZzbS5jbBJlbWkuQVJHVVMoMjAxOC0wNS0xMFQwMjowMDowMFooMjAxOC0wNS0xMFQwODowMDowMFoiY2UwMS5ocGMudXRmc20uY2wMZ0xFeGVjKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaImNlMDIuaHBjLnV0ZnNtLmNsDGdMRXhlYygyMDE4LTA1LTEwVDAyOjAwOjAwWigyMDE4LTA1LTEwVDA4OjAwOjAwWiRwcm94eS5ocGMudXRmc20uY2wqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaImNlMDEuaHBjLnV0ZnNtLmNsCEFQRUwoMjAxOC0wNS0xMFQwMjowMDowMFooMjAxOC0wNS0xMFQwODowMDowMFoiY2UwMi5ocGMudXRmc20uY2wIQVBFTCgyMDE4LTA1LTEwVDAyOjAwOjAwWigyMDE4LTA1LTEwVDA4OjAwOjAwWiJhcGVsLmhwYy51dGZzbS5jbDJldS5lZ2kuc3RvcmFnZS5hY2NvdW50aW5nKDIwMTgtMDUtMTBUMDI6MDA6MDBaKDIwMTgtMDUtMTBUMDg6MDA6MDBaGm1xLmNyby1uZ2kuaHIaZWdpLk1TR0Jyb2tlcigyMDE4LTA1LTEwVDA4OjAwOjAwWigyMDE4LTA1LTEwVDEyOjAwOjAwWg== \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_03.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_03.base64 new file mode 100644 index 00000000..43a72c61 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_03.base64 @@ -0,0 +1 @@ 
+Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51awxBUkMtQ0UoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoubGNnbW9uMDIucGh5LmJyaXMuYWMudWsUZ0xpdGUtQVBFTCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWipsY2dhcmcucGh5LmJyaXMuYWMudWsSZW1pLkFSR1VTKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaNGxjZ25ldG1vbjAyLnBoeS5icmlzLmFjLnVrLm5ldC5wZXJmU09OQVIuQmFuZHdpZHRoKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaMGxjZ25ldG1vbi5waHkuYnJpcy5hYy51aypuZXQucGVyZlNPTkFSLkxhdGVuY3koMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFosbGNnY2UwMS5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsMZ0xFeGVjKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWskb3JnLm5vcmR1Z3JpZC5hcmV4KDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaMGxjZ2JkaWkwMi5waHkuYnJpcy5hYy51axJTaXRlLUJESUkoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51awxnTEV4ZWMoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFosbGNnY2UwMi5waHkuYnJpcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaKmxjZ3NxMS5waHkuYnJpcy5hYy51aypvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoqbGNnc3EyLnBoeS5icmlzLmFjLnVrKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrDFhSb290RCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrHGdsb2J1cy1HUklERlRQKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaLGxjZ2
NlMDIucGh5LmJyaXMuYWMudWs8b3JnLm9wZW5zY2llbmNlZ3JpZC5odGNvbmRvcmNlKDIwMTgtMDUtMTJUMDA6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaLGxjZ2NlMDEucGh5LmJyaXMuYWMudWsIQVBFTCgyMDE4LTA1LTEyVDAwOjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixsY2dzZTAxLnBoeS5icmlzLmFjLnVrMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFosbGNnc2UwMS5waHkuYnJpcy5hYy51awx3ZWJkYXYoMjAxOC0wNS0xMlQwMDowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoubGludWNzLXVpLTAxLmNzLmluZm4uaXQEVUkoMjAxOC0wNS0xMlQxODowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoubGludWNzLWNlLTAxLmNzLmluZm4uaXQIQVBFTCgyMDE4LTA1LTEyVDE4OjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixsaW51Y3MtYmRpaS5jcy5pbmZuLml0ElNpdGUtQkRJSSgyMDE4LTA1LTEyVDE4OjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWi5saW51Y3MtY2UtMDEuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTEyVDE4OjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWjJhcGVsLXB1Ymxpc2hlci5jcy5pbmZuLml0FGdMaXRlLUFQRUwoMjAxOC0wNS0xMlQxODowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTEyVDE4OjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixyZWNhcy11aS0wMS5jcy5pbmZuLml0BFVJKDIwMTgtMDUtMTJUMTg6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQUZXUuZWdpLk1QSSgyMDE4LTA1LTEyVDE4OjAwOjAwWigyMDE4LTA1LTEyVDIzOjU5OjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0CEFQRUwoMjAxOC0wNS0xMlQxODowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdAZTUk0oMjAxOC0wNS0xMlQxODowMDowMFooMjAxOC0wNS0xMlQyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdDJldS5lZ2kuc3RvcmFnZS5hY2NvdW50aW5nKDIwMTgtMDUtMTJUMTg6MDA6MDBaKDIwMTgtMDUtMTJUMjM6NTk6MDBa \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_04.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_04.base64 new file mode 100644 index 00000000..19c133a7 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_04.base64 @@ -0,0 +1 @@ 
+Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoubGludWNzLXVpLTAxLmNzLmluZm4uaXQEVUkoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoubGludWNzLWNlLTAxLmNzLmluZm4uaXQIQVBFTCgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWixsaW51Y3MtYmRpaS5jcy5pbmZuLml0ElNpdGUtQkRJSSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWi5saW51Y3MtY2UtMDEuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWjJhcGVsLXB1Ymxpc2hlci5jcy5pbmZuLml0FGdMaXRlLUFQRUwoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWixyZWNhcy11aS0wMS5jcy5pbmZuLml0BFVJKDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQUZXUuZWdpLk1QSSgyMDE4LTA1LTEzVDAwOjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0CEFQRUwoMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdAZTUk0oMjAxOC0wNS0xM1QwMDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdDJldS5lZ2kuc3RvcmFnZS5hY2NvdW50aW5nKDIwMTgtMDUtMTNUMDA6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaLmNhcmNlcmkuaGVjLmxhbmNzLmFjLnVrCEFQRUwoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFouY2FyY2VyaS5oZWMubGFuY3MuYWMudWsQQ1JFQU0tQ0UoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoyZmFsLXB5Z3JpZC0zMC5sYW5jcy5hYy51awZTUk0oMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoyZmFsLXB5Z3JpZC0xOS5sYW5jcy5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTNUMTQ6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaMnB5Z3JpZC1zb25hcjEubGFuY3MuYWMudWsubmV0LnBlcmZTT05BUi5CYW5kd2lkdGgoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC
0wNS0xM1QyMzo1OTowMFoycHlncmlkLXNvbmFyMi5sYW5jcy5hYy51aypuZXQucGVyZlNPTkFSLkxhdGVuY3koMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFo6cHlncmlkLWtyYWtlbi5oZWMubGFuY3MuYWMudWsqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTNUMTQ6MDA6MDBaKDIwMTgtMDUtMTNUMjM6NTk6MDBaMmZhbC1weWdyaWQtMTUubGFuY3MuYWMudWsIQU1HQSgyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWi5ncmVuZGVsLmhlYy5sYW5jcy5hYy51awxBUkMtQ0UoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFouZ3JlbmRlbC5oZWMubGFuY3MuYWMudWsIQVBFTCgyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWipncmlkcHAtdm0ubGFuY3MuYWMudWsgdWsuYWMuZ3JpZHBwLnZhYygyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWipncmlkcHAtdm0ubGFuY3MuYWMudWsIQVBFTCgyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWjJweS1mamFsYXIuaGVjLmxhbmNzLmFjLnVrElNpdGUtQkRJSSgyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWkBweWdyaWQtbGV2aWF0aGFuLmhlYy5sYW5jcy5hYy51aypvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFosc3RvcmFnZS5kYXRhY2VudHJlZC5pby5jb20uY2VwaC5vYmplY3Qtc3RvcmFnZSgyMDE4LTA1LTEzVDE0OjAwOjAwWigyMDE4LTA1LTEzVDIzOjU5OjAwWjJmYWwtcHlncmlkLTMwLmxhbmNzLmFjLnVrMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFoyZmFsLXB5Z3JpZC0zMC5sYW5jcy5hYy51awx3ZWJkYXYoMjAxOC0wNS0xM1QxNDowMDowMFooMjAxOC0wNS0xM1QyMzo1OTowMFo= \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_05.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_05.base64 new file mode 100644 index 00000000..8c74000f --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_05.base64 @@ -0,0 +1 @@ +Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xNFQwMDowMDowMF
ooMjAxOC0wNS0xNFQyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoubGludWNzLXVpLTAxLmNzLmluZm4uaXQEVUkoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoubGludWNzLWNlLTAxLmNzLmluZm4uaXQIQVBFTCgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWixsaW51Y3MtYmRpaS5jcy5pbmZuLml0ElNpdGUtQkRJSSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWi5saW51Y3MtY2UtMDEuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWjJhcGVsLXB1Ymxpc2hlci5jcy5pbmZuLml0FGdMaXRlLUFQRUwoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWixyZWNhcy11aS0wMS5jcy5pbmZuLml0BFVJKDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQUZXUuZWdpLk1QSSgyMDE4LTA1LTE0VDAwOjAwOjAwWigyMDE4LTA1LTE0VDIzOjU5OjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0CEFQRUwoMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdAZTUk0oMjAxOC0wNS0xNFQwMDowMDowMFooMjAxOC0wNS0xNFQyMzo1OTowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdDJldS5lZ2kuc3RvcmFnZS5hY2NvdW50aW5nKDIwMTgtMDUtMTRUMDA6MDA6MDBaKDIwMTgtMDUtMTRUMjM6NTk6MDBa \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_06.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_06.base64 new file mode 100644 index 00000000..a023e9be --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_06.base64 @@ -0,0 +1 @@ +Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoibGNnY2UyLn
NoZWYuYWMudWsIQVBFTCgyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWhxsY2cuc2hlZi5hYy51axJTaXRlLUJESUkoMjAxOC0wNS0xNVQwNzowMDowMFooMjAxOC0wNS0xNVQwOTowMDowMFoibGNnY2UyLnNoZWYuYWMudWsQQ1JFQU0tQ0UoMjAxOC0wNS0xNVQwNzowMDowMFooMjAxOC0wNS0xNVQwOTowMDowMFoibGNnc2UwLnNoZWYuYWMudWsGU1JNKDIwMTgtMDUtMTVUMDc6MDA6MDBaKDIwMTgtMDUtMTVUMDk6MDA6MDBaJGxjZ2FwZWwuc2hlZi5hYy51axRnTGl0ZS1BUEVMKDIwMTgtMDUtMTVUMDc6MDA6MDBaKDIwMTgtMDUtMTVUMDk6MDA6MDBaJGxjZ3BlcmYuc2hlZi5hYy51ay5uZXQucGVyZlNPTkFSLkJhbmR3aWR0aCgyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWiRsY2dwZXJmLnNoZWYuYWMudWsqbmV0LnBlcmZTT05BUi5MYXRlbmN5KDIwMTgtMDUtMTVUMDc6MDA6MDBaKDIwMTgtMDUtMTVUMDk6MDA6MDBaImxjZ2NlMi5zaGVmLmFjLnVrDGdMRXhlYygyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWiJsY2djZTEuc2hlZi5hYy51axBDUkVBTS1DRSgyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWiJsY2djZTEuc2hlZi5hYy51awhBUEVMKDIwMTgtMDUtMTVUMDc6MDA6MDBaKDIwMTgtMDUtMTVUMDk6MDA6MDBaImxjZ2NlMS5zaGVmLmFjLnVrDGdMRXhlYygyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWiZsY2dzcXVpZC5zaGVmLmFjLnVrKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTE1VDA3OjAwOjAwWigyMDE4LTA1LTE1VDA5OjAwOjAwWiJsY2djZTMuc2hlZi5hYy51awxBUkMtQ0UoMjAxOC0wNS0xNVQwNzowMDowMFooMjAxOC0wNS0xNVQwOTowMDowMFoibGNnc2UwLnNoZWYuYWMudWsMWFJvb3REKDIwMTgtMDUtMTVUMDc6MDA6MDBaKDIwMTgtMDUtMTVUMDk6MDA6MDBaImxjZ3NlMC5zaGVmLmFjLnVrMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xNVQwNzowMDowMFooMjAxOC0wNS0xNVQwOTowMDowMFoubGludWNzLXVpLTAxLmNzLmluZm4uaXQEVUkoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMDowMDowMFoubGludWNzLWNlLTAxLmNzLmluZm4uaXQIQVBFTCgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIwOjAwOjAwWixsaW51Y3MtYmRpaS5jcy5pbmZuLml0ElNpdGUtQkRJSSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIwOjAwOjAwWi5saW51Y3MtY2UtMDEuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIwOjAwOjAwWjJhcGVsLXB1Ymxpc2hlci5jcy5pbmZuLml0FGdMaXRlLUFQRUwoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMDowMDowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdBBDUkVBTS1DRSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIwOjAwOjAwWixyZWNhcy11aS0wMS5jcy5pbmZuLml0BFVJKDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjA6MDA6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQUZXUuZWdpLk1QSSgyMDE4LTA1LTE1VDAwOjAwOjAwWigyMDE4LTA1LTE1VDIwOjAwOjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0CEFQRUwoMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMDowMDowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdAZTUk0oMjAxOC0wNS0xNVQwMDowMDowMFooMjAxOC0wNS0xNVQyMDowMDowMFoscmVjYXMtc2UtMDEuY3MuaW5mbi5pdDJldS5lZ2kuc3RvcmFnZS5hY2NvdW50aW5nKDIwMTgtMDUtMTVUMDA6MDA6MDBaKDIwMTgtMDUtMTVUMjA6MDA6MDBaLmxpbnVjcy11aS0wMS5jcy5pbmZuLml0BFVJKDIwMTgtMDUtMTVUMTg6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaLmxpbnVjcy1jZS0wMS5jcy5pbmZuLml0CEFQRUwoMjAxOC0wNS0xNVQxODowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFosbGludWNzLWJkaWkuY3MuaW5mbi5pdBJTaXRlLUJESUkoMjAxOC0wNS0xNVQxODowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoubGludWNzLWNlLTAxLmNzLmluZm4uaXQQQ1JFQU0tQ0UoMjAxOC0wNS0xNVQxODowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoyYXBlbC1wdWJsaXNoZXIuY3MuaW5mbi5pdBRnTGl0ZS1BUEVMKDIwMTgtMDUtMTVUMTg6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQQQ1JFQU0tQ0UoMjAxOC0wNS0xNVQxODowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoscmVjYXMtdWktMDEuY3MuaW5mbi5pdARVSSgyMDE4LTA1LTE1VDE4OjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0FGV1LmVnaS5NUEkoMjAxOC0wNS0xNVQxODowMDowMFooMjAxOC0wNS0xNVQyMzo1OTowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdAhBUEVMKDIwMTgtMDUtMTVUMTg6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaLHJlY2FzLXNlLTAxLmNzLmluZm4uaXQGU1JNKDIwMTgtMDUtMTVUMTg6MDA6MDBaKDIwMTgtMDUtMTVUMjM6NTk6MDBaLHJlY2FzLXNlLTAxLmNzLmluZm4uaXQyZXUuZWdpLnN0b3JhZ2UuYWNjb3VudGluZygyMDE4L
TA1LTE1VDE4OjAwOjAwWigyMDE4LTA1LTE1VDIzOjU5OjAwWg== \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_07.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_07.base64 new file mode 100644 index 00000000..5c98e76b --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_07.base64 @@ -0,0 +1 @@ +Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xNlQwMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaDmdndXMuZXUQZWdpLkdHVVMoMjAxOC0wNS0xNlQwNjowMDowMFooMjAxOC0wNS0xNlQwNzozMDowMFomY2x1c3RlcjUwLmtudS5hYy5rcghBUEVMKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaKGNsdXN0ZXIxMzEua251LmFjLmtyEFRvcC1CRElJKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaJmNsdXN0ZXI1MS5rbnUuYWMua3ISU2l0ZS1CRElJKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaJGNsdXN0ZXI2LmtudS5hYy5rcg5NeVByb3h5KDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IQQ1JFQU0tQ0UoMjAxOC0wNS0xNlQwMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFooY2x1c3RlcjE0Mi5rbnUuYWMua3IGU1JNKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaJGNsdXN0ZXI0LmtudS5hYy5rchRnTGl0ZS1BUEVMKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IMZ0xFeGVjKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaKGNsdXN0ZXIxNDQua251LmFjLmtyKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWihjbHVzdGVyMzUwLmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xNlQwMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFooY2x1c3RlcjE0Mi5rbnUuYWMua3IyZXUuZWdpLnN0b3JhZ2UuYWNjb3VudGluZygyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWh5ncmlkMDUudW5pZ2UuY2gGU1JNKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaInNpdGViZGlpLnVuaWdlLmNoElNpdGUtQkRJSSgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWiBncmlkdm0zLnVuaWdlLmNoDEFSQy1DRSgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWi5ncmlucjAwNC5pbnIudHJvaXRzay5ydQxWTy1ib3goMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFosZ3JpbnIxMi5pbnIudHJvaXRzay5ydSpvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFowZ3JjcmVhbWNlLmluci50cm9pdHNrLnJ1CEFQRUwoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFosZ3JpbnIwNS5pbnIudHJvaXRzay5ydQZXTVMoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFosZ3JpbnIwOC5pbnIudHJvaXRzay5ydRBUb3AtQkRJSSgyMDE4LTA1LTE2VDEwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWi5ncmlucjAwNC5pbnIudHJvaXRzay5ydQRVSSgyMDE4LTA1LTE2VDEwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWixncmlucjA5Lmluci50cm9pdHNrLnJ1ElNpdGUtQkRJSSgyMDE4LTA1LTE2VDEwOjAwOjAwWigyMDE4LTA1LTE2VDIzOjU5OjAwWjBncmNyZWFtY2UuaW5yLnRyb2l0c2sucnUQQ1JFQU0tQ0UoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFo0Z3JjcmVhbWNlMDEuaW5yLnRyb2l0c2sucnUQQ1JFQU0tQ0UoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFosZ3JzZTAwMS5pbnIudHJvaXRzay5ydQZTUk0oMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFosZ3JpbnIwMi5pbnIudHJvaXRzay5ydRRnTGl0ZS1BUEVMKDIwMTgtMDUtMTZUMTA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6M
DBaMGdyY3JlYW1jZS5pbnIudHJvaXRzay5ydQxnTEV4ZWMoMjAxOC0wNS0xNlQxMDowMDowMFooMjAxOC0wNS0xNlQyMzo1OTowMFo0Z3JjcmVhbWNlMDEuaW5yLnRyb2l0c2sucnUMZ0xFeGVjKDIwMTgtMDUtMTZUMTA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaLG5vZGUyMDcuaW5yLnRyb2l0c2sucnUqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTZUMTA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaLGdyaW5yMDYuaW5yLnRyb2l0c2sucnUqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTZUMTA6MDA6MDBaKDIwMTgtMDUtMTZUMjM6NTk6MDBaInNhbXBhY2UuaWYudXNwLmJyCEFQRUwoMjAxOC0wNS0xNlQxMTozMDowMFooMjAxOC0wNS0xNlQxNDowMDowMFoic2FtcGF2by5pZi51c3AuYnIMVk8tYm94KDIwMTgtMDUtMTZUMTE6MzA6MDBaKDIwMTgtMDUtMTZUMTQ6MDA6MDBaJnNhbXBhYmRpaS5pZi51c3AuYnISU2l0ZS1CRElJKDIwMTgtMDUtMTZUMTE6MzA6MDBaKDIwMTgtMDUtMTZUMTQ6MDA6MDBaInNhbXBhY2UuaWYudXNwLmJyEENSRUFNLUNFKDIwMTgtMDUtMTZUMTE6MzA6MDBaKDIwMTgtMDUtMTZUMTQ6MDA6MDBaJnNhbXBhYXBlbC5pZi51c3AuYnIUZ0xpdGUtQVBFTCgyMDE4LTA1LTE2VDExOjMwOjAwWigyMDE4LTA1LTE2VDE0OjAwOjAwWihzYW1wYWZzdDAxLmlmLnVzcC5iciJYUm9vdEQuUmVkaXJlY3RvcigyMDE4LTA1LTE2VDExOjMwOjAwWigyMDE4LTA1LTE2VDE0OjAwOjAwWiZzYW1wYXBzMDIuaWYudXNwLmJyLm5ldC5wZXJmU09OQVIuQmFuZHdpZHRoKDIwMTgtMDUtMTZUMTE6MzA6MDBaKDIwMTgtMDUtMTZUMTQ6MDA6MDBaKHNhbXBhcHJveHkuaWYudXNwLmJyKm9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTE2VDExOjMwOjAwWigyMDE4LTA1LTE2VDE0OjAwOjAwWiZzYW1wYXBzMDEuaWYudXNwLmJyKm5ldC5wZXJmU09OQVIuTGF0ZW5jeSgyMDE4LTA1LTE2VDExOjMwOjAwWigyMDE4LTA1LTE2VDE0OjAwOjAwWi5saW51Y3MtdWktMDEuY3MuaW5mbi5pdARVSSgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDE3OjU3OjAwWi5saW51Y3MtY2UtMDEuY3MuaW5mbi5pdAhBUEVMKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMTc6NTc6MDBaLGxpbnVjcy1iZGlpLmNzLmluZm4uaXQSU2l0ZS1CRElJKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMTc6NTc6MDBaLmxpbnVjcy1jZS0wMS5jcy5pbmZuLml0EENSRUFNLUNFKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMTc6NTc6MDBaMmFwZWwtcHVibGlzaGVyLmNzLmluZm4uaXQUZ0xpdGUtQVBFTCgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDE3OjU3OjAwWixyZWNhcy1jZS0wMi5jcy5pbmZuLml0EENSRUFNLUNFKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMTc6NTc6MDBaLHJlY2FzLXVpLTAxLmNzLmluZm4uaXQEVUkoMjAxOC0wNS0xNlQwMDowMDowMFooMjAxOC0wNS0xNlQxNzo1NzowMFoscmVjYXMtY2UtMDIuY3MuaW5mbi5pdBRldS5lZ2kuTVBJKDIwMTgtMDUtMTZUMDA6MDA6MDBaKDIwMTgtMDUtMTZUMTc6NTc6MDBaLHJlY2FzLWNlLTAyLmNzLmluZm4uaXQIQVBFTCgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDE3OjU3OjAwWixyZWNhcy1zZS0wMS5jcy5pbmZuLml0BlNSTSgyMDE4LTA1LTE2VDAwOjAwOjAwWigyMDE4LTA1LTE2VDE3OjU3OjAwWixyZWNhcy1zZS0wMS5jcy5pbmZuLml0MmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xNlQwMDowMDowMFooMjAxOC0wNS0xNlQxNzo1NzowMFo= \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_08.base64 b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_08.base64 new file mode 100644 index 00000000..26b2765c --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/base64/downtimes_08.base64 @@ -0,0 +1 @@ 
+Nm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbCB1bmljb3JlNi5HYXRld2F5KDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDh1bmljb3JlNi5UYXJnZXRTeXN0ZW1GYWN0b3J5KDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaNm9keXNzZXVzLnRyb2phbi5rZG0ud2Nzcy5wbDR1bmljb3JlNi5TdG9yYWdlTWFuYWdlbWVudCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWjZvZHlzc2V1cy50cm9qYW4ua2RtLndjc3MucGwidW5pY29yZTYuUmVnaXN0cnkoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFo2b2R5c3NldXMudHJvamFuLmtkbS53Y3NzLnBsInVuaWNvcmU2LlJlZ2lzdHJ5KDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaJmNsdXN0ZXI1MC5rbnUuYWMua3IIQVBFTCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWihjbHVzdGVyMTMxLmtudS5hYy5rchBUb3AtQkRJSSgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiZjbHVzdGVyNTEua251LmFjLmtyElNpdGUtQkRJSSgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiRjbHVzdGVyNi5rbnUuYWMua3IOTXlQcm94eSgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyEENSRUFNLUNFKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyBlNSTSgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiRjbHVzdGVyNC5rbnUuYWMua3IUZ0xpdGUtQVBFTCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiZjbHVzdGVyNTAua251LmFjLmtyDGdMRXhlYygyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWihjbHVzdGVyMTQ0LmtudS5hYy5rcipvcmcuc3F1aWQtY2FjaGUuU3F1aWQoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFooY2x1c3RlcjM1MC5rbnUuYWMua3Iqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaKGNsdXN0ZXIxNDIua251LmFjLmtyMmV1LmVnaS5zdG9yYWdlLmFjY291bnRpbmcoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFoeZ3JpZDA1LnVuaWdlLmNoBlNSTSgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWiJzaXRlYmRpaS51bmlnZS5jaBJTaXRlLUJESUkoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFogZ3JpZHZtMy51bmlnZS5jaAxBUkMtQ0UoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFouZ3JpbnIwMDQuaW5yLnRyb2l0c2sucnUMVk8tYm94KDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaLGdyaW5yMTIuaW5yLnRyb2l0c2sucnUqb3JnLnNxdWlkLWNhY2hlLlNxdWlkKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaMGdyY3JlYW1jZS5pbnIudHJvaXRzay5ydQhBUEVMKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaLGdyaW5yMDUuaW5yLnRyb2l0c2sucnUGV01TKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaLGdyaW5yMDguaW5yLnRyb2l0c2sucnUQVG9wLUJESUkoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFouZ3JpbnIwMDQuaW5yLnRyb2l0c2sucnUEVUkoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFosZ3JpbnIwOS5pbnIudHJvaXRzay5ydRJTaXRlLUJESUkoMjAxOC0wNS0xN1QwMDowMDowMFooMjAxOC0wNS0xN1QyMzo1OTowMFowZ3JjcmVhbWNlLmluci50cm9pdHNrLnJ1EENSRUFNLUNFKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaNGdyY3JlYW1jZTAxLmluci50cm9pdHNrLnJ1EENSRUFNLUNFKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaLGdyc2UwMDEuaW5yLnRyb2l0c2sucnUGU1JNKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaLGdyaW5yMDIuaW5yLnRyb2l0c2sucnUUZ0xpdGUtQVBFTCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWjBncmNyZWFtY2UuaW5yLnRyb2l0c2sucnUMZ0xFeGVjKDIwMTgtMDUtMTdUMDA6MDA6MDBaKDIwMTgtMDUtMTdUMjM6NTk6MDBaNGdyY3JlYW1jZTAxLmluci50cm9pdHNrLnJ1DGdMRXhlYygyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWixub2RlMjA3Lmluci50cm9pdHNrLnJ1Km9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWixncmlucjA2Lmluci50cm9pdHNrLnJ1Km9yZy5zcXVpZC1jYWNoZS5TcXVpZCgyMDE4LTA1LTE3VDAwOjAwOjAwWigyMDE4LTA1LTE3VDIzOjU5OjAwWg== \ No newline at end of file diff --git 
a/flink_jobs/old-models/stream_status/src/main/resources/log4j.properties b/flink_jobs/old-models/stream_status/src/main/resources/log4j.properties new file mode 100644 index 00000000..da32ea0f --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-algorithm.json b/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-algorithm.json new file mode 100644 index 00000000..b88d8c99 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-algorithm.json @@ -0,0 +1,239 @@ +{ + "id": "1b0318f0-429d-44fc-8bba-07184354c73b", + "name": "egi_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", 
+ "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] +} diff --git a/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-ar.json b/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-ar.json new file mode 100644 index 00000000..5d59b900 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/ops/EGI-ar.json @@ -0,0 +1,11 @@ +{ + "counters":{ + "up":["OK","WARNING"], + "unknown":["UNKNOWN","MISSING"], + "downtime":["DOWNTIME"] + }, + "computations":{ + "availability":"(up/t) - (1.0 - (unknown / t))", + "reliability":"(up/t) - (1.0 - (unknown / t) - (downtime/t))" + } +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/main/resources/ops/ap1.json b/flink_jobs/old-models/stream_status/src/main/resources/ops/ap1.json new file mode 100644 index 00000000..d754320c --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/ops/ap1.json @@ -0,0 +1,64 @@ +{ + "id": "297c368a-524f-4144-9eb6-924fae5f08fa", + "name": "ap1", + "namespace": "test", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "CH.CERN.SAM.ARGO_MON_CRITICAL", + "id": "c81fdb7b-d8f8-4ff9-96c5-6a0c336e2b25" + }, + "groups": [ + { + "name": "compute", + "operation": "OR", + "services": [ + { + "name": "CREAM-CE", + "operation": "OR" + }, + { + "name": "ARC-CE", + "operation": "OR" + }, + { + "name": "GRAM5", + "operation": "OR" + }, + { + "name": "unicore6.TargetSystemFactory", + "operation": "OR" + }, + { + "name": "QCG.Computing", + "operation": "OR" + } + ] + }, + { + "name": "storage", + "operation": "OR", + "services": [ + { + "name": "SRMv2", + "operation": "OR" + }, + { + "name": "SRM", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "Site-BDII", + "operation": "OR" + } + ] + } + ] + } diff --git a/flink_jobs/old-models/stream_status/src/main/resources/ops/ap2.json b/flink_jobs/old-models/stream_status/src/main/resources/ops/ap2.json new file mode 100644 index 00000000..fda7868f --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/ops/ap2.json @@ -0,0 +1,54 @@ +{ + "id": "337c368a-524f-4144-9eb6-924fae5f08fa", + "name": "fedcloud", + "namespace": "egi", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": 
{ + "name": "ch.cern.sam.CLOUD-MON", + "id": "c88fdb7b-d8f8-4ff9-96c5-6a0c336e2b25" + }, + "groups": [ + { + "name": "accounting", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.accounting", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.information.bdii", + "operation": "OR" + } + ] + }, + { + "name": "storage-management", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.storage-management.cdmi", + "operation": "OR" + } + ] + }, + { + "name": "vm-management", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.vm-management.occi", + "operation": "OR" + } + ] + } + ] +} diff --git a/flink_jobs/old-models/stream_status/src/main/resources/ops/config.json b/flink_jobs/old-models/stream_status/src/main/resources/ops/config.json new file mode 100644 index 00000000..c2c550e5 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/ops/config.json @@ -0,0 +1,83 @@ +{ + "id": "c800846f-8478-4af8-85d1-a3f12fe4c18f", + "info": { + "name": "Critical", + "description": "EGI report for Roc critical", + "created": "2015-10-19 10:35:49", + "updated": "2015-10-19 10:35:49" + }, + "tenant": "EGI", + "topology_schema": { + "group": { + "type": "NGI", + "group": { + "type": "SITES" + } + } + }, + "weight": "hepspec", + "profiles": [ + { + "id": "433beb2c-45cc-49d4-a8e0-b132bb30327e", + "name": "ch.cern.sam.ROC_CRITICAL", + "type": "metric" + }, + { + "id": "17d1462f-8f91-4728-a253-1a6e8e2e848d", + "name": "ops1", + "type": "operations" + }, + { + "id": "1ef8c0c9-f9ef-4ca1-9ee7-bb8b36332036", + "name": "critical", + "type": "aggregation" + } + ], + "filter_tags": [ + { + "name": "production", + "value": "1", + "context": "endpoint_groups" + }, + { + "name": "monitored", + "value": "1", + "context": "endpoint_groups" + }, + { + "name": "scope", + "value": "EGI", + "context": "endpoint_groups" + }, + { + "name": "scope", + "value": "EGI", + "context": "group_of_groups" + }, + { + "name": "infrastructure", + "value": "Production", + "context": "group_of_groups" + }, + { + "name": "certification", + "value": "Certified", + "context": "group_of_groups" + }, + { + "name": "vo", + "value": "ops", + "context": "metric_data" + }, + { + "name": "vo_fqan", + "value": "ops", + "context": "metric_data" + }, + { + "name": "roc", + "value": "any", + "context": "metric_data" + } + ] + } diff --git a/flink_jobs/old-models/stream_status/src/main/resources/schemas/metric_data.avsc b/flink_jobs/old-models/stream_status/src/main/resources/schemas/metric_data.avsc new file mode 100644 index 00000000..737e0ead --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/main/resources/schemas/metric_data.avsc @@ -0,0 +1,18 @@ +{"namespace": "argo.avro", + "type": "record", + "name": "metric_data", + "fields": [ + {"name": "timestamp", "type": "string"}, + {"name": "service", "type": "string"}, + {"name": "hostname", "type": "string"}, + {"name": "metric", "type": "string"}, + {"name": "status", "type": "string"}, + {"name": "monitoring_host", "type": ["null", "string"]}, + {"name": "summary", "type": ["null", "string"]}, + {"name": "message", "type": ["null", "string"]}, + {"name": "tags", "type" : ["null", {"name" : "Tags", + "type" : "map", + "values" : ["null", "string"] + }] + }] +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/test/java/ops/ConfigManagerTest.java b/flink_jobs/old-models/stream_status/src/test/java/ops/ConfigManagerTest.java new file 
mode 100644 index 00000000..705bf107 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/ops/ConfigManagerTest.java @@ -0,0 +1,58 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class ConfigManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ConfigManagerTest.class.getResource("/ops/config.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Load the resource file + URL resJsonFile = OpsManagerTest.class.getResource("/ops/config.json"); + File jsonFile = new File(resJsonFile.toURI()); + + // Instantiate a new ConfigManager and load the test file + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(jsonFile); + + // Assert that the simple fields are loaded correctly + assertEquals("EGI", cfgMgr.tenant); + assertEquals("Critical", cfgMgr.report); + assertEquals("SITES", cfgMgr.egroup); + assertEquals("NGI", cfgMgr.ggroup); + assertEquals("hepspec", cfgMgr.weight); + assertEquals("c800846f-8478-4af8-85d1-a3f12fe4c18f",cfgMgr.id); + + // Assert compound fields + assertEquals("Production", cfgMgr.ggroupTags.get("infrastructure")); + assertEquals("Certified", cfgMgr.ggroupTags.get("certification")); + assertEquals("EGI", cfgMgr.ggroupTags.get("scope")); + + // Assert compound fields + assertEquals("1", cfgMgr.egroupTags.get("production")); + assertEquals("1", cfgMgr.egroupTags.get("monitored")); + assertEquals("EGI", cfgMgr.egroupTags.get("scope")); + + // Assert compound fields + assertEquals("ops", cfgMgr.mdataTags.get("vo")); + assertEquals("ops", cfgMgr.mdataTags.get("vo_fqan")); + assertEquals("any", cfgMgr.mdataTags.get("roc")); + + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/ops/OpsManagerTest.java b/flink_jobs/old-models/stream_status/src/test/java/ops/OpsManagerTest.java new file mode 100644 index 00000000..c713da7b --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/ops/OpsManagerTest.java @@ -0,0 +1,64 @@ +package ops; + +import static org.junit.Assert.*; + +import java.io.File; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + + +public class OpsManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", OpsManagerTest.class.getResource("/ops/EGI-algorithm.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resJsonFile = OpsManagerTest.class.getResource("/ops/EGI-algorithm.json"); + File JsonFile = new File(resJsonFile.toURI()); + // Instatiate class + OpsManager opsMgr = new OpsManager(); + // Test loading file + opsMgr.loadJson(JsonFile); + + // Test the available states + ArrayList avStates = new ArrayList(); + avStates.add("OK"); + avStates.add("WARNING"); + avStates.add("UNKNOWN"); + avStates.add("MISSING"); + avStates.add("CRITICAL"); + avStates.add("DOWNTIME"); + + assertEquals("Retrieve Available States", opsMgr.availableStates(), avStates); + + // Test the available operations + ArrayList avOps = new ArrayList(); + avOps.add("AND"); + 
avOps.add("OR"); + assertEquals("Retrieve Available Operations", opsMgr.availableOps(), avOps); + + // Test the available operations on a variety of states + assertEquals("OK (OR) OK = OK", opsMgr.op("OR", "OK", "OK"), "OK"); + assertEquals("OK (OR) CRITICAL = OK", opsMgr.op("OR", "CRITICAL", "OK"), "OK"); + assertEquals("CRITICAL (OR) MISSING = CRITICAL", opsMgr.op("OR", "CRITICAL", "MISSING"), "CRITICAL"); + assertEquals("WARNING (OR) MISSING = WARNING", opsMgr.op("OR", "WARNING", "MISSING"), "WARNING"); + assertEquals("WARNING (AND) MISSING = MISSING", opsMgr.op("AND", "WARNING", "MISSING"), "MISSING"); + assertEquals("OK (AND) CRITICAL = CRITICAL", opsMgr.op("AND", "OK", "CRITICAL"), "CRITICAL"); + assertEquals("DOWNTIME (AND) UNKNOWN = DOWNTIME", opsMgr.op("AND", "DOWNTIME", "UNKNOWN"), "DOWNTIME"); + + assertEquals("Default Downtime Status = DOWNTIME", opsMgr.getDefaultDown(), "DOWNTIME"); + System.out.println(opsMgr.getDefaultMissingInt()); + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/status/StatusManagerTest.java b/flink_jobs/old-models/stream_status/src/test/java/status/StatusManagerTest.java new file mode 100644 index 00000000..0a8b09d9 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/status/StatusManagerTest.java @@ -0,0 +1,332 @@ +package status; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Date; + + + +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.gson.Gson; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +public class StatusManagerTest { + + + public JsonObject getJSON (String jsonSTR) { + + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonObject jRoot = jsonParser.parse(jsonSTR).getAsJsonObject(); + return jRoot; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", StatusManagerTest.class.getResource("/ops/ap1.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + + + + // Prepare Resource File + URL resAPSJsonFile = StatusManagerTest.class.getResource("/ops/ap1.json"); + File jsonAPSFile = new File(resAPSJsonFile.toURI()); + + URL resOPSJsonFile = StatusManagerTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonOPSFile = new File(resOPSJsonFile.toURI()); + + URL resEGPAvroFile = StatusManagerTest.class.getResource("/avro/group_endpoints_v2.avro"); + File avroEGPFile = new File(resEGPAvroFile.toURI()); + + URL resMPSAvroFile = StatusManagerTest.class.getResource("/avro/poem_sync_2017_03_02.avro"); + File avroMPSFile = new File(resMPSAvroFile.toURI()); + + URL resDownAvroFile = StatusManagerTest.class.getResource("/avro/downtimes_03.avro"); + File avroDownFile = new File(resDownAvroFile.toURI()); + + StatusManager sm = new StatusManager(); + sm.setReport("Critical"); + sm.loadAllFiles("2017-03-03", avroDownFile, avroEGPFile, avroMPSFile, jsonAPSFile, jsonOPSFile); + + Date ts1 = sm.fromZulu("2017-03-03T00:00:00Z"); + + sm.addNewGroup("GR-01-AUTH",sm.ops.getIntStatus("OK"), ts1); + ArrayList list = sm.setStatus("GR-01-AUTH", "CREAM-CE", "cream01.grid.auth.gr", "emi.cream.CREAMCE-JobCancel", + "CRITICAL", "mon01.argo.eu", 
"2017-03-03T00:00:00Z","sum1","msg1",""); + ArrayList list2 = sm.setStatus("GR-01-AUTH","CREAM-CE", "cream01.grid.auth.gr", "eu.egi.CREAM-IGTF", "WARNING", + "mon01.argo.eu", "2017-03-03T05:00:00Z","sum2","msg2",""); + ArrayList list3 = sm.setStatus("GR-01-AUTH","CREAM-CE", "cream01.grid.auth.gr", "emi.cream.CREAMCE-JobCancel", "OK", + "mon01.argo.eu", "2017-03-03T09:00:00Z","sum3","msg3",""); + ArrayList list4 = sm.setStatus("GR-01-AUTH","CREAM-CE", "cream01.grid.auth.gr", "eu.egi.CREAM-IGTF", "OK", + "mon01.argo.eu", "2017-03-03T15:00:00Z","sum4","msg4",""); + + + Gson gson = new Gson(); + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonElement jRoot = jsonParser.parse(list4.get(0)); + + String jproc = jRoot.getAsJsonObject().get("ts_processed").getAsString(); + StatusEvent evnt = new StatusEvent("Critical","metric","20170303","GR-01-AUTH", "CREAM-CE", "cream01.grid.auth.gr", + "eu.egi.CREAM-IGTF", "OK", "mon01.argo.eu", "2017-03-03T15:00:00Z", jproc,"WARNING","2017-03-03T05:00:00Z", "false","sum4","msg4",""); + + evnt.setStatusMetric(new String[] {"OK","WARNING","2017-03-03T15:00:00Z","2017-03-03T05:00:00Z"}); + + + + assertTrue(gson.toJson(evnt).equals(list4.get(0))); + + + + + sm.addNewGroup("UKI-LT2-IC-HEP",sm.ops.getIntStatus("OK"), ts1); + + + // This should create 4 events + ArrayList elist01 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod05.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", + "CRITICAL", "mon01.argo.eu", "2017-03-03T11:00:00Z","sum_A","msg_A",""); + assertTrue(elist01.size()==4); + JsonObject j01 = getJSON(elist01.get(0)); + JsonObject j02 = getJSON(elist01.get(1)); + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T11:00:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T11:00:00Z")); + assertTrue(j01.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j02.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j01.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + + assertTrue(j01.get("summary").getAsString().equals("sum_A")); + assertTrue(j01.get("message").getAsString().equals("msg_A")); + assertTrue(j02.get("summary").getAsString().equals("sum_A")); + assertTrue(j02.get("message").getAsString().equals("msg_A")); + + ArrayList elist02 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod06.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", + "CRITICAL", "mon01.argo.eu", "2017-03-03T12:00:00Z","sum_B","msg_B",""); + + assertTrue(elist02.size()==4); + j01 = getJSON(elist02.get(0)); + j02 = getJSON(elist02.get(1)); + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T12:00:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T12:00:00Z")); + assertTrue(j01.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j02.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + 
assertTrue(j01.get("hostname").getAsString().equals("ceprod06.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("ceprod06.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + + assertTrue(j01.get("summary").getAsString().equals("sum_B")); + assertTrue(j01.get("message").getAsString().equals("msg_B")); + assertTrue(j02.get("summary").getAsString().equals("sum_B")); + assertTrue(j02.get("message").getAsString().equals("msg_B")); + + + ArrayList elist03 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod07.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", + "CRITICAL", "mon01.argo.eu", "2017-03-03T14:00:00Z","sum_C","msg_C",""); + + assertTrue(elist03.size()==4); + j01 = getJSON(elist03.get(0)); + j02 = getJSON(elist03.get(1)); + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T14:00:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T14:00:00Z")); + assertTrue(j01.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j02.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j01.get("hostname").getAsString().equals("ceprod07.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("ceprod07.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + + assertTrue(j01.get("summary").getAsString().equals("sum_C")); + assertTrue(j01.get("message").getAsString().equals("msg_C")); + assertTrue(j02.get("summary").getAsString().equals("sum_C")); + assertTrue(j02.get("message").getAsString().equals("msg_C")); + // This should create 3 events metric,endpoint and service as all services endpoints turned into critical + ArrayList elist04 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod08.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", + "CRITICAL", "mon01.argo.eu", "2017-03-03T16:00:00Z","sum_D","msg_D",""); + + assertTrue(elist04.size()==4); + j01 = getJSON(elist04.get(0)); + j02 = getJSON(elist04.get(1)); + JsonObject j03 = getJSON(elist04.get(2)); + + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j03.get("type").getAsString().equals("service")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T16:00:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T16:00:00Z")); + assertTrue(j03.get("ts_monitored").getAsString().equals("2017-03-03T16:00:00Z")); + assertTrue(j01.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j02.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j03.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j01.get("hostname").getAsString().equals("ceprod08.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("ceprod08.grid.hep.ph.ic.ac.uk")); + assertTrue(j03.get("hostname").getAsString().equals("ceprod08.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + assertTrue(j03.get("status").getAsString().equals("CRITICAL")); + + 
assertTrue(j01.get("summary").getAsString().equals("sum_D")); + assertTrue(j01.get("message").getAsString().equals("msg_D")); + assertTrue(j02.get("summary").getAsString().equals("sum_D")); + assertTrue(j02.get("message").getAsString().equals("msg_D")); + assertTrue(j03.get("summary").getAsString().equals("sum_D")); + assertTrue(j03.get("message").getAsString().equals("msg_D")); + + // Site remains ok due to the ARC-CE service. + // Turn ARC-CE service to Critical + + + // This should create 2 events metric + ArrayList elist05 = sm.setStatus("UKI-LT2-IC-HEP", "ARC-CE", "cetest01.grid.hep.ph.ic.ac.uk", "org.nordugrid.ARC-CE-sw-csh", + "CRITICAL", "mon01.argo.eu", "2017-03-03T19:00:00Z","sum_E","msg_E",""); + assertTrue(elist05.size()==4); + j01 = getJSON(elist05.get(0)); + j02 = getJSON(elist05.get(1)); + + + + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T19:00:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T19:00:00Z")); + assertTrue(j01.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j02.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j01.get("hostname").getAsString().equals("cetest01.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("cetest01.grid.hep.ph.ic.ac.uk")); + + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + + assertTrue(j01.get("summary").getAsString().equals("sum_E")); + assertTrue(j01.get("message").getAsString().equals("msg_E")); + assertTrue(j02.get("summary").getAsString().equals("sum_E")); + assertTrue(j02.get("message").getAsString().equals("msg_E")); + + + // This should create 4 events metric,endpoint,service and finally endpoint group (the whole site) + ArrayList elist06 = sm.setStatus("UKI-LT2-IC-HEP", "ARC-CE", "cetest02.grid.hep.ph.ic.ac.uk", "org.nordugrid.ARC-CE-sw-csh", + "CRITICAL", "mon01.argo.eu", "2017-03-03T21:30:00Z","sum_X","msg_X",""); + + + assertTrue(elist06.size()==4); + j01 = getJSON(elist06.get(0)); + j02 = getJSON(elist06.get(1)); + j03 = getJSON(elist06.get(2)); + JsonObject j04 = getJSON(elist06.get(3)); + + + + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j03.get("type").getAsString().equals("service")); + assertTrue(j04.get("type").getAsString().equals("endpoint_group")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T21:30:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T21:30:00Z")); + assertTrue(j03.get("ts_monitored").getAsString().equals("2017-03-03T21:30:00Z")); + assertTrue(j04.get("ts_monitored").getAsString().equals("2017-03-03T21:30:00Z")); + assertTrue(j01.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j02.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j03.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j04.get("metric").getAsString().equals("org.nordugrid.ARC-CE-sw-csh")); + assertTrue(j01.get("hostname").getAsString().equals("cetest02.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("cetest02.grid.hep.ph.ic.ac.uk")); + assertTrue(j03.get("hostname").getAsString().equals("cetest02.grid.hep.ph.ic.ac.uk")); + 
assertTrue(j04.get("hostname").getAsString().equals("cetest02.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("CRITICAL")); + assertTrue(j02.get("status").getAsString().equals("CRITICAL")); + assertTrue(j03.get("status").getAsString().equals("CRITICAL")); + assertTrue(j03.get("status").getAsString().equals("CRITICAL")); + assertTrue(j01.get("summary").getAsString().equals("sum_X")); + assertTrue(j01.get("message").getAsString().equals("msg_X")); + assertTrue(j02.get("summary").getAsString().equals("sum_X")); + assertTrue(j02.get("message").getAsString().equals("msg_X")); + assertTrue(j03.get("summary").getAsString().equals("sum_X")); + assertTrue(j03.get("message").getAsString().equals("msg_X")); + assertTrue(j04.get("summary").getAsString().equals("sum_X")); + assertTrue(j04.get("message").getAsString().equals("msg_X")); + + // This should create 4 events metric,endpoint,service and finally endpoint group (the whole site) + ArrayList elist07 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod05.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", + "OK", "mon01.argo.eu", "2017-03-03T22:30:00Z","","",""); + + + + assertTrue(elist07.size()==4); + j01 = getJSON(elist07.get(0)); + j02 = getJSON(elist07.get(1)); + j03 = getJSON(elist07.get(2)); + j04 = getJSON(elist07.get(3)); + + + assertTrue(j01.get("type").getAsString().equals("metric")); + assertTrue(j02.get("type").getAsString().equals("endpoint")); + assertTrue(j03.get("type").getAsString().equals("service")); + assertTrue(j04.get("type").getAsString().equals("endpoint_group")); + assertTrue(j01.get("ts_monitored").getAsString().equals("2017-03-03T22:30:00Z")); + assertTrue(j02.get("ts_monitored").getAsString().equals("2017-03-03T22:30:00Z")); + assertTrue(j03.get("ts_monitored").getAsString().equals("2017-03-03T22:30:00Z")); + assertTrue(j04.get("ts_monitored").getAsString().equals("2017-03-03T22:30:00Z")); + assertTrue(j01.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j02.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j03.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j04.get("metric").getAsString().equals("emi.cream.CREAMCE-JobCancel")); + assertTrue(j01.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j02.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j03.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j04.get("hostname").getAsString().equals("ceprod05.grid.hep.ph.ic.ac.uk")); + assertTrue(j01.get("status").getAsString().equals("OK")); + assertTrue(j02.get("status").getAsString().equals("OK")); + assertTrue(j03.get("status").getAsString().equals("OK")); + assertTrue(j03.get("status").getAsString().equals("OK")); + + assertTrue(j01.get("summary").getAsString().equals("")); + assertTrue(j01.get("message").getAsString().equals("")); + assertTrue(j02.get("summary").getAsString().equals("")); + assertTrue(j02.get("message").getAsString().equals("")); + assertTrue(j03.get("summary").getAsString().equals("")); + assertTrue(j03.get("message").getAsString().equals("")); + + + // downtime affected should not create event + sm.addNewGroup("GR-07-UOI-HEPLAB",sm.ops.getIntStatus("OK"), ts1); + ArrayList elist08 = sm.setStatus("GR-07-UOI-HEPLAB", "CREAM-CE", "grid01.physics.uoi.gr", "emi.cream.CREAMCE-JobCancel", "CRITICAL", "mon01.argo.eu", "2017-03-03T22:45:00Z", "", "",""); + 
assertEquals(0,elist08.size()); + + // downtime affected should not create events + sm.addNewGroup("ru-Moscow-FIAN-LCG2",sm.ops.getIntStatus("OK"), ts1); + ArrayList elist09 = sm.setStatus("ru-Moscow-FIAN-LCG2", "Site-BDII", "ce1.grid.lebedev.ru", "org.bdii.Freshness", "CRITICAL", "mon01.argo.eu", "2017-03-03T22:55:00Z", "", "",""); + assertEquals(0,elist09.size()); + + // not affected site-bdii should generate events + sm.addNewGroup("WUT",sm.ops.getIntStatus("OK"), ts1); + ArrayList elist10 = sm.setStatus("WUT", "Site-BDII", "bdii.if.pw.edu.pl", "org.bdii.Freshness", "CRITICAL", "mon01.argo.eu", "2017-03-03T23:00:00Z", "", "",""); + assertEquals(4,elist10.size()); + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/.gitignore b/flink_jobs/old-models/stream_status/src/test/java/sync/.gitignore new file mode 100644 index 00000000..b83d2226 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/.gitignore @@ -0,0 +1 @@ +/target/ diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/AvailabilityProfilesTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/AvailabilityProfilesTest.java new file mode 100644 index 00000000..28f4b89c --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/AvailabilityProfilesTest.java @@ -0,0 +1,102 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import ops.OpsManagerTest; + +import org.junit.BeforeClass; +import org.junit.Test; + +import junitx.framework.ListAssert; + +public class AvailabilityProfilesTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", AvailabilityProfilesTest.class.getResource("/ops/ap1.json")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resJsonFile = OpsManagerTest.class.getResource("/ops/ap1.json"); + File jsonFile = new File(resJsonFile.toURI()); + // Instatiate class + AggregationProfileManager avp = new AggregationProfileManager(); + avp.clearProfiles(); + avp.loadJson(jsonFile); + + // Check that only one availability profile was loaded + assertEquals("Only 1 av profile present", avp.getAvProfiles().size(), 1); + + ArrayList expApList = new ArrayList(); + expApList.add("ap1"); + + // Check the profile list is correct + assertEquals("Profile list check", avp.getAvProfiles(), expApList); + + // Check the profile namespace + assertEquals("Profile namespace", avp.getProfileNamespace("ap1"), "test"); + + // Check the profile groupType + assertEquals("Profile group type", avp.getProfileGroupType("ap1"), "sites"); + + // Set the expected profile groups + ArrayList expGroups = new ArrayList(); + expGroups.add("information"); + expGroups.add("compute"); + expGroups.add("storage"); + // Check the available group list + ListAssert.assertEquals("Profile Groups", avp.getProfileGroups("ap1"), expGroups); + + // Check compute group service list + ArrayList expServices = new ArrayList(); + expServices.add("GRAM5"); + expServices.add("QCG.Computing"); + expServices.add("ARC-CE"); + expServices.add("unicore6.TargetSystemFactory"); + expServices.add("CREAM-CE"); + + ListAssert.assertEquals("compute service list", avp.getProfileGroupServices("ap1", "compute"), expServices); + + // Check storage group service list + expServices = new ArrayList(); + 
expServices.add("SRM"); + expServices.add("SRMv2"); + ListAssert.assertEquals("storage service list", avp.getProfileGroupServices("ap1", "storage"), expServices); + + // Check storage group service list + expServices = new ArrayList(); + expServices.add("Site-BDII"); + ListAssert.assertEquals("accounting list", avp.getProfileGroupServices("ap1", "information"), expServices); + + // Check Various Service Instances operation + assertEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "CREAM-CE"), "OR"); + assertEquals("group compute: ARC-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "ARC-CE"), "OR"); + assertEquals("group storage: SRMv2 op", avp.getProfileGroupServiceOp("ap1", "storage", "SRM"), "OR"); + assertEquals("group storage: SRM op", avp.getProfileGroupServiceOp("ap1", "storage", "SRMv2"), "OR"); + assertEquals("group information: Site-BDII op", avp.getProfileGroupServiceOp("ap1", "information", "Site-BDII"), + "OR"); + assertEquals("get group by service: ", avp.getGroupByService("ap1", "CREAM-CE"), "compute"); + assertEquals("get group by service: ", avp.getGroupByService("ap1", "SRMv2"), "storage"); + // we check for an unexpected operation + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "compute", "CREAM-CE"), + "AND"); + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "informationss", "CREAM-CE"), + "AND"); + assertNotEquals("group compute: CREAM-CE op", avp.getProfileGroupServiceOp("ap1", "storage", "CREAM-CE"), + "FOO"); + // check for metric profile operations and total operation + assertEquals("metric profile operations: AND", avp.getMetricOp("ap1"), "AND"); + assertEquals("total profile operations: AND", avp.getMetricOp("ap1"), "AND"); + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeCacheTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeCacheTest.java new file mode 100644 index 00000000..6834fa84 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeCacheTest.java @@ -0,0 +1,146 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.net.URISyntaxException; + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + +import org.junit.BeforeClass; +import org.junit.Test; + +import argo.avro.Downtime; +import argo.streaming.SyncParse; + +import org.apache.commons.codec.binary.Base64; + +public class DowntimeCacheTest { + + private static String[] fileList = { "/base64/downtimes_01.base64", + "/base64/downtimes_02.base64", + "/base64/downtimes_03.base64", + "/base64/downtimes_04.base64", + "/base64/downtimes_05.base64", + "/base64/downtimes_06.base64", + "/base64/downtimes_07.base64", + "/base64/downtimes_08.base64" }; + + private static String[] dayList = {"2018-05-09", + "2018-05-10", + "2018-05-12", + "2018-05-13", + "2018-05-14", + "2018-05-15", + "2018-05-16", + "2018-05-17"}; + + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + + for (String fileName : fileList) { + assertNotNull("Test file missing", DowntimeCacheTest.class.getResource(fileName)); + } + + } + + @Test + public void test() throws IOException, URISyntaxException { + + String b64List[] = new String[8]; + Map> downtimeMap = new HashMap>(); + + + int i=0; + for (String fileName : fileList) { + b64List[i] = new String(Files + 
.readAllBytes(Paths.get(DowntimeCacheTest.class.getResource(fileName).toURI()))); + i++; + } + + i=0; + for (String b64:b64List) { + byte[] decoded = Base64.decodeBase64(b64.getBytes("UTF-8")); + ArrayList dt = SyncParse.parseDowntimes(decoded); + downtimeMap.put(dayList[i], dt); + i++; + } + + DowntimeCache dc2 = new DowntimeCache(2); + // Downtime set arrives for 2018-05-10 add it to the cache + dc2.addFeed("2018-05-10",downtimeMap.get("2018-05-10")); + // Downtime set arrives for 2018-05-09 (older than the oldest downtime day in cache 05/10) ignore it + assertEquals(null,dc2.getDowntimeManager("2018-05-09")); + assertNotEquals(null,dc2.getDowntimeManager("2018-05-10")); + dc2.addFeed("2018-05-13",downtimeMap.get("2018-05-13")); + dc2.addFeed("2018-05-14",downtimeMap.get("2018-05-14")); + + // Not cache should contain only 2018-05-13 and 2018-05-14 + assertEquals(null,dc2.getDowntimeManager("2018-05-09")); + assertEquals(null,dc2.getDowntimeManager("2018-05-10")); + + DowntimeManager dm13 = new DowntimeManager(); + DowntimeManager dm14 = new DowntimeManager(); + dm13.loadFromList(downtimeMap.get("2018-05-13")); + dm14.loadFromList(downtimeMap.get("2018-05-14")); + + + + assertEquals(dm13.toString(),dc2.getDowntimeManager("2018-05-13").toString()); + assertEquals(dm14.toString(),dc2.getDowntimeManager("2018-05-14").toString()); + + // Assert that max items are 2 + assertEquals(2,dc2.getMaxItems()); + + assertEquals("[2018-05-13T00:00:00Z, 2018-05-13T23:59:00Z]",dc2.getDowntimePeriod("2018-05-13", "cluster51.knu.ac.kr", "Site-BDII").toString()); + assertEquals("[2018-05-14T00:00:00Z, 2018-05-14T23:59:00Z]",dc2.getDowntimePeriod("2018-05-14", "cluster51.knu.ac.kr", "Site-BDII").toString()); + assertEquals("[2018-05-13T14:00:00Z, 2018-05-13T23:59:00Z]",dc2.getDowntimePeriod("2018-05-13", "fal-pygrid-30.lancs.ac.uk", "webdav").toString()); + // Insert new element 2018-05-16 that will remove 2018-05-13 + dc2.addFeed("2018-05-16", downtimeMap.get("2018-05-16")); + assertEquals(null,dc2.getDowntimePeriod("2018-05-13", "fal-pygrid-30.lancs.ac.uk", "webdav")); + // ..but 2018-05-14 still exists + assertEquals("[2018-05-14T00:00:00Z, 2018-05-14T23:59:00Z]",dc2.getDowntimePeriod("2018-05-14", "cluster51.knu.ac.kr", "Site-BDII").toString()); + // Insert new element 2018-05-17 that will remove 2018-05-14 + dc2.addFeed("2018-05-17", downtimeMap.get("2018-05-17")); + assertEquals(null,dc2.getDowntimePeriod("2018-05-14", "cluster51.knu.ac.kr", "Site-BDII")); + + DowntimeCache dc3 = new DowntimeCache(3); + + // Begin by adding downtime dataset for day 12 + dc3.addFeed("2018-05-12", downtimeMap.get("2018-05-12")); + // The following days should be ignored (10 and 09) + dc3.addFeed("2018-05-10", downtimeMap.get("2018-05-10")); + dc3.addFeed("2018-05-09", downtimeMap.get("2018-05-09")); + assertEquals(null,dc3.getDowntimeManager("2018-05-09")); + assertEquals(null,dc3.getDowntimeManager("2018-05-10")); + + // Add 17,18,15,16 (also having 12) at one moment 15 will replace 12 + dc3.addFeed("2018-05-17", downtimeMap.get("2018-05-17")); + dc3.addFeed("2018-05-18", downtimeMap.get("2018-05-18")); + dc3.addFeed("2018-05-15", downtimeMap.get("2018-05-15")); + assertEquals(null,dc3.getDowntimeManager("2018-05-12")); + + DowntimeManager dm15 = new DowntimeManager(); + DowntimeManager dm17 = new DowntimeManager(); + DowntimeManager dm18 = new DowntimeManager(); + + dm15.loadFromList(downtimeMap.get("2018-05-15")); + dm17.loadFromList(downtimeMap.get("2018-05-17")); + dm18.loadFromList(downtimeMap.get("2018-05-18")); + 
+ assertEquals(dm15.toString(),dc3.getDowntimeManager("2018-05-15").toString()); + assertEquals(dm17.toString(),dc3.getDowntimeManager("2018-05-17").toString()); + assertEquals(dm18.toString(),dc3.getDowntimeManager("2018-05-18").toString()); + + + + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeManagerTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeManagerTest.java new file mode 100644 index 00000000..e36a0875 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/DowntimeManagerTest.java @@ -0,0 +1,68 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class DowntimeManagerTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", DowntimeManagerTest.class.getResource("/avro/downtimes_v2.avro")); + } + + @Test + public void test() throws IOException, URISyntaxException { + // Prepare Resource File + URL resAvroFile = DowntimeManagerTest.class.getResource("/avro/downtimes_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instantiate class + DowntimeManager dt = new DowntimeManager(); + // Test loading file + dt.loadAvro(avroFile); + assertNotNull("File Loaded", dt); + + // Test time period retrieval by service endpoint + + // test for cream-ce01.gridpp.rl.ac.uk CREAM-CE + ArrayList timePeriod = new ArrayList(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #1", dt.getPeriod("cream-ce01.gridpp.rl.ac.uk", "CREAM-CE"), timePeriod); + // test for px.ire.kharkov.ua, MyProxy + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #2", dt.getPeriod("px.ire.kharkov.ua", "MyProxy"), timePeriod); + // test for gb-ui-nki.els.sara.nl, UI + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #3", dt.getPeriod("gb-ui-nki.els.sara.nl", "UI"), timePeriod); + // test for cream-ce01.gridpp.rl.ac.uk, gLExec + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #4", dt.getPeriod("cream-ce01.gridpp.rl.ac.uk", "gLExec"), timePeriod); + // test for gcvmfs.cat.cbpf.br, org.squid-cache.Squid + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T20:00:00Z"); + assertEquals("Test timeperiod #5", dt.getPeriod("cvmfs.cat.cbpf.br", "org.squid-cache.Squid"), timePeriod); + // test for apel.ire.kharkov.ua, APEL + timePeriod.clear(); + timePeriod.add("2015-05-07T00:00:00Z"); + timePeriod.add("2015-05-07T23:59:00Z"); + assertEquals("Test timeperiod #6", dt.getPeriod("apel.ire.kharkov.ua", "APEL"), timePeriod); + + } + +} \ No newline at end of file diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java b/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java new file mode 100644 index 00000000..708dc6d9 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java @@ -0,0 +1,83 @@ +package sync; + +import static 
org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + + + + +import org.junit.BeforeClass; +import org.junit.Test; + +import argo.avro.GroupEndpoint; + + +public class EndpointGroupManagerV2Test { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/group_endpoints_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/group_endpoints_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 ge = new EndpointGroupManagerV2(); + // Test loading file 2 + ge.loadAvro(avroFile); + + + + + assertNotNull("File Loaded", ge); + + // Test Check if service endpoint exists in topology + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("se01.afroditi.hellasgrid.gr", "SRMv2")); + assertTrue(ge.checkEndpoint("grid-perfsonar.hpc.susx.ac.uk", "net.perfSONAR.Latency")); + assertTrue(ge.checkEndpoint("se.grid.tuke.sk", "SRMv2")); + assertTrue(ge.checkEndpoint("dpm.grid.atomki.hu", "SRMv2")); + assertTrue(ge.checkEndpoint("gt3.pnpi.nw.ru", "CREAM-CE")); + + + + // Test check Group retrieval + ArrayList result1 = new ArrayList(); + result1.add("ru-PNPI"); + assertEquals(ge.getGroup( "gt3.pnpi.nw.ru", "CREAM-CE"), result1); + + + // Check non-existent groups + assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false); + assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false); + + ArrayList egpList = new ArrayList(); + for (int i=0; i<5; i++){ + GroupEndpoint itemNew = new GroupEndpoint(); + itemNew.setGroup("FOO"); + itemNew.setHostname("host_"+Integer.toString(i)); + itemNew.setType("SITE"); + egpList.add(itemNew); + } + + EndpointGroupManagerV2 egpMgr = new EndpointGroupManagerV2(); + egpMgr.loadFromList(egpList); + + assertTrue(egpMgr.getList().size()==5); + + + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupsTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupsTest.java new file mode 100644 index 00000000..2dddd6ce --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/EndpointGroupsTest.java @@ -0,0 +1,61 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import ops.ConfigManager; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class EndpointGroupsTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", EndpointGroupsTest.class.getResource("/avro/group_endpoints_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = EndpointGroupsTest.class.getResource("/avro/group_endpoints_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + EndpointGroupManager ge = new EndpointGroupManager(); + // Test loading file 2 + ge.loadAvro(avroFile); + assertNotNull("File Loaded", ge); + + // Test 
Check if service endpoint exists in topology + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); + assertTrue(ge.checkEndpoint("se01.afroditi.hellasgrid.gr", "SRMv2")); + assertTrue(ge.checkEndpoint("grid-perfsonar.hpc.susx.ac.uk", "net.perfSONAR.Latency")); + assertTrue(ge.checkEndpoint("se.grid.tuke.sk", "SRMv2")); + assertTrue(ge.checkEndpoint("dpm.grid.atomki.hu", "SRMv2")); + // Test check Group retrieval + ArrayList result1 = new ArrayList(); + result1.add("ru-PNPI"); + assertEquals(ge.getGroup("SITES", "gt3.pnpi.nw.ru", "CREAM-CE"), result1); + + // Test Tag Filtering (Wont filter out anything since input is already + // filtered) + URL resJson = GroupsOfGroupsTest.class.getResource("/ops/config.json"); + File cfgFile = new File(resJson.toURI()); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(cfgFile); + ge.filter(cfgMgr.egroupTags); + + // Check non-existent groups + assertTrue(ge.checkEndpoint("ce.etfos.cro-ngi.hr", "GRAM5") == false); + assertTrue(ge.checkEndpoint("grid129.sinp.msu.ru", "CREAM-CE") == false); + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/GroupsOfGroupsTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/GroupsOfGroupsTest.java new file mode 100644 index 00000000..51d40f08 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/GroupsOfGroupsTest.java @@ -0,0 +1,64 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; + +import ops.ConfigManager; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class GroupsOfGroupsTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", GroupsOfGroupsTest.class.getResource("/avro/group_groups_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = GroupsOfGroupsTest.class.getResource("/avro/group_groups_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + GroupGroupManager gg = new GroupGroupManager(); + // Test loading file + gg.loadAvro(avroFile); + assertNotNull("File Loaded", gg); + // Test retrieve group by subgroup name and group type + assertEquals(gg.getGroup("NGI", "UNI-BONN"), "NGI_DE"); + assertEquals(gg.getGroup("NGI", "MSFG-OPEN"), "NGI_FRANCE"); + assertEquals(gg.getGroup("NGI", "HG-02-IASA"), "NGI_GRNET"); + assertEquals(gg.getGroup("NGI", "ZA-MERAKA"), "AfricaArabia"); + assertEquals(gg.getGroup("NGI", "RU-SPbSU"), "Russia"); + // Test to assert if groups exist + assertTrue(gg.checkSubGroup("UNI-BONN")); + assertTrue(gg.checkSubGroup("MSFG-OPEN")); + assertTrue(gg.checkSubGroup("HG-02-IASA")); + assertTrue(gg.checkSubGroup("ZA-MERAKA")); + assertTrue(gg.checkSubGroup("RU-SPbSU")); + + // Test Tag Filtering (Wont filter out anything since input is already + // filtered) + URL resJson = GroupsOfGroupsTest.class.getResource("/ops/config.json"); + File cfgFile = new File(resJson.toURI()); + ConfigManager cfgMgr = new ConfigManager(); + cfgMgr.loadJson(cfgFile); + gg.filter(cfgMgr.ggroupTags); + + // Test groups that are not present + assertNotEquals(gg.getGroup("NGI", "KE-UONBI-01"), "AfricaArabia"); + assertNotEquals(gg.getGroup("NGI", "RU-Novosibirsk-BINP"), "Russia"); + 
assertTrue(gg.checkSubGroup("FRANCE-GRILLES-TESTBED") == false); + + // Test exceptions + + } + +} diff --git a/flink_jobs/old-models/stream_status/src/test/java/sync/MetricProfilesTest.java b/flink_jobs/old-models/stream_status/src/test/java/sync/MetricProfilesTest.java new file mode 100644 index 00000000..c262ef20 --- /dev/null +++ b/flink_jobs/old-models/stream_status/src/test/java/sync/MetricProfilesTest.java @@ -0,0 +1,113 @@ +package sync; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; + +import org.junit.BeforeClass; +import org.junit.Test; + +import junitx.framework.ListAssert; + +public class MetricProfilesTest { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", MetricProfilesTest.class.getResource("/avro/poem_sync_v2.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException { + // Prepare Resource File + URL resAvroFile = MetricProfilesTest.class.getResource("/avro/poem_sync_v2.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instatiate class + MetricProfileManager mp = new MetricProfileManager(); + // Test loading file + mp.loadAvro(avroFile); + assertNotNull("File Loaded", mp); + + // Test Loaded Metric Profile + assertEquals("Only one metric profile must be loaded", mp.getProfiles().size(), 1); + assertEquals("Profile ch.cern.sam.roc_critical must be loaded", mp.getProfiles().get(0).toString(), + "ch.cern.sam.ROC_CRITICAL"); + + // Test Loaded Metric Profile Services + ArrayList serviceList = new ArrayList(); + serviceList.add("GRAM5"); + serviceList.add("QCG.Computing"); + serviceList.add("ARC-CE"); + serviceList.add("unicore6.TargetSystemFactory"); + serviceList.add("Site-BDII"); + serviceList.add("CREAM-CE"); + serviceList.add("SRMv2"); + + ListAssert.assertEquals("Test Presence of Loaded Profile Services", mp.getProfileServices("ch.cern.sam.ROC_CRITICAL"), + serviceList); + + // Test Loaded Metric Profile service metrics; + + // GRAM5 service + ArrayList gram5Metrics = new ArrayList(); + gram5Metrics.add("hr.srce.GRAM-Auth"); + gram5Metrics.add("hr.srce.GRAM-CertLifetime"); + gram5Metrics.add("hr.srce.GRAM-Command"); + + ListAssert.assertEquals("Test GRAM5 metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "GRAM5"), + gram5Metrics); + // Test Loaded Metric Profile service metrics; + + // QCG service + ArrayList qcgMetrics = new ArrayList(); + qcgMetrics.add("hr.srce.QCG-Computing-CertLifetime"); + qcgMetrics.add("pl.plgrid.QCG-Computing"); + + ListAssert.assertEquals("Test QCG metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "QCG.Computing"), + qcgMetrics); + + // Site-BDII + ArrayList siteBdiiMetrics = new ArrayList(); + siteBdiiMetrics.add("org.bdii.Entries"); + siteBdiiMetrics.add("org.bdii.Freshness"); + ListAssert.assertEquals("Test Site-BDII metrics", mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "Site-BDII"), + siteBdiiMetrics); + + // SRMv2 + ArrayList srmv2metrics = new ArrayList(); + srmv2metrics.add("hr.srce.SRM2-CertLifetime"); + srmv2metrics.add("org.sam.SRM-Del"); + srmv2metrics.add("org.sam.SRM-Get"); + srmv2metrics.add("org.sam.SRM-GetSURLs"); + srmv2metrics.add("org.sam.SRM-GetTURLs"); + srmv2metrics.add("org.sam.SRM-Ls"); + srmv2metrics.add("org.sam.SRM-LsDir"); + srmv2metrics.add("org.sam.SRM-Put"); + ListAssert.assertEquals("SRMv2 ", 
(mp.getProfileServiceMetrics("ch.cern.sam.ROC_CRITICAL", "SRMv2")), srmv2metrics); + + // Check Existense of Profile Service Metric + + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.cream.CREAMCE-JobSubmit")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.cream.CREAMCE-JobSubmit")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-Bi")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-Csh")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "emi.wn.WN-SoftVer")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "hr.srce.CADist-Check")); + assertTrue("Existence of CREAM-CE Metric", + mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "CREAM-CE", "hr.srce.CREAMCE-CertLifetime")); + // False results + assertTrue("ARC-CE doesn't have certLifetime", + !(mp.checkProfileServiceMetric("ch.cern.sam.ROC_CRITICAL", "ARC-CE", "hr.srce.CREAMCE-CertLifetime"))); + } + +} diff --git a/flink_jobs/status_trends/.gitignore b/flink_jobs/status_trends/.gitignore new file mode 100644 index 00000000..6c4e323f --- /dev/null +++ b/flink_jobs/status_trends/.gitignore @@ -0,0 +1,8 @@ +/target/ +.project +.settings/ +.classpath/ +.classpath +/nbproject +nbactions.xml + diff --git a/flink_jobs/status_trends/pom.xml b/flink_jobs/status_trends/pom.xml new file mode 100644 index 00000000..185f1560 --- /dev/null +++ b/flink_jobs/status_trends/pom.xml @@ -0,0 +1,433 @@ + + + 4.0.0 + + argo.batch + status-trends + 1.0 + jar + + ArgoStatusTrends + + + UTF-8 + 1.3.2 + 1.7.7 + 1.2.17 + 2.10 + 1.3.0 + 2.6.0 + + + + + apache.snapshots + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots/ + + false + + + true + + + + + + + + + + + de.javakaffee + kryo-serializers + 0.30 + + + com.google.code.gson + gson + 2.2.4 + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + + + + + org.apache.flink + flink-hadoop-compatibility_2.10 + ${flink.version} + + + + org.mongodb + mongo-java-driver + 3.2.2 + compile + + + com.googlecode.json-simple + json-simple + 1.1.1 + + + org.apache.flink + flink-avro_2.10 + ${flink.version} + + + + + + + org.apache.flink + flink-java + ${flink.version} + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + + + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + log4j + log4j + ${log4j.version} + + + joda-time + joda-time + 1.6 + + + + + + + build-jar + + + false + + + + + org.apache.flink + flink-java + ${flink.version} + provided + + + org.apache.flink + flink-streaming-java_2.10 + ${flink.version} + provided + + + org.apache.flink + flink-clients_2.10 + ${flink.version} + provided + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + provided + + + log4j + log4j + ${log4j.version} + provided + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + package + + shade + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 2.4.1 + + + + package + + shade + + + + + + org.apache.flink:flink-annotations + org.apache.flink:flink-shaded-hadoop2 + 
org.apache.flink:flink-shaded-curator-recipes + org.apache.flink:flink-core + org.apache.flink:flink-java + org.apache.flink:flink-scala_2.10 + org.apache.flink:flink-runtime_2.10 + org.apache.flink:flink-optimizer_2.10 + org.apache.flink:flink-clients_2.10 + org.apache.flink:flink-avro_2.10 + org.apache.flink:flink-examples-batch_2.10 + org.apache.flink:flink-examples-streaming_2.10 + org.apache.flink:flink-streaming-java_2.10 + org.apache.flink:flink-streaming-scala_2.10 + org.apache.flink:flink-scala-shell_2.10 + org.apache.flink:flink-python + org.apache.flink:flink-metrics-core + org.apache.flink:flink-metrics-jmx + org.apache.flink:flink-statebackend-rocksdb_2.10 + + + + log4j:log4j + org.scala-lang:scala-library + org.scala-lang:scala-compiler + org.scala-lang:scala-reflect + com.data-artisans:flakka-actor_* + com.data-artisans:flakka-remote_* + com.data-artisans:flakka-slf4j_* + io.netty:netty-all + io.netty:netty + commons-fileupload:commons-fileupload + org.apache.avro:avro + commons-collections:commons-collections + org.codehaus.jackson:jackson-core-asl + org.codehaus.jackson:jackson-mapper-asl + com.thoughtworks.paranamer:paranamer + org.xerial.snappy:snappy-java + org.apache.commons:commons-compress + org.tukaani:xz + com.esotericsoftware.kryo:kryo + com.esotericsoftware.minlog:minlog + org.objenesis:objenesis + com.twitter:chill_* + com.twitter:chill-java + commons-lang:commons-lang + junit:junit + org.apache.commons:commons-lang3 + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + log4j:log4j + org.apache.commons:commons-math + org.apache.sling:org.apache.sling.commons.json + commons-logging:commons-logging + commons-codec:commons-codec + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.core:jackson-databind + com.fasterxml.jackson.core:jackson-annotations + stax:stax-api + com.typesafe:config + org.uncommons.maths:uncommons-maths + com.github.scopt:scopt_* + commons-io:commons-io + commons-cli:commons-cli + + + + + org.apache.flink:* + + + org/apache/flink/shaded/com/** + web-docs/** + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + false + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.7 + 1.7 + + + + + + + + diff --git a/flink_jobs/status_trends/src/main/java/argo/avro/MetricData.java b/flink_jobs/status_trends/src/main/java/argo/avro/MetricData.java new file mode 100644 index 00000000..cf1dedf2 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/avro/MetricData.java @@ -0,0 +1,574 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package argo.avro; +@SuppressWarnings("all") +@org.apache.avro.specific.AvroGenerated +public class MetricData extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { + public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"MetricData\",\"namespace\":\"argo.avro\",\"fields\":[{\"name\":\"timestamp\",\"type\":\"string\"},{\"name\":\"service\",\"type\":\"string\"},{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"metric\",\"type\":\"string\"},{\"name\":\"status\",\"type\":\"string\"},{\"name\":\"monitoring_host\",\"type\":[\"null\",\"string\"]},{\"name\":\"summary\",\"type\":[\"null\",\"string\"]},{\"name\":\"message\",\"type\":[\"null\",\"string\"]},{\"name\":\"tags\",\"type\":[\"null\",{\"type\":\"map\",\"values\":[\"null\",\"string\"]}]}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + 
@Deprecated public java.lang.CharSequence timestamp; + @Deprecated public java.lang.CharSequence service; + @Deprecated public java.lang.CharSequence hostname; + @Deprecated public java.lang.CharSequence metric; + @Deprecated public java.lang.CharSequence status; + @Deprecated public java.lang.CharSequence monitoring_host; + @Deprecated public java.lang.CharSequence summary; + @Deprecated public java.lang.CharSequence message; + @Deprecated public java.util.Map tags; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public MetricData() {} + + /** + * All-args constructor. + */ + public MetricData(java.lang.CharSequence timestamp, java.lang.CharSequence service, java.lang.CharSequence hostname, java.lang.CharSequence metric, java.lang.CharSequence status, java.lang.CharSequence monitoring_host, java.lang.CharSequence summary, java.lang.CharSequence message, java.util.Map tags) { + this.timestamp = timestamp; + this.service = service; + this.hostname = hostname; + this.metric = metric; + this.status = status; + this.monitoring_host = monitoring_host; + this.summary = summary; + this.message = message; + this.tags = tags; + } + + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + // Used by DatumWriter. Applications should not call. + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return timestamp; + case 1: return service; + case 2: return hostname; + case 3: return metric; + case 4: return status; + case 5: return monitoring_host; + case 6: return summary; + case 7: return message; + case 8: return tags; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + // Used by DatumReader. Applications should not call. + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: timestamp = (java.lang.CharSequence)value$; break; + case 1: service = (java.lang.CharSequence)value$; break; + case 2: hostname = (java.lang.CharSequence)value$; break; + case 3: metric = (java.lang.CharSequence)value$; break; + case 4: status = (java.lang.CharSequence)value$; break; + case 5: monitoring_host = (java.lang.CharSequence)value$; break; + case 6: summary = (java.lang.CharSequence)value$; break; + case 7: message = (java.lang.CharSequence)value$; break; + case 8: tags = (java.util.Map)value$; break; + default: throw new org.apache.avro.AvroRuntimeException("Bad index"); + } + } + + /** + * Gets the value of the 'timestamp' field. + */ + public java.lang.CharSequence getTimestamp() { + return timestamp; + } + + /** + * Sets the value of the 'timestamp' field. + * @param value the value to set. + */ + public void setTimestamp(java.lang.CharSequence value) { + this.timestamp = value; + } + + /** + * Gets the value of the 'service' field. + */ + public java.lang.CharSequence getService() { + return service; + } + + /** + * Sets the value of the 'service' field. + * @param value the value to set. + */ + public void setService(java.lang.CharSequence value) { + this.service = value; + } + + /** + * Gets the value of the 'hostname' field. + */ + public java.lang.CharSequence getHostname() { + return hostname; + } + + /** + * Sets the value of the 'hostname' field. + * @param value the value to set. + */ + public void setHostname(java.lang.CharSequence value) { + this.hostname = value; + } + + /** + * Gets the value of the 'metric' field. 
+ */ + public java.lang.CharSequence getMetric() { + return metric; + } + + /** + * Sets the value of the 'metric' field. + * @param value the value to set. + */ + public void setMetric(java.lang.CharSequence value) { + this.metric = value; + } + + /** + * Gets the value of the 'status' field. + */ + public java.lang.CharSequence getStatus() { + return status; + } + + /** + * Sets the value of the 'status' field. + * @param value the value to set. + */ + public void setStatus(java.lang.CharSequence value) { + this.status = value; + } + + /** + * Gets the value of the 'monitoring_host' field. + */ + public java.lang.CharSequence getMonitoringHost() { + return monitoring_host; + } + + /** + * Sets the value of the 'monitoring_host' field. + * @param value the value to set. + */ + public void setMonitoringHost(java.lang.CharSequence value) { + this.monitoring_host = value; + } + + /** + * Gets the value of the 'summary' field. + */ + public java.lang.CharSequence getSummary() { + return summary; + } + + /** + * Sets the value of the 'summary' field. + * @param value the value to set. + */ + public void setSummary(java.lang.CharSequence value) { + this.summary = value; + } + + /** + * Gets the value of the 'message' field. + */ + public java.lang.CharSequence getMessage() { + return message; + } + + /** + * Sets the value of the 'message' field. + * @param value the value to set. + */ + public void setMessage(java.lang.CharSequence value) { + this.message = value; + } + + /** + * Gets the value of the 'tags' field. + */ + public java.util.Map getTags() { + return tags; + } + + /** + * Sets the value of the 'tags' field. + * @param value the value to set. + */ + public void setTags(java.util.Map value) { + this.tags = value; + } + + /** Creates a new MetricData RecordBuilder */ + public static argo.avro.MetricData.Builder newBuilder() { + return new argo.avro.MetricData.Builder(); + } + + /** Creates a new MetricData RecordBuilder by copying an existing Builder */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData.Builder other) { + return new argo.avro.MetricData.Builder(other); + } + + /** Creates a new MetricData RecordBuilder by copying an existing MetricData instance */ + public static argo.avro.MetricData.Builder newBuilder(argo.avro.MetricData other) { + return new argo.avro.MetricData.Builder(other); + } + + /** + * RecordBuilder for MetricData instances. 
+ */ + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.CharSequence timestamp; + private java.lang.CharSequence service; + private java.lang.CharSequence hostname; + private java.lang.CharSequence metric; + private java.lang.CharSequence status; + private java.lang.CharSequence monitoring_host; + private java.lang.CharSequence summary; + private java.lang.CharSequence message; + private java.util.Map tags; + + /** Creates a new Builder */ + private Builder() { + super(argo.avro.MetricData.SCHEMA$); + } + + /** Creates a Builder by copying an existing Builder */ + private Builder(argo.avro.MetricData.Builder other) { + super(other); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.summary)) { + this.summary = data().deepCopy(fields()[6].schema(), other.summary); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.message)) { + this.message = data().deepCopy(fields()[7].schema(), other.message); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.tags)) { + this.tags = data().deepCopy(fields()[8].schema(), other.tags); + fieldSetFlags()[8] = true; + } + } + + /** Creates a Builder by copying an existing MetricData instance */ + private Builder(argo.avro.MetricData other) { + super(argo.avro.MetricData.SCHEMA$); + if (isValidValue(fields()[0], other.timestamp)) { + this.timestamp = data().deepCopy(fields()[0].schema(), other.timestamp); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.service)) { + this.service = data().deepCopy(fields()[1].schema(), other.service); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.hostname)) { + this.hostname = data().deepCopy(fields()[2].schema(), other.hostname); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.metric)) { + this.metric = data().deepCopy(fields()[3].schema(), other.metric); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.status)) { + this.status = data().deepCopy(fields()[4].schema(), other.status); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.monitoring_host)) { + this.monitoring_host = data().deepCopy(fields()[5].schema(), other.monitoring_host); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.summary)) { + this.summary = data().deepCopy(fields()[6].schema(), other.summary); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.message)) { + this.message = data().deepCopy(fields()[7].schema(), other.message); + 
fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.tags)) { + this.tags = data().deepCopy(fields()[8].schema(), other.tags); + fieldSetFlags()[8] = true; + } + } + + /** Gets the value of the 'timestamp' field */ + public java.lang.CharSequence getTimestamp() { + return timestamp; + } + + /** Sets the value of the 'timestamp' field */ + public argo.avro.MetricData.Builder setTimestamp(java.lang.CharSequence value) { + validate(fields()[0], value); + this.timestamp = value; + fieldSetFlags()[0] = true; + return this; + } + + /** Checks whether the 'timestamp' field has been set */ + public boolean hasTimestamp() { + return fieldSetFlags()[0]; + } + + /** Clears the value of the 'timestamp' field */ + public argo.avro.MetricData.Builder clearTimestamp() { + timestamp = null; + fieldSetFlags()[0] = false; + return this; + } + + /** Gets the value of the 'service' field */ + public java.lang.CharSequence getService() { + return service; + } + + /** Sets the value of the 'service' field */ + public argo.avro.MetricData.Builder setService(java.lang.CharSequence value) { + validate(fields()[1], value); + this.service = value; + fieldSetFlags()[1] = true; + return this; + } + + /** Checks whether the 'service' field has been set */ + public boolean hasService() { + return fieldSetFlags()[1]; + } + + /** Clears the value of the 'service' field */ + public argo.avro.MetricData.Builder clearService() { + service = null; + fieldSetFlags()[1] = false; + return this; + } + + /** Gets the value of the 'hostname' field */ + public java.lang.CharSequence getHostname() { + return hostname; + } + + /** Sets the value of the 'hostname' field */ + public argo.avro.MetricData.Builder setHostname(java.lang.CharSequence value) { + validate(fields()[2], value); + this.hostname = value; + fieldSetFlags()[2] = true; + return this; + } + + /** Checks whether the 'hostname' field has been set */ + public boolean hasHostname() { + return fieldSetFlags()[2]; + } + + /** Clears the value of the 'hostname' field */ + public argo.avro.MetricData.Builder clearHostname() { + hostname = null; + fieldSetFlags()[2] = false; + return this; + } + + /** Gets the value of the 'metric' field */ + public java.lang.CharSequence getMetric() { + return metric; + } + + /** Sets the value of the 'metric' field */ + public argo.avro.MetricData.Builder setMetric(java.lang.CharSequence value) { + validate(fields()[3], value); + this.metric = value; + fieldSetFlags()[3] = true; + return this; + } + + /** Checks whether the 'metric' field has been set */ + public boolean hasMetric() { + return fieldSetFlags()[3]; + } + + /** Clears the value of the 'metric' field */ + public argo.avro.MetricData.Builder clearMetric() { + metric = null; + fieldSetFlags()[3] = false; + return this; + } + + /** Gets the value of the 'status' field */ + public java.lang.CharSequence getStatus() { + return status; + } + + /** Sets the value of the 'status' field */ + public argo.avro.MetricData.Builder setStatus(java.lang.CharSequence value) { + validate(fields()[4], value); + this.status = value; + fieldSetFlags()[4] = true; + return this; + } + + /** Checks whether the 'status' field has been set */ + public boolean hasStatus() { + return fieldSetFlags()[4]; + } + + /** Clears the value of the 'status' field */ + public argo.avro.MetricData.Builder clearStatus() { + status = null; + fieldSetFlags()[4] = false; + return this; + } + + /** Gets the value of the 'monitoring_host' field */ + public java.lang.CharSequence getMonitoringHost() { + 
return monitoring_host; + } + + /** Sets the value of the 'monitoring_host' field */ + public argo.avro.MetricData.Builder setMonitoringHost(java.lang.CharSequence value) { + validate(fields()[5], value); + this.monitoring_host = value; + fieldSetFlags()[5] = true; + return this; + } + + /** Checks whether the 'monitoring_host' field has been set */ + public boolean hasMonitoringHost() { + return fieldSetFlags()[5]; + } + + /** Clears the value of the 'monitoring_host' field */ + public argo.avro.MetricData.Builder clearMonitoringHost() { + monitoring_host = null; + fieldSetFlags()[5] = false; + return this; + } + + /** Gets the value of the 'summary' field */ + public java.lang.CharSequence getSummary() { + return summary; + } + + /** Sets the value of the 'summary' field */ + public argo.avro.MetricData.Builder setSummary(java.lang.CharSequence value) { + validate(fields()[6], value); + this.summary = value; + fieldSetFlags()[6] = true; + return this; + } + + /** Checks whether the 'summary' field has been set */ + public boolean hasSummary() { + return fieldSetFlags()[6]; + } + + /** Clears the value of the 'summary' field */ + public argo.avro.MetricData.Builder clearSummary() { + summary = null; + fieldSetFlags()[6] = false; + return this; + } + + /** Gets the value of the 'message' field */ + public java.lang.CharSequence getMessage() { + return message; + } + + /** Sets the value of the 'message' field */ + public argo.avro.MetricData.Builder setMessage(java.lang.CharSequence value) { + validate(fields()[7], value); + this.message = value; + fieldSetFlags()[7] = true; + return this; + } + + /** Checks whether the 'message' field has been set */ + public boolean hasMessage() { + return fieldSetFlags()[7]; + } + + /** Clears the value of the 'message' field */ + public argo.avro.MetricData.Builder clearMessage() { + message = null; + fieldSetFlags()[7] = false; + return this; + } + + /** Gets the value of the 'tags' field */ + public java.util.Map getTags() { + return tags; + } + + /** Sets the value of the 'tags' field */ + public argo.avro.MetricData.Builder setTags(java.util.Map value) { + validate(fields()[8], value); + this.tags = value; + fieldSetFlags()[8] = true; + return this; + } + + /** Checks whether the 'tags' field has been set */ + public boolean hasTags() { + return fieldSetFlags()[8]; + } + + /** Clears the value of the 'tags' field */ + public argo.avro.MetricData.Builder clearTags() { + tags = null; + fieldSetFlags()[8] = false; + return this; + } + + @Override + public MetricData build() { + try { + MetricData record = new MetricData(); + record.timestamp = fieldSetFlags()[0] ? this.timestamp : (java.lang.CharSequence) defaultValue(fields()[0]); + record.service = fieldSetFlags()[1] ? this.service : (java.lang.CharSequence) defaultValue(fields()[1]); + record.hostname = fieldSetFlags()[2] ? this.hostname : (java.lang.CharSequence) defaultValue(fields()[2]); + record.metric = fieldSetFlags()[3] ? this.metric : (java.lang.CharSequence) defaultValue(fields()[3]); + record.status = fieldSetFlags()[4] ? this.status : (java.lang.CharSequence) defaultValue(fields()[4]); + record.monitoring_host = fieldSetFlags()[5] ? this.monitoring_host : (java.lang.CharSequence) defaultValue(fields()[5]); + record.summary = fieldSetFlags()[6] ? this.summary : (java.lang.CharSequence) defaultValue(fields()[6]); + record.message = fieldSetFlags()[7] ? this.message : (java.lang.CharSequence) defaultValue(fields()[7]); + record.tags = fieldSetFlags()[8] ? 
this.tags : (java.util.Map) defaultValue(fields()[8]); + return record; + } catch (Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchEndpointFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchEndpointFlipFlopTrends.java new file mode 100644 index 00000000..5c8adb18 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/BatchEndpointFlipFlopTrends.java @@ -0,0 +1,142 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.functions.calctimelines.CalcLastTimeStatus; +import argo.functions.calctimelines.TopologyMetricFilter; +import argo.functions.calctrends.CalcEndpointFlipFlopTrends; +import argo.functions.calctrends.CalcMetricFlipFlopTrends; +import argo.pojos.EndpointTrends; +import argo.pojos.MetricTrends; +import argo.utils.Utils; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.utils.ParameterTool; +import argo.profiles.ProfilesLoader; +import org.apache.flink.api.common.functions.MapFunction; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.core.fs.Path; +import org.joda.time.DateTime; + + +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status changes + * that occur to the level of group, service, endpoint of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +* +*/ +public class BatchEndpointFlipFlopTrends { + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer rankNum; + private static final String endpointTrends = "flipflop_trends_endpoints"; + + private static String mongoUri; + private static ProfilesLoader profilesLoader; + private static DateTime profilesDate; + + private static String reportId; + private static String format = "yyyy-MM-dd"; + private static boolean clearMongo = false; + private static String profilesDateStr; + + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "mongoUri", "apiUri", "key", "reportId", "date")) { + System.exit(0); + + } + + if (params.get("clearMongo") != null && 
params.getBoolean("clearMongo") == true) { + clearMongo = true; + } + + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + reportId = params.getRequired("reportId"); + mongoUri = params.getRequired("mongoUri"); + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params, "yesterdayData"); + todayData = readInputData(env, params, "todayData"); + + calcFlipFlops(); +// execute program + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Group Endpoint Flip Flops for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + + } + +// filter yesterdaydata and exclude the ones not contained in topology and metric profile data and get the last timestamp data for each service endpoint metric +// filter todaydata and exclude the ones not contained in topology and metric profile data , union yesterday data and calculate status changes for each service endpoint metric +// rank results + // private static DataSet calcFlipFlops() { + private static void calcFlipFlops() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + DataSet filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + + //group data by service enpoint metric and return for each group , the necessary info and a treemap containing timestamps and status + + DataSet serviceEndpointMetricGroupData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcMetricFlipFlopTrends(profilesLoader.getOperationParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser(), profilesDate)); + + //group data by service endpoint and count flip flops + DataSet serviceEndpointGroupData = serviceEndpointMetricGroupData.groupBy("group", "endpoint", "service").reduceGroup(new CalcEndpointFlipFlopTrends(profilesLoader.getAggregationProfileParser().getMetricOp(), profilesLoader.getOperationParser())); + + if (rankNum != null) { //sort and rank data + serviceEndpointGroupData = serviceEndpointGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + serviceEndpointGroupData = serviceEndpointGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, endpointTrends, MongoTrendsOutput.TrendsType.TRENDS_METRIC, reportId, profilesDateStr, clearMongo); + + DataSet trends = serviceEndpointGroupData.map(new MapFunction() { + + @Override + public Trends map(EndpointTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getEndpoint(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + // return 
serviceEndpointGroupData; + } //read input from file + + private static DataSet readInputData(ExecutionEnvironment env, ParameterTool params, String path) { + DataSet inputData; + Path input = new Path(params.getRequired(path)); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class + ); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchFlipFlopCollection.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchFlipFlopCollection.java new file mode 100644 index 00000000..1817c004 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/BatchFlipFlopCollection.java @@ -0,0 +1,226 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.filter.zero.flipflops.ZeroEndpointTrendsFilter; +import argo.filter.zero.flipflops.ZeroGroupTrendsFilter; +import argo.filter.zero.flipflops.ZeroMetricTrendsFilter; +import argo.filter.zero.flipflops.ZeroServiceTrendsFilter; +import argo.functions.calctimelines.CalcLastTimeStatus; +import argo.functions.calctimelines.MapServices; +import argo.functions.calctimelines.ServiceFilter; +import argo.functions.calctimelines.TopologyMetricFilter; +import argo.functions.calctrends.CalcEndpointFlipFlopTrends; +import argo.functions.calctrends.CalcGroupFlipFlop; +import argo.functions.calctrends.CalcGroupFunctionFlipFlop; +import argo.functions.calctrends.CalcMetricFlipFlopTrends; +import argo.functions.calctrends.CalcServiceFlipFlop; +import argo.pojos.EndpointTrends; +import argo.pojos.GroupFunctionTrends; +import argo.pojos.GroupTrends; +import argo.pojos.MetricTrends; +import argo.pojos.ServiceTrends; +import argo.profiles.ProfilesLoader; +import argo.utils.Utils; +import de.javakaffee.kryoserializers.jodatime.JodaDateTimeSerializer; +import org.apache.flink.api.common.functions.MapFunction; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.core.fs.Path; +import org.joda.time.DateTime; +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status changes + * that occur to all levels of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , e.g yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to e.g msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +* +*/ + +public class BatchFlipFlopCollection { + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer 
rankNum; + private static final String groupTrends = "flipflop_trends_endpoint_groups"; + private static final String metricTrends = "flipflop_trends_metrics"; + private static final String endpointTrends = "flipflop_trends_endpoints"; + private static final String serviceTrends = "flipflop_trends_services"; + private static String mongoUri; + private static ProfilesLoader profilesLoader; + private static DateTime profilesDate; + private static String format = "yyyy-MM-dd"; + private static String reportId; + private static boolean clearMongo = false; + private static String profilesDateStr; + + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + env.addDefaultKryoSerializer(DateTime.class, JodaDateTimeSerializer.class); + ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "mongoUri", "apiUri", "key", "date", "reportId")) { + System.exit(0); + } + + if (params.get("clearMongo") != null && params.getBoolean("clearMongo") == true) { + clearMongo = true; + } + reportId = params.getRequired("reportId"); + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + + + + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + mongoUri = params.get("mongoUri"); + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params, "yesterdayData"); + todayData = readInputData(env, params, "todayData"); + + // calculate on data + calcFlipFlops(); + +// execute program + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Collection Flip Flops for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + + } + +// filter yesterdaydata and exclude the ones not contained in topology and metric profile data and get the last timestamp data for each service endpoint metric +// filter todaydata and exclude the ones not contained in topology and metric profile data , union yesterday data and calculate status changes for each service endpoint metric +// rank results +// private static DataSet calcFlipFlops() { + private static void calcFlipFlops() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + DataSet filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + + //group data by service enpoint metric and return for each group , the necessary info and a treemap containing timestamps and status + + DataSet serviceEndpointMetricGroupData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcMetricFlipFlopTrends(profilesLoader.getOperationParser(), 
profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser(), profilesDate)); + + + DataSet noZeroServiceEndpointMetricGroupData = serviceEndpointMetricGroupData.filter(new ZeroMetricTrendsFilter()); + if (rankNum != null) { //sort and rank data + noZeroServiceEndpointMetricGroupData = noZeroServiceEndpointMetricGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + noZeroServiceEndpointMetricGroupData = noZeroServiceEndpointMetricGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, metricTrends, MongoTrendsOutput.TrendsType.TRENDS_METRIC, reportId, profilesDateStr, clearMongo); + + + DataSet trends = noZeroServiceEndpointMetricGroupData.map(new MapFunction() { + + @Override + public Trends map(MetricTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getEndpoint(), in.getMetric(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + //group data by service endpoint and count flip flops + DataSet serviceEndpointGroupData = serviceEndpointMetricGroupData.groupBy("group", "endpoint", "service").reduceGroup(new CalcEndpointFlipFlopTrends(profilesLoader.getAggregationProfileParser().getMetricOp(), profilesLoader.getOperationParser())); + DataSet noZeroserviceEndpointGroupData = serviceEndpointGroupData.filter(new ZeroEndpointTrendsFilter()); + + if (rankNum != null) { //sort and rank data + noZeroserviceEndpointGroupData = noZeroserviceEndpointGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + noZeroserviceEndpointGroupData = noZeroserviceEndpointGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + metricMongoOut = new MongoTrendsOutput(mongoUri, endpointTrends, MongoTrendsOutput.TrendsType.TRENDS_ENDPOINT, reportId, profilesDateStr, clearMongo); + + trends = noZeroserviceEndpointGroupData.map(new MapFunction() { + + @Override + public Trends map(EndpointTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getEndpoint(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + //group data by service and count flip flops + DataSet serviceGroupData = serviceEndpointGroupData.filter(new ServiceFilter(profilesLoader.getAggregationProfileParser())).groupBy("group", "service").reduceGroup(new CalcServiceFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + DataSet noZeroserviceGroupData = serviceGroupData.filter(new ZeroServiceTrendsFilter()); + + if (rankNum != null) { //sort and rank data + noZeroserviceGroupData = noZeroserviceGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + noZeroserviceGroupData = noZeroserviceGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + metricMongoOut = new MongoTrendsOutput(mongoUri, serviceTrends, MongoTrendsOutput.TrendsType.TRENDS_SERVICE, reportId, profilesDateStr, clearMongo); + + trends = noZeroserviceGroupData.map(new MapFunction() { + + @Override + public Trends map(ServiceTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + +//flat map data to add function as described in aggregation profile groups + serviceGroupData = serviceGroupData.flatMap(new MapServices(profilesLoader.getAggregationProfileParser())); + + 
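// Ranking note: every level of the hierarchy above and below uses the same pattern:
// sortPartition("flipflops", Order.DESCENDING).setParallelism(1) produces a single,
// globally sorted partition, and first(rankNum) keeps only the top N records when
// the optional --N parameter has been supplied.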
//group data by group,function and count flip flops + DataSet groupFunction = serviceGroupData.groupBy("group", "function").reduceGroup(new CalcGroupFunctionFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + // DataSet noZerogroupFunction =groupFunction.filter(new ZeroGroupFunctionFilter()); + + //group data by group and count flip flops + DataSet groupData = groupFunction.groupBy("group").reduceGroup(new CalcGroupFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + DataSet noZerogroupData = groupData.filter(new ZeroGroupTrendsFilter()); + + if (rankNum != null) { //sort and rank data + noZerogroupData = noZerogroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + noZerogroupData = noZerogroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + + metricMongoOut = new MongoTrendsOutput(mongoUri, groupTrends, MongoTrendsOutput.TrendsType.TRENDS_GROUP, reportId, profilesDateStr, clearMongo); + + trends = noZerogroupData.map(new MapFunction() { + + @Override + public Trends map(GroupTrends in) throws Exception { + return new Trends(in.getGroup(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + } + + //read input from file + private static DataSet readInputData(ExecutionEnvironment env, ParameterTool params, String path) { + DataSet inputData; + Path input = new Path(params.getRequired(path)); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class + ); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchGroupFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchGroupFlipFlopTrends.java new file mode 100644 index 00000000..623226ea --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/BatchGroupFlipFlopTrends.java @@ -0,0 +1,156 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.functions.calctimelines.CalcLastTimeStatus; +import argo.functions.calctimelines.MapServices; +import argo.functions.calctimelines.ServiceFilter; +import argo.functions.calctimelines.TopologyMetricFilter; +import argo.functions.calctrends.CalcEndpointFlipFlopTrends; +import argo.functions.calctrends.CalcGroupFlipFlop; +import argo.functions.calctrends.CalcGroupFunctionFlipFlop; +import argo.functions.calctrends.CalcMetricFlipFlopTrends; +import argo.functions.calctrends.CalcServiceFlipFlop; +import argo.pojos.EndpointTrends; +import argo.pojos.GroupFunctionTrends; +import argo.pojos.GroupTrends; +import argo.pojos.MetricTrends; +import argo.pojos.ServiceTrends; +import argo.profiles.ProfilesLoader; +import argo.utils.Utils; +import org.apache.flink.api.common.functions.MapFunction; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.core.fs.Path; +import org.joda.time.DateTime; +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status changes + * that occur to the level of group of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the 
previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +* +*/ +public class BatchGroupFlipFlopTrends { + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer rankNum; + private static final String groupTrends = "flipflop_trends_groups"; + private static String mongoUri; + private static ProfilesLoader profilesLoader; + private static DateTime profilesDate; + private static String format = "yyyy-MM-dd"; + + private static String reportId; + private static boolean clearMongo=false; + private static String profilesDateStr; + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "mongoUri", "apiUri", "key", "date", "reportId")) { + System.exit(0); + } + + if (params.get("clearMongo") != null && params.getBoolean("clearMongo") == true) { + clearMongo = true; + + } + reportId = params.getRequired("reportId"); + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + mongoUri = params.get("mongoUri"); + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params, "yesterdayData"); + todayData = readInputData(env, params, "todayData"); + + // calculate on data + calcFlipFlops(); +// execute program + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Group Flip Flops for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + + + } + +// filter yesterdaydata and exclude the ones not contained in topology and metric profile data and get the last timestamp data for each service endpoint metric +// filter todaydata and exclude the ones not contained in topology and metric profile data , union yesterday data and calculate status changes for each service endpoint metric +// rank results + private static void calcFlipFlops() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + DataSet 
filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + + //group data by service enpoint metric and return for each group , the necessary info and a treemap containing timestamps and status + DataSet serviceEndpointMetricGroupData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcMetricFlipFlopTrends(profilesLoader.getOperationParser(),profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser(), profilesDate)); + + //group data by service endpoint and count flip flops + DataSet serviceEndpointGroupData = serviceEndpointMetricGroupData.groupBy("group", "endpoint", "service").reduceGroup(new CalcEndpointFlipFlopTrends(profilesLoader.getAggregationProfileParser().getMetricOp(), profilesLoader.getOperationParser())); + + //group data by service and count flip flops + DataSet serviceGroupData = serviceEndpointGroupData.filter(new ServiceFilter(profilesLoader.getAggregationProfileParser())).groupBy("group", "service").reduceGroup(new CalcServiceFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + //flat map data to add function as described in aggregation profile groups + serviceGroupData = serviceGroupData.flatMap(new MapServices(profilesLoader.getAggregationProfileParser())); + + //group data by group,function and count flip flops + DataSet groupFunction = serviceGroupData.groupBy("group", "function").reduceGroup(new CalcGroupFunctionFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + + //group data by group and count flip flops + DataSet groupData = groupFunction.groupBy("group").reduceGroup(new CalcGroupFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + + if (rankNum != null) { //sort and rank data + groupData = groupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + groupData = groupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, groupTrends, MongoTrendsOutput.TrendsType.TRENDS_GROUP, reportId, profilesDateStr, clearMongo); + + DataSet trends = groupData.map(new MapFunction() { + + @Override + public Trends map(GroupTrends in) throws Exception { + return new Trends(in.getGroup(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + } //read input from file + + private static DataSet readInputData(ExecutionEnvironment env, ParameterTool params, String path) { + DataSet inputData; + Path input = new Path(params.getRequired(path)); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class + ); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchMetricFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchMetricFlipFlopTrends.java new file mode 100644 index 00000000..ae890d4f --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/BatchMetricFlipFlopTrends.java @@ -0,0 +1,141 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.functions.calctrends.CalcMetricFlipFlopTrends; +import argo.functions.calctimelines.CalcLastTimeStatus; +import 
argo.functions.calctimelines.TopologyMetricFilter; +import argo.pojos.MetricTrends; +import argo.utils.Utils; +import org.apache.flink.api.common.functions.MapFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.core.fs.Path; +import argo.profiles.ProfilesLoader; +import org.joda.time.DateTime; + + +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status changes + * that occur to the level of group, service, endpoint, metric of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +*/ +public class BatchMetricFlipFlopTrends { + + static Logger LOG = LoggerFactory.getLogger(BatchMetricFlipFlopTrends.class); + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer rankNum; + private static final String metricTrends = "flipflop_trends_metrics"; + private static String mongoUri; + + private static DateTime profilesDate; + private static String profilesDateStr; + private static String reportId; + private static final String format = "yyyy-MM-dd"; + private static ProfilesLoader profilesLoader; + + private static boolean clearMongo = false; + + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + final ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "mongoUri", "apiUri", "key", "reportId", "date")) { + System.exit(0); + } + + if (params.get("clearMongo") != null && params.getBoolean("clearMongo") == true) { + clearMongo = true; + } + + + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + + mongoUri = params.getRequired("mongoUri"); + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + reportId = params.getRequired("reportId"); + + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params.getRequired("yesterdayData")); + todayData = readInputData(env, params.getRequired("todayData")); + + calcFlipFlops(); + +// execute program + StringBuilder jobTitleSB 
= new StringBuilder(); + jobTitleSB.append("Metric Flip Flops for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + } + + // filter yesterdaydata and exclude the ones not contained in topology and metric profile data and get the last timestamp data for each service endpoint metric + // filter todaydata and exclude the ones not contained in topology and metric profile data , union yesterday data and calculate status changes for each service endpoint metric + // rank results +// private static DataSet calcFlipFlops() { + private static void calcFlipFlops() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + + DataSet filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + + DataSet metricData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcMetricFlipFlopTrends(profilesLoader.getOperationParser(),profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser(),profilesDate)); + + if (rankNum != null) { + metricData = metricData.sortPartition("flipflops", Order.DESCENDING).first(rankNum); + + } else { + metricData = metricData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + + } + + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, metricTrends, MongoTrendsOutput.TrendsType.TRENDS_METRIC, reportId, profilesDateStr, clearMongo); + DataSet trends = metricData.map(new MapFunction() { + + @Override + public Trends map(MetricTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getEndpoint(), in.getMetric(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + } + //read input from file + + private static DataSet readInputData(ExecutionEnvironment env, String path) { + DataSet inputData; + Path input = new Path(path); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchServiceFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchServiceFlipFlopTrends.java new file mode 100644 index 00000000..e1604ac3 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/BatchServiceFlipFlopTrends.java @@ -0,0 +1,151 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.functions.calctimelines.CalcLastTimeStatus; +import argo.functions.calctimelines.ServiceFilter; +import argo.functions.calctimelines.TopologyMetricFilter; +import argo.functions.calctrends.CalcEndpointFlipFlopTrends; +import argo.functions.calctrends.CalcMetricFlipFlopTrends; +import argo.functions.calctrends.CalcServiceFlipFlop; +import argo.pojos.EndpointTrends; +import argo.pojos.MetricTrends; +import argo.pojos.ServiceTrends; +import argo.utils.Utils; 
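/*
 * Illustrative submission of one of these trends jobs to a Flink cluster, using the
 * parameters documented in the class-level comment below. The jar name, HDFS paths,
 * API host and token are placeholders, not values taken from this repository:
 *
 *   flink run ArgoStatusTrends.jar \
 *     --date 2021-01-15 \
 *     --yesterdayData hdfs://namenode:8020/metricdata/2021-01-14.avro \
 *     --todayData hdfs://namenode:8020/metricdata/2021-01-15.avro \
 *     --mongoUri mongodb://localhost:27017/argo \
 *     --apiUri api.example.com --key WEB_API_TOKEN \
 *     --reportId REPORT_UUID --N 10 --clearMongo true
 */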
+import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.utils.ParameterTool; +import argo.profiles.ProfilesLoader; +import org.apache.flink.api.common.functions.MapFunction; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.core.fs.Path; +import org.joda.time.DateTime; + +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status changes + * that occur to the level of group, service of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +* +*/ +public class BatchServiceFlipFlopTrends { + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer rankNum; + private static final String serviceTrends = "flipflop_trends_services"; + private static String mongoUri; + private static ProfilesLoader profilesLoader; + private static DateTime profilesDate; + + private static String reportId; + private static String format = "yyyy-MM-dd"; + + private static boolean clearMongo = false; + private static String profilesDateStr; + + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "mongoUri", "apiUri", "key", "date", "reportId")) { + System.exit(0); + } + + + + if(params.get("clearMongo")!=null && params.getBoolean("clearMongo")==true){ + clearMongo=true; + } + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + reportId = params.getRequired("reportId"); + mongoUri = params.get("mongoUri"); + + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params, "yesterdayData"); + todayData = readInputData(env, params, "todayData"); + + // calculate on data + calcFlipFlops(); + +// execute program + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Service Flip Flops for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + 
jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + + + + } + +// filter yesterdaydata and exclude the ones not contained in topology and metric profile data and get the last timestamp data for each service endpoint metric +// filter todaydata and exclude the ones not contained in topology and metric profile data , union yesterday data and calculate status changes for each service endpoint metric +// rank results + //private static DataSet calcFlipFlops() { + private static void calcFlipFlops() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + DataSet filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + + //group data by service enpoint metric and return for each group , the necessary info and a treemap containing timestamps and status + DataSet serviceEndpointMetricGroupData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcMetricFlipFlopTrends(profilesLoader.getOperationParser(),profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser(),profilesDate)); + + //group data by service endpoint and count flip flops + DataSet serviceEndpointGroupData = serviceEndpointMetricGroupData.groupBy("group", "endpoint", "service").reduceGroup(new CalcEndpointFlipFlopTrends(profilesLoader.getAggregationProfileParser().getMetricOp(), profilesLoader.getOperationParser())); + + //group data by service and count flip flops + DataSet< ServiceTrends> serviceGroupData = serviceEndpointGroupData.filter(new ServiceFilter(profilesLoader.getAggregationProfileParser())).groupBy("group", "service").reduceGroup(new CalcServiceFlipFlop(profilesLoader.getOperationParser(), profilesLoader.getAggregationProfileParser())); + + if (rankNum != null) { //sort and rank data + serviceGroupData = serviceGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + serviceGroupData = serviceGroupData.sortPartition("flipflops", Order.DESCENDING).setParallelism(1); + } + + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, serviceTrends, MongoTrendsOutput.TrendsType.TRENDS_SERVICE, reportId, profilesDateStr, clearMongo); + + DataSet trends = serviceGroupData.map(new MapFunction() { + + @Override + public Trends map(ServiceTrends in) throws Exception { + return new Trends(in.getGroup(), in.getService(), in.getFlipflops()); + } + }); + trends.output(metricMongoOut); + + // return serviceGroupData; + } //read input from file + + private static DataSet readInputData(ExecutionEnvironment env, ParameterTool params, String path) { + DataSet inputData; + Path input = new Path(params.getRequired(path)); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class + ); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/BatchStatusTrends.java b/flink_jobs/status_trends/src/main/java/argo/batch/BatchStatusTrends.java new file mode 100644 index 00000000..2dbea5c2 --- /dev/null +++ 
b/flink_jobs/status_trends/src/main/java/argo/batch/BatchStatusTrends.java @@ -0,0 +1,150 @@ +package argo.batch; + +import argo.avro.MetricData; +import argo.functions.calctrends.CalcStatusTrends; +import argo.functions.calctimelines.TopologyMetricFilter; +import argo.functions.calctimelines.CalcLastTimeStatus; +import argo.functions.calctimelines.StatusFilter; +import argo.utils.Utils; +import org.apache.flink.api.common.functions.MapFunction; +import org.apache.flink.api.common.operators.Order; +import org.apache.flink.api.java.DataSet; +import org.apache.flink.api.java.ExecutionEnvironment; +import org.apache.flink.api.java.io.AvroInputFormat; +import org.apache.flink.api.java.tuple.Tuple6; +import org.apache.flink.api.java.utils.ParameterTool; +import org.apache.flink.core.fs.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import argo.profiles.ProfilesLoader; +import org.joda.time.DateTime; +/** + * Implements an ARGO Status Trends Job in flink , to count the number of status appearances per status type + * that occur to the level of group, service, endpoint. metric of the topology hierarchy + * + * Submit job in flink cluster using the following parameters + +* --date:the date for which the job runs and need to return results , yyyy-MM-dd +* --yesterdayData: path to the metric profile data, of the previous day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --todayData: path to the metric profile data, of the current day , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --mongoUri: path to MongoDB destination (eg mongodb://localhost:27017/database +* --apiUri: path to the mongo db the , for which the jobs runs profile (For hdfs use: hdfs://namenode:port/path/to/file) +* --key: ARGO web api token +* --reportId: the id of the report the job will need to process +* --apiUri: ARGO wep api to connect to msg.example.com +*Optional: +* -- clearMongo: option to clear the mongo db before saving the new result or not, e.g true +* -- N : the number of the result the job will provide, if the parameter exists , e.g 10 +* +*/ +public class BatchStatusTrends { + + static Logger LOG = LoggerFactory.getLogger(BatchStatusTrends.class); + + private static DataSet yesterdayData; + private static DataSet todayData; + private static Integer rankNum; + + private static final String statusTrendsCol = "status_trends_metrics"; + + + private static String mongoUri; + private static ProfilesLoader profilesLoader; + + private static String reportId; + private static DateTime profilesDate; + private static String format = "yyyy-MM-dd"; + + private static boolean clearMongo = false; + private static String profilesDateStr; + + public static void main(String[] args) throws Exception { + // set up the batch execution environment + final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); + + final ParameterTool params = ParameterTool.fromArgs(args); + //check if all required parameters exist and if not exit program + if (!Utils.checkParameters(params, "yesterdayData", "todayData", "apiUri", "key", "date", "reportId")) { + System.exit(0); + } + + + if (params.get("clearMongo") != null && params.getBoolean("clearMongo") == true) { + clearMongo = true; + } + profilesDate = Utils.convertStringtoDate(format, params.getRequired("date")); + profilesDateStr = Utils.convertDateToString(format, profilesDate); + if (params.get("N") != null) { + rankNum = params.getInt("N"); + } + reportId = 
params.getRequired("reportId"); + mongoUri = params.get("mongoUri"); + + profilesLoader = new ProfilesLoader(params); + yesterdayData = readInputData(env, params, "yesterdayData"); + todayData = readInputData(env, params, "todayData"); + + DataSet> rankedData = rankByStatus(); + filterByStatusAndWrite(statusTrendsCol, rankedData, "critical"); + filterByStatusAndWrite(statusTrendsCol, rankedData, "warning"); + filterByStatusAndWrite(statusTrendsCol, rankedData, "unknown"); + +// execute program + StringBuilder jobTitleSB = new StringBuilder(); + jobTitleSB.append("Status Trends for: "); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getTenant()); + jobTitleSB.append("/"); + jobTitleSB.append(profilesLoader.getReportParser().getTenantReport().getInfo()[0]); + jobTitleSB.append("/"); + jobTitleSB.append(profilesDate); + env.execute(jobTitleSB.toString()); + } + + //filters the yesterdayData and exclude the ones not in topology and metric profile data and keeps the last timestamp for each service endpoint metric + //filters the todayData and exclude the ones not in topology and metric profile data, union with yesterdayData and calculates the times each status (CRITICAL,WARNING.UNKNOW) appears + private static DataSet> rankByStatus() { + + DataSet filteredYesterdayData = yesterdayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())).groupBy("hostname", "service", "metric").reduceGroup(new CalcLastTimeStatus()); + + DataSet filteredTodayData = todayData.filter(new TopologyMetricFilter(profilesLoader.getMetricProfileParser(), profilesLoader.getTopologyEndpointParser(), profilesLoader.getTopolGroupParser(), profilesLoader.getAggregationProfileParser())); + DataSet> rankedData = filteredTodayData.union(filteredYesterdayData).groupBy("hostname", "service", "metric").reduceGroup(new CalcStatusTrends(profilesLoader.getTopologyEndpointParser(), profilesLoader.getAggregationProfileParser())); + + return rankedData; + } + + // filter the data based on status (CRITICAL,WARNING,UNKNOWN), rank and write top N in seperate files for each status + private static void filterByStatusAndWrite(String uri, DataSet> data, String status) { + String collectionUri = mongoUri + "." 
+ uri; + DataSet> filteredData = data.filter(new StatusFilter(status)); + + if (rankNum != null) { + filteredData = filteredData.sortPartition(5, Order.DESCENDING).setParallelism(1).first(rankNum); + } else { + filteredData = filteredData.sortPartition(5, Order.DESCENDING).setParallelism(1); + } + + MongoTrendsOutput metricMongoOut = new MongoTrendsOutput(mongoUri, uri, MongoTrendsOutput.TrendsType.TRENDS_STATUS, reportId, profilesDateStr, clearMongo); + + DataSet trends = filteredData.map(new MapFunction, Trends>() { + + @Override + public Trends map(Tuple6 in) throws Exception { + return new Trends(in.f0.toString(), in.f1.toString(), in.f2.toString(), in.f3.toString(), in.f4.toString(), in.f5); + } + }); + trends.output(metricMongoOut); + + // writeToMongo(collectionUri, filteredData); + } + + // reads input from file + private static DataSet readInputData(ExecutionEnvironment env, ParameterTool params, String path) { + DataSet inputData; + Path input = new Path(params.getRequired(path)); + + AvroInputFormat inputAvroFormat = new AvroInputFormat(input, MetricData.class); + inputData = env.createInput(inputAvroFormat); + return inputData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/MongoTrendsOutput.java b/flink_jobs/status_trends/src/main/java/argo/batch/MongoTrendsOutput.java new file mode 100644 index 00000000..f50cd250 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/MongoTrendsOutput.java @@ -0,0 +1,173 @@ +package argo.batch; + +import java.io.IOException; + +import org.apache.flink.api.common.io.OutputFormat; +import org.apache.flink.configuration.Configuration; +import org.bson.Document; + +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import org.bson.conversions.Bson; + +/** + * MongoTrendsOutput for storing trends data to mongodb + */ +public class MongoTrendsOutput implements OutputFormat { + + // Select the type of status input + public enum TrendsType { + TRENDS_STATUS, TRENDS_METRIC, TRENDS_ENDPOINT, TRENDS_SERVICE, TRENDS_GROUP + } + + private static final long serialVersionUID = 1L; + + private String mongoHost; + private int mongoPort; + private String dbName; + private String colName; +// private MongoMethod method; + private TrendsType trendsType; + private String report; + private int date; + private MongoClient mClient; + private MongoDatabase mDB; + private MongoCollection mCol; + private boolean clearMongo; + + // constructor + public MongoTrendsOutput(String uri, String col, TrendsType trendsType, String report, String date, boolean clearMongo) { + + this.date = Integer.parseInt(date.replace("-", "")); + this.trendsType = trendsType; + this.report = report; + + MongoClientURI mURI = new MongoClientURI(uri); + String[] hostParts = mURI.getHosts().get(0).split(":"); + String hostname = hostParts[0]; + int port = Integer.parseInt(hostParts[1]); + + this.mongoHost = hostname; + this.mongoPort = port; + this.dbName = mURI.getDatabase(); + this.colName = col; + this.clearMongo = clearMongo; + } + + // constructor + public MongoTrendsOutput(String host, int port, String db, String col, TrendsType trendsType, String report, boolean clearMongo) { + this.mongoHost = host; + this.mongoPort = port; + this.dbName = db; + this.colName = col; + this.trendsType = trendsType; + this.report = report; + this.clearMongo = clearMongo; + } + + private void initMongo() { + this.mClient = new 
MongoClient(mongoHost, mongoPort); + this.mDB = mClient.getDatabase(dbName); + this.mCol = mDB.getCollection(colName); + if (this.clearMongo) { + deleteDoc(); + } + } + + /** + * Initialize MongoDB remote connection + */ + @Override + public void open(int taskNumber, int numTasks) throws IOException { + // Configure mongo + initMongo(); + } + + /** + * Prepare correct MongoDocument according to record values and selected + * StatusType. A different document is needed for storing Trends results + * than Metric , Endpoint, Service , Group + */ + private Document prepDoc(Trends record) { + Document doc = new Document("report", this.report) + .append("date", this.date); + + switch (this.trendsType) { + case TRENDS_GROUP: + doc.append("group", record.getGroup()); + doc.append("flipflop", record.getFlipflop()); + break; + case TRENDS_SERVICE: + doc.append("group", record.getGroup()); + doc.append("service", record.getService()); + doc.append("flipflop", record.getFlipflop()); + break; + case TRENDS_ENDPOINT: + doc.append("group", record.getGroup()); + doc.append("service", record.getService()); + doc.append("endpoint", record.getEndpoint()); + doc.append("flipflop", record.getFlipflop()); + break; + case TRENDS_METRIC: + doc.append("group", record.getGroup()); + doc.append("service", record.getService()); + doc.append("endpoint", record.getEndpoint()); + doc.append("metric", record.getMetric()); + doc.append("flipflop", record.getFlipflop()); + break; + case TRENDS_STATUS: + doc.append("group", record.getGroup()); + doc.append("service", record.getService()); + doc.append("endpoint", record.getEndpoint()); + doc.append("metric", record.getMetric()); + doc.append("status", record.getStatus()); + doc.append("trends", record.getTrends()); + break; + default: + break; + } + return doc; + } + + private void deleteDoc() { + + Bson filter = Filters.and(Filters.eq("report", this.report), Filters.eq("date", this.date)); + mCol.deleteMany(filter); + } + + /** + * Store a MongoDB document record + */ + @Override + public void writeRecord(Trends record) throws IOException { + + // Mongo Document to be prepared according to StatusType of input + Document doc = prepDoc(record); + + mCol.insertOne(doc); + + } + + /** + * Close MongoDB Connection + */ + @Override + public void close() throws IOException { + if (mClient != null) { + mClient.close(); + mClient = null; + mDB = null; + mCol = null; + } + } + + @Override + public void configure(Configuration arg0) { + // configure + + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/batch/Trends.java b/flink_jobs/status_trends/src/main/java/argo/batch/Trends.java new file mode 100644 index 00000000..80497d40 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/batch/Trends.java @@ -0,0 +1,122 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
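The Trends POJO defined below is what MongoTrendsOutput.writeRecord() above serializes. As a sketch of the result, for a TRENDS_STATUS record prepDoc() builds a document equivalent to the following (all field values here are hypothetical):

    Document example = new Document("report", "example-report-id")
            .append("date", 20210601)
            .append("group", "SITE-A")
            .append("service", "webportal")
            .append("endpoint", "host1.example.com")
            .append("metric", "check_http")
            .append("status", "CRITICAL")
            .append("trends", 4);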
+ */ +package argo.batch; + +import argo.pojos.EndpointTrends; +import argo.pojos.GroupTrends; +import argo.pojos.MetricTrends; +import argo.pojos.ServiceTrends; + +/** + * + * @author cthermolia + */ +public class Trends { + private String group; + private String service; + private String endpoint; + private String metric; + private int flipflop; + + private String status; + private int trends; + + public Trends(String group, String service, String endpoint, String metric, String status, int trends) { + this.group = group; + this.service = service; + this.endpoint = endpoint; + this.metric = metric; + this.status = status; + this.trends = trends; + } + + + + public Trends(String group, String service, String endpoint, String metric, int flipflop) { + this.group = group; + this.service = service; + this.endpoint = endpoint; + this.metric = metric; + this.flipflop = flipflop; + } + + public Trends(String group, int flipflop) { + this.group = group; + this.flipflop = flipflop; + } + + public Trends(String group, String service, int flipflop) { + this.group = group; + this.service = service; + this.flipflop = flipflop; + } + + public Trends(String group, String service, String endpoint, int flipflop) { + this.group = group; + this.service = service; + this.endpoint = endpoint; + this.flipflop = flipflop; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getMetric() { + return metric; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + public int getFlipflop() { + return flipflop; + } + + public void setFlipflop(int flipflop) { + this.flipflop = flipflop; + } + + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public int getTrends() { + return trends; + } + + public void setTrends(int trends) { + this.trends = trends; + } + + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroEndpointTrendsFilter.java b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroEndpointTrendsFilter.java new file mode 100644 index 00000000..79895d40 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroEndpointTrendsFilter.java @@ -0,0 +1,36 @@ +package argo.filter.zero.flipflops; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ + +import argo.functions.calctimelines.ServiceFilter; +import argo.pojos.EndpointTrends; +import argo.pojos.MetricTrends; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + * + * StatusFilter, filters data by status + */ +public class ZeroEndpointTrendsFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ServiceFilter.class); + + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(EndpointTrends t) throws Exception { + if (t.getFlipflops()>0) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupFunctionFilter.java b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupFunctionFilter.java new file mode 100644 index 00000000..82404094 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupFunctionFilter.java @@ -0,0 +1,33 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.filter.zero.flipflops; + +import argo.functions.calctimelines.ServiceFilter; +import argo.pojos.GroupFunctionTrends; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + */ + + public class ZeroGroupFunctionFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ZeroGroupFunctionFilter.class); + + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(GroupFunctionTrends t) throws Exception { + if (t.getFlipflops()>0) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupTrendsFilter.java b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupTrendsFilter.java new file mode 100644 index 00000000..f341c4d4 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroGroupTrendsFilter.java @@ -0,0 +1,34 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.filter.zero.flipflops; + +import argo.pojos.GroupTrends; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + */ + + + public class ZeroGroupTrendsFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ZeroGroupTrendsFilter.class); + + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(GroupTrends t) throws Exception { + if (t.getFlipflops()>0) { + return true; + } + return false; + } + + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroMetricTrendsFilter.java b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroMetricTrendsFilter.java new file mode 100644 index 00000000..e3ffa413 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroMetricTrendsFilter.java @@ -0,0 +1,35 @@ +package argo.filter.zero.flipflops; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ + +import argo.functions.calctimelines.ServiceFilter; +import argo.pojos.MetricTrends; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + * + * StatusFilter, filters data by status + */ +public class ZeroMetricTrendsFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ServiceFilter.class); + + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(MetricTrends t) throws Exception { + if (t.getFlipflops()>0) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroServiceTrendsFilter.java b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroServiceTrendsFilter.java new file mode 100644 index 00000000..fbed0e37 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/filter/zero/flipflops/ZeroServiceTrendsFilter.java @@ -0,0 +1,32 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.filter.zero.flipflops; + +import argo.functions.calctimelines.ServiceFilter; +import argo.pojos.ServiceTrends; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + */ + public class ZeroServiceTrendsFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ZeroServiceTrendsFilter.class); + + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(ServiceTrends t) throws Exception { + if (t.getFlipflops()>0) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/CalcLastTimeStatus.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/CalcLastTimeStatus.java new file mode 100644 index 00000000..96bf5503 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/CalcLastTimeStatus.java @@ -0,0 +1,37 @@ +package argo.functions.calctimelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ + +import argo.avro.MetricData; +import java.util.TreeMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.util.Collector; + +/** + * + * @author cthermolia + * CalcLastTimeStatus keeps data of the latest time entry +*/ + +public class CalcLastTimeStatus implements GroupReduceFunction { + + /** + * + * @param in, the initial dataset of the MetricData + * @param out , the output dataset containing the MetricData of the latest timestamp + * @throws Exception + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + TreeMap timeStatusMap = new TreeMap<>(); + for (MetricData md : in) { + timeStatusMap.put(md.getTimestamp().toString(), md); + + } + out.collect(timeStatusMap.lastEntry().getValue()); + } +} \ No newline at end of file diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/MapServices.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/MapServices.java new file mode 100644 index 00000000..1a7ec07e --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/MapServices.java @@ -0,0 +1,56 @@ +package argo.functions.calctimelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
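CalcLastTimeStatus above is applied after grouping the previous day's records by endpoint and metric, so that only the most recent sample of each key is carried into the union with the current day's data. Its use in BatchStatusTrends (simplified here, without the topology and metric-profile filter) looks like:

    DataSet<MetricData> lastOfYesterday = yesterdayData
            .groupBy("hostname", "service", "metric")
            .reduceGroup(new CalcLastTimeStatus());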
+ */ + +import argo.pojos.ServiceTrends; +import argo.profiles.AggregationProfileParser; +import java.util.ArrayList; +import java.util.HashMap; +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.util.Collector; + +/** + * @author cthermolia + * + * MapServices produces TimelineTrends for each service,that maps to the groups + * of functions as described in aggregation profile groups endpoint , metric + */ + + +public class MapServices implements FlatMapFunction { + + private AggregationProfileParser aggregationProfileParser; + + private HashMap> serviceFunctions; + + public MapServices(AggregationProfileParser aggregationProfileParser) { + this.aggregationProfileParser = aggregationProfileParser; + } + + /** + * if the service exist in one or more function groups , timeline trends are + * produced for each function that the service belongs and the function info + * is added to the timelinetrend + * + * @param t + * @param out + * @throws Exception + */ + @Override + public void flatMap(ServiceTrends t, Collector out) throws Exception { + String service = t.getService(); + + ArrayList functionList = aggregationProfileParser.retrieveServiceFunctions(service); + if (functionList != null) { + for (String f : functionList) { + ServiceTrends newT = t; + newT.setFunction(f); + out.collect(newT); + } + } + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/ServiceFilter.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/ServiceFilter.java new file mode 100644 index 00000000..178c6ac7 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/ServiceFilter.java @@ -0,0 +1,42 @@ +package argo.functions.calctimelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ + +import argo.pojos.EndpointTrends; +import argo.profiles.AggregationProfileParser; +import java.util.ArrayList; +import org.apache.flink.api.common.functions.FilterFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + * + * StatusFilter, filters data by status + */ +public class ServiceFilter implements FilterFunction { + + static Logger LOG = LoggerFactory.getLogger(ServiceFilter.class); + + private AggregationProfileParser aggregationProfileParser; + + public ServiceFilter(AggregationProfileParser aggregationProfileParser) { + this.aggregationProfileParser = aggregationProfileParser; + } + + //if the status field value in Tuple equals the given status returns true, else returns false + @Override + public boolean filter(EndpointTrends t) throws Exception { + ArrayList services = new ArrayList<>(aggregationProfileParser.getServiceOperations().keySet()); + if (services.contains(t.getService())) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/StatusFilter.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/StatusFilter.java new file mode 100644 index 00000000..0cc9f8b8 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/StatusFilter.java @@ -0,0 +1,38 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
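StatusFilter, defined next, keeps only the tuples whose status field (f4) matches the given status, case-insensitively. In BatchStatusTrends it is applied once per status to the ranked dataset, for example:

    // keep only the CRITICAL counters
    DataSet<Tuple6<String, String, String, String, String, Integer>> critical =
            rankedData.filter(new StatusFilter("critical"));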
+ */ +package argo.functions.calctimelines; + +import org.apache.flink.api.common.functions.FilterFunction; +import org.apache.flink.api.java.tuple.Tuple6; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * @author cthermolia + * + * StatusFilter, filters data by status + */ +public class StatusFilter implements FilterFunction> { + + static Logger LOG = LoggerFactory.getLogger(StatusFilter.class); + private String status; + + public StatusFilter(String status) { + this.status = status; + } + //if the status field value in Tuple equals the given status returns true, else returns false + + @Override + public boolean filter(Tuple6 t) throws Exception { + + if (t.f4.toString().equalsIgnoreCase(status)) { + return true; + } + return false; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/TopologyMetricFilter.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/TopologyMetricFilter.java new file mode 100644 index 00000000..40cec30a --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctimelines/TopologyMetricFilter.java @@ -0,0 +1,54 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.functions.calctimelines; + +import argo.avro.MetricData; +import argo.profiles.AggregationProfileParser; +import argo.profiles.MetricProfileParser; +import argo.profiles.TopologyEndpointParser; +import argo.profiles.TopologyGroupParser; +import java.util.ArrayList; +import java.util.HashMap; +import org.apache.flink.api.common.functions.FilterFunction; +//import org.apache.flink.api.common.functions.RichFilterFunction; + +/** + * + * @author cthermolia + * + * TopologyMetricFilter , filters service endpoint and exclude the ones that do + * not appear in topology and metric profile data inputs + */ +public class TopologyMetricFilter implements FilterFunction { + + private MetricProfileParser metricProfileParser; + private TopologyEndpointParser topologyEndpointParser; + private TopologyGroupParser topologyGroupParser; + private AggregationProfileParser aggregationProfileParser; + + public TopologyMetricFilter(MetricProfileParser metricProfileParser, TopologyEndpointParser topologyEndpointParser, TopologyGroupParser topologyGroupParser, AggregationProfileParser aggregationProfileParser) { + this.metricProfileParser = metricProfileParser; + this.topologyEndpointParser = topologyEndpointParser; + this.topologyGroupParser = topologyGroupParser; + + this.aggregationProfileParser = aggregationProfileParser; + } + @Override + public boolean filter(MetricData t) throws Exception { + + String group = topologyEndpointParser.retrieveGroup(aggregationProfileParser.getEndpointGroup().toUpperCase(), t.getHostname() + "-" + t.getService()); + boolean hasGroup = false; + if (topologyGroupParser.containsGroup(group) && group != null) { + hasGroup = true; + } + if (hasGroup && metricProfileParser.containsMetric(t.getService().toString(), t.getMetric().toString())) { + + + return true; + } + return false; + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcEndpointFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcEndpointFlipFlopTrends.java new file mode 100644 index 00000000..1584b598 --- /dev/null +++ 
b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcEndpointFlipFlopTrends.java @@ -0,0 +1,80 @@ +package argo.functions.calctrends; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +//import argo.pojos.Timeline; +//import argo.functions.calctimelines.TimelineMerger; +import argo.pojos.EndpointTrends; +import argo.pojos.MetricTrends; +import argo.profiles.OperationsParser; + +import java.util.HashMap; +import org.apache.flink.api.common.functions.RichGroupReduceFunction; +import org.apache.flink.util.Collector; +import org.joda.time.DateTime; +import timelines.Timeline; +import timelines.TimelineAggregator; +/** + * + * @author cthermolia + * + * CalcEndpointTrends, count status changes for each service endpoint group + */ +public class CalcEndpointFlipFlopTrends extends RichGroupReduceFunction { + + private OperationsParser operationsParser; + private String operation; + private DateTime date; + + public CalcEndpointFlipFlopTrends(String operation, OperationsParser operationsParser) { + this.operation = operation; + this.operationsParser = operationsParser; + + } + + /** + * + * @param in, a collection of MetricTrends as calculated on previous steps , + * from group, service, endpoint, metric groups + * @param out, a collection of EndpointTrends containing the information of + * the computation on group ,service, endpoint groups + * @throws Exception + */ + @Override + public void reduce(Iterable in, Collector< EndpointTrends> out) throws Exception { + String group = null; + String service = null; + String hostname = null; + //store the necessary info + //collect all timelines in a list + + HashMap timelinelist = new HashMap<>(); + + for (MetricTrends time : in) { + group = time.getGroup(); + service = time.getService(); + hostname = time.getEndpoint(); + Timeline timeline = time.getTimeline(); + + timelinelist.put(time.getMetric(),timeline); + + } + // merge the timelines into one timeline , + + // as multiple status (each status exist in each timeline) correspond to each timestamp, there is a need to conclude into one status/timestamp + //for each timestamp the status that prevails is concluded by the truth table that is defined for the operation + TimelineAggregator timelineAggregator = new TimelineAggregator(timelinelist); + timelineAggregator.aggregate( operationsParser.getTruthTable(), operationsParser.getIntOperation(operation)); + + Timeline mergedTimeline = timelineAggregator.getOutput(); //collect all timelines that correspond to the group service endpoint group , merge them in order to create one timeline + Integer flipflops = mergedTimeline.calcStatusChanges();//calculate flip flops on the concluded merged timeline + + if (group != null && service != null && hostname != null) { + EndpointTrends endpointTrends = new EndpointTrends(group, service, hostname, mergedTimeline, flipflops); + out.collect(endpointTrends); + } + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFlipFlop.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFlipFlop.java new file mode 100644 index 00000000..44daeacd --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFlipFlop.java @@ -0,0 +1,60 @@ +package argo.functions.calctrends; + + +/* + * To change this license header, choose License Headers in Project Properties. 
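CalcEndpointFlipFlopTrends above, and the group, function and service reducers that follow, all share the same aggregate-then-count pattern. A minimal sketch of that pattern, assuming two per-metric Timeline objects built earlier and an OperationsParser loaded from the operations profile (the timeline variables and the "AND" operation name are hypothetical):

    HashMap<String, Timeline> perMetric = new HashMap<>();
    perMetric.put("check_http", httpTimeline);  // hypothetical Timeline built earlier
    perMetric.put("check_ssl", sslTimeline);    // hypothetical Timeline built earlier

    TimelineAggregator aggregator = new TimelineAggregator(perMetric);
    // resolve one status per timestamp using the operation's truth table
    aggregator.aggregate(operationsParser.getTruthTable(), operationsParser.getIntOperation("AND"));

    Timeline merged = aggregator.getOutput();
    int flipflops = merged.calcStatusChanges();  // number of status changes in the merged timeline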
+ * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +//import argo.functions.calctimelines.TimelineMerger; +import argo.pojos.GroupFunctionTrends; +import argo.pojos.GroupTrends; +//import argo.pojos.Timeline; +import argo.profiles.AggregationProfileParser; +import argo.profiles.OperationsParser; + +import java.util.HashMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.util.Collector; +import timelines.Timeline; +import timelines.TimelineAggregator; + +/** + * + * @author cthermolia + * + * CalcServiceEndpointFlipFlop, count status changes for each service endpoint + * group + */ +public class CalcGroupFlipFlop implements GroupReduceFunction< GroupFunctionTrends, GroupTrends> { + + private OperationsParser operationsParser; + private String groupOperation; + + public CalcGroupFlipFlop(OperationsParser operationsParser, AggregationProfileParser aggregationProfileParser) { + this.operationsParser = operationsParser; + this.groupOperation = aggregationProfileParser.getProfileOp(); + } + + @Override + public void reduce(Iterable in, Collector< GroupTrends> out) throws Exception { + String group = null; + + HashMap timelist = new HashMap<>(); + for (GroupFunctionTrends time : in) { + group = time.getGroup(); + timelist.put(time.getFunction(),time.getTimeline()); + } + TimelineAggregator timelineAggregator = new TimelineAggregator(timelist); + + + timelineAggregator.aggregate(operationsParser.getTruthTable(),operationsParser.getIntOperation(groupOperation)); + Timeline timeline=timelineAggregator.getOutput(); + + int flipflops = timeline.calcStatusChanges(); + + GroupTrends groupTrends = new GroupTrends(group, timeline, flipflops); + out.collect(groupTrends); + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFunctionFlipFlop.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFunctionFlipFlop.java new file mode 100644 index 00000000..a19894d0 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcGroupFunctionFlipFlop.java @@ -0,0 +1,66 @@ +package argo.functions.calctrends; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ + +//import argo.functions.calctimelines.TimelineMerger; + +import argo.pojos.GroupFunctionTrends; +import argo.pojos.ServiceTrends; +//import argo.pojos.Timeline; +import argo.profiles.AggregationProfileParser; +import argo.profiles.OperationsParser; +import java.util.HashMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.util.Collector; +import timelines.Timeline; +import timelines.TimelineAggregator; + +/** + * + * @author cthermolia + * + * CalcGroupFunctionFlipFlop, count status changes for each group function + * group + */ +public class CalcGroupFunctionFlipFlop implements GroupReduceFunction< ServiceTrends, GroupFunctionTrends> { + + private OperationsParser operationsParser; + + private HashMap functionOperations; + + public CalcGroupFunctionFlipFlop(OperationsParser operationsParser, AggregationProfileParser aggregationProfileParser ) { + this.operationsParser = operationsParser; + this.functionOperations=aggregationProfileParser.getFunctionOperations(); + } + + @Override + public void reduce(Iterable in, Collector< GroupFunctionTrends> out) throws Exception { + String group = null; + String function = null; + // ArrayList list = new ArrayList<>(); + //construct a timeline containing all the timestamps of each metric timeline + + + HashMap timelist = new HashMap<>(); + for (ServiceTrends time : in) { + group = time.getGroup(); + function=time.getFunction(); + timelist.put(time.getService(),time.getTimeline()); + } + String operation=functionOperations.get(function); //for each function an operation exists , so retrieve the corresponding truth table + TimelineAggregator timelineAggregator = new TimelineAggregator(timelist); + timelineAggregator.aggregate( operationsParser.getTruthTable(), operationsParser.getIntOperation(operation)); + + Timeline timeline= timelineAggregator.getOutput(); + int flipflops = timeline.calcStatusChanges(); + + GroupFunctionTrends groupFunctionTrends = new GroupFunctionTrends(group, function, timeline, flipflops); + out.collect(groupFunctionTrends); + + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcMetricFlipFlopTrends.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcMetricFlipFlopTrends.java new file mode 100644 index 00000000..014aec3e --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcMetricFlipFlopTrends.java @@ -0,0 +1,83 @@ +package argo.functions.calctrends; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +import argo.avro.MetricData; +//import argo.pojos.Timeline; +import argo.pojos.MetricTrends; +import argo.profiles.AggregationProfileParser; +import argo.profiles.OperationsParser; +import argo.profiles.TopologyEndpointParser; +import argo.utils.Utils; +import java.util.ArrayList; +import java.util.TreeMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.util.Collector; +import org.joda.time.DateTime; +import timelines.Timeline; + +/** + * + * @author cthermolia + * + * CalcMetricTrends, count status changes for each service endpoint metric + */ +public class CalcMetricFlipFlopTrends implements GroupReduceFunction { + + //private HashMap groupEndpoints; + private final String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + private TopologyEndpointParser topologyEndpointParser; + private AggregationProfileParser aggregationProfileParser; + private OperationsParser operationsParser; + private DateTime date; +// +// public CalcMetricFlipFlopTrends(HashMap groupEndpoints) { +// this.groupEndpoints = groupEndpoints; +// } + + public CalcMetricFlipFlopTrends(OperationsParser operationsParser, TopologyEndpointParser topologyEndpointParser, AggregationProfileParser aggregationProfileParser, DateTime date) { + this.topologyEndpointParser = topologyEndpointParser; + this.aggregationProfileParser = aggregationProfileParser; + this.operationsParser = operationsParser; + this.date = date; + } + + /** + * + * @param in, the MetricData dataset + * @param out, the collection of MetricTrends, containing the information of + * the computations on group, service, endpoint, metric groups + * @throws Exception + */ + @Override + public void reduce(Iterable in, Collector out) throws Exception { + TreeMap timeStatusMap = new TreeMap<>(); + String group = null; + String hostname = null; + String service = null; + String metric = null; + for (MetricData md : in) { + hostname = md.getHostname().toString(); + service = md.getService().toString(); + metric = md.getMetric().toString(); + // group = groupEndpoints.get(md.getHostname().toString() + "-" + md.getService()); //retrieve the group for the service, as contained in file + group = topologyEndpointParser.retrieveGroup(aggregationProfileParser.getEndpointGroup().toUpperCase(), md.getHostname().toString() + "-" + md.getService().toString()); + int st = operationsParser.getIntStatus(md.getStatus().toString()); + timeStatusMap.put(Utils.convertStringtoDate(format, md.getTimestamp().toString()), st); + } + + Timeline timeline = new Timeline(); + timeline.insertDateTimeStamps(timeStatusMap); + + timeline.replacePreviousDateStatus(date, new ArrayList<>(operationsParser.getStates().keySet()));//handle the first timestamp to contain the previous days timestamp status if necessary and the last timestamp to contain the status of the last timelines's entry + Integer flipflop = timeline.calcStatusChanges(); + + if (group != null && service != null && hostname != null && metric != null) { + MetricTrends metricTrends = new MetricTrends(group, service, hostname, metric, timeline, flipflop); + out.collect(metricTrends); + } + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcServiceFlipFlop.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcServiceFlipFlop.java new file mode 100644 index 00000000..57e91565 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcServiceFlipFlop.java @@ -0,0 +1,72 @@ +package argo.functions.calctrends; + +/* + * 
To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +//import argo.functions.calctimelines.TimelineMerger; +import argo.pojos.EndpointTrends; +import argo.pojos.ServiceTrends; +//import argo.pojos.Timeline; +import argo.profiles.AggregationProfileParser; +import argo.profiles.OperationsParser; +import java.util.ArrayList; +import java.util.HashMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.util.Collector; +import timelines.Timeline; +import timelines.TimelineAggregator; + +/** + * + * @author cthermolia + * + * CalcServiceEndpointFlipFlop, count status changes for each service endpoint + * group + */ +public class CalcServiceFlipFlop implements GroupReduceFunction< EndpointTrends, ServiceTrends> { + + private HashMap> operationTruthTables; + + private HashMap serviceOperationMap; + private OperationsParser operationsParser; + + public CalcServiceFlipFlop(OperationsParser operationsParser, AggregationProfileParser aggregationProfileParser) { +// this.operationTruthTables = operationParser.getOpTruthTable(); + this.operationsParser = operationsParser; + this.serviceOperationMap = aggregationProfileParser.getServiceOperations(); + } + + @Override + public void reduce(Iterable in, Collector< ServiceTrends> out) throws Exception { + String group = null; + String service = null; + // String hostname = null; + ArrayList list = new ArrayList<>(); + //construct a timeline containing all the timestamps of each metric timeline + + + HashMap timelineList = new HashMap<>(); + + for (EndpointTrends endpointTrend : in) { + group = endpointTrend.getGroup(); + service = endpointTrend.getService(); + timelineList.put(endpointTrend.getEndpoint(), endpointTrend.getTimeline()); + } + String operation = serviceOperationMap.get(service); + + TimelineAggregator timelineAggregator = new TimelineAggregator(timelineList); + timelineAggregator.aggregate(operationsParser.getTruthTable(), operationsParser.getIntOperation(operation)); + + +/// HashMap opTruthTable = operationTruthTables.get(operation); + Timeline timeline = timelineAggregator.getOutput(); + int flipflops = timeline.calcStatusChanges(); + + if (group != null && service != null) { + ServiceTrends serviceTrends = new ServiceTrends(group, service, timeline, flipflops); + out.collect(serviceTrends); + } + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcStatusTrends.java b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcStatusTrends.java new file mode 100644 index 00000000..42fc3f0c --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/functions/calctrends/CalcStatusTrends.java @@ -0,0 +1,101 @@ +package argo.functions.calctrends; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +import argo.avro.MetricData; +import argo.profiles.AggregationProfileParser; +import argo.profiles.TopologyEndpointParser; +import java.text.ParseException; +import java.util.HashMap; +import java.util.TreeMap; +import org.apache.flink.api.common.functions.GroupReduceFunction; +import org.apache.flink.api.java.tuple.Tuple6; +import org.apache.flink.util.Collector; + +/** + * + * @author cthermolia + * + * CalcServiceEnpointMetricStatus, for each service endpoint metric group , + * keeps count for each status (CRITICAL,WARNING,UNKNOW) appearance and returns + * the group information (group, service,hostname, metric, status, + * statuscounter) + */ +public class CalcStatusTrends implements GroupReduceFunction> { + + //private HashMap groupEndpoints; + private TopologyEndpointParser topologyEndpointParser; + private AggregationProfileParser aggregationProfileParser; +// public CalcStatusTrends(HashMap groupEndpoints) { +// this.groupEndpoints = groupEndpoints; +// } + + public CalcStatusTrends(TopologyEndpointParser topologyEndpointParser,AggregationProfileParser aggregationProfileParser) { + this.topologyEndpointParser = topologyEndpointParser; + this.aggregationProfileParser=aggregationProfileParser; + } + + + /** + * for each service endpoint metric Iterable check the MetricData status , + * keep counter for each status(CRITICAL, WARNING,UNKNOWN) and provide + * results for each status + * + * @param in, the MetricData dataset + * @param out, the collection of 3 Tuple6 for each Iterable (one for each + * status) that keeps info for group ,service, hostname, metric, status, and + * status counter + * @throws Exception + */ + @Override + public void reduce(Iterable in, Collector> out) throws ParseException { + TreeMap timeStatusMap = new TreeMap<>(); + String group = null; + String hostname = null; + String service = null; + String status = null; + String metric = null; + int criticalSum = 0, warningSum = 0, unknownSum = 0; + + //for each MetricData in group check the status and increase counter accordingly + for (MetricData md : in) { + // group = groupEndpoints.get(md.getHostname().toString() + "-" + md.getService().toString()); //retrieve the group for the service, as contained in file group_endpoints. 
if group is null exit + group=topologyEndpointParser.retrieveGroup(aggregationProfileParser.getEndpointGroup().toUpperCase(), md.getHostname().toString() + "-" + md.getService().toString()); + hostname = md.getHostname().toString(); + service = md.getService().toString(); + status = md.getStatus().toString(); + metric = md.getMetric().toString(); + if (group != null && service != null && hostname != null && metric != null) { + + if (status.equalsIgnoreCase("critical")) { + criticalSum++; + } else if (status.equalsIgnoreCase("warning")) { + + warningSum++; + } else if (status.equalsIgnoreCase("unknown")) { + unknownSum++; + } + + } + } + // for the group create result for each status and keep group info + if (group != null && service != null && hostname != null && metric != null) { + + Tuple6 tupleCritical = new Tuple6( + group, service, hostname, metric, "CRITICAL", criticalSum); + out.collect(tupleCritical); + + Tuple6 tupleWarning = new Tuple6( + group, service, hostname, metric, "WARNING", warningSum); + out.collect(tupleWarning); + + Tuple6 tupleUnknown = new Tuple6( + group, service, hostname, metric, "UNKNOWN", unknownSum); + out.collect(tupleUnknown); + } + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/pojos/EndpointTrends.java b/flink_jobs/status_trends/src/main/java/argo/pojos/EndpointTrends.java new file mode 100644 index 00000000..4873126e --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/pojos/EndpointTrends.java @@ -0,0 +1,78 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.pojos; + +import java.util.Date; +import java.util.TreeMap; +import timelines.Timeline; +/** + * + * @author cthermolia + * + * EndpointTrends, describes the computed trend information extracted from the timelines at the level of group service endpoints groups + */ +public class EndpointTrends { + + String group; + String service; + String endpoint; + Timeline timeline; + Integer flipflops; + + public EndpointTrends() { + } + + public EndpointTrends(String group, String service, String endpoint, Timeline timeline, Integer flipflops) { + this.group = group; + this.service = service; + this.endpoint = endpoint; + this.timeline = timeline; + this.flipflops = flipflops; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public Timeline getTimeline() { + return timeline; + } + + public void setTimeline(Timeline timeline) { + this.timeline = timeline; + } + + public Integer getFlipflops() { + return flipflops; + } + + public void setFlipflops(Integer flipflops) { + this.flipflops = flipflops; + } + + + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/pojos/GroupFunctionTrends.java b/flink_jobs/status_trends/src/main/java/argo/pojos/GroupFunctionTrends.java new file mode 100644 index 00000000..29d3d6dd --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/pojos/GroupFunctionTrends.java @@ -0,0 +1,65 @@ +/* + * To change this license header, choose License Headers in Project Properties. 
+ * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.pojos; +import timelines.Timeline; +/** + * + * @author cthermolia + * + * GroupFunctionTrends, describes the computed trend information extracted from the + * timelines at the level of group function groups + */ +public class GroupFunctionTrends { + + String group; + String function; + Timeline timeline; + Integer flipflops; + + public GroupFunctionTrends() { + } + + public GroupFunctionTrends(String group, String function, Timeline timeline, Integer flipflops) { + this.group = group; + this.function = function; + this.timeline = timeline; + this.flipflops = flipflops; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getFunction() { + return function; + } + + public void setFunction(String function) { + this.function = function; + } + + public Timeline getTimeline() { + return timeline; + } + + public void setTimeline(Timeline timeline) { + this.timeline = timeline; + } + + public Integer getFlipflops() { + return flipflops; + } + + public void setFlipflops(Integer flipflops) { + this.flipflops = flipflops; + } + + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/pojos/GroupTrends.java b/flink_jobs/status_trends/src/main/java/argo/pojos/GroupTrends.java new file mode 100644 index 00000000..b7d884de --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/pojos/GroupTrends.java @@ -0,0 +1,54 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.pojos; +import timelines.Timeline; +/** + * + * @author cthermolia MetricTrends, describes the computed trend information + * extracted from the set of the timelines at the level of group service + * endpoints metrics groups * + */ +public class GroupTrends { + + String group; + + Timeline timeline; + Integer flipflops; + + public GroupTrends() { + } + + public GroupTrends(String group, Timeline timeline, Integer flipflops) { + this.group = group; + this.timeline = timeline; + this.flipflops = flipflops; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public Timeline getTimeline() { + return timeline; + } + + public void setTimeline(Timeline timeline) { + this.timeline = timeline; + } + + public Integer getFlipflops() { + return flipflops; + } + + public void setFlipflops(Integer flipflops) { + this.flipflops = flipflops; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/pojos/MetricTrends.java b/flink_jobs/status_trends/src/main/java/argo/pojos/MetricTrends.java new file mode 100644 index 00000000..a851a3c6 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/pojos/MetricTrends.java @@ -0,0 +1,93 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.pojos; +import timelines.Timeline; + +/** + * + * @author cthermolia + * MetricTrends, describes the computed trend information extracted from the set of the timelines at the level of group service endpoints metrics groups + + */ +public class MetricTrends{ + + String group; + String service; + String endpoint; + String metric; + Timeline timeline; + Integer flipflops; + + public MetricTrends() { + } + +// public MetricTrends(String group, String service, String endpoint, String metric, Timeline timeline, Integer flipflops) { +// this.group = group; +// this.service = service; +// this.endpoint = endpoint; +// this.metric = metric; +// this.timeline = timeline; +// this.flipflops = flipflops; +// } + + public MetricTrends(String group, String service, String hostname, String metric, timelines.Timeline timeline, Integer flipflop) { + this.group = group; + this.service = service; + this.endpoint = hostname; + this.metric = metric; + this.timeline = timeline; + this.flipflops = flipflop; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getMetric() { + return metric; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + public Timeline getTimeline() { + return timeline; + } + + public void setTimeline(Timeline timeline) { + this.timeline = timeline; + } + + public Integer getFlipflops() { + return flipflops; + } + + public void setFlipflops(Integer flipflops) { + this.flipflops = flipflops; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/pojos/ServiceTrends.java b/flink_jobs/status_trends/src/main/java/argo/pojos/ServiceTrends.java new file mode 100644 index 00000000..53248c2a --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/pojos/ServiceTrends.java @@ -0,0 +1,75 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.pojos; +import timelines.Timeline; +/** + * + * @author cthermolia + * + * ServiceTrends, describes the computed trend information extracted from the timelines at the level of group service endpoints groups + */ +public class ServiceTrends { + + String group; + String service; + Timeline timeline; + Integer flipflops; + String function; + + public ServiceTrends() { + } + + + public ServiceTrends(String group, String service, Timeline timeline, Integer flipflops) { + this.group = group; + this.service = service; + this.timeline = timeline; + this.flipflops = flipflops; + } + + public String getGroup() { + return group; + } + + public void setGroup(String group) { + this.group = group; + } + + public String getService() { + return service; + } + + public void setService(String service) { + this.service = service; + } + + public Timeline getTimeline() { + return timeline; + } + + public void setTimeline(Timeline timeline) { + this.timeline = timeline; + } + + public Integer getFlipflops() { + return flipflops; + } + + public void setFlipflops(Integer flipflops) { + this.flipflops = flipflops; + } + + public String getFunction() { + return function; + } + + public void setFunction(String function) { + this.function = function; + } + + + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/AggregationProfileParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/AggregationProfileParser.java new file mode 100644 index 00000000..09932450 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/AggregationProfileParser.java @@ -0,0 +1,225 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.profiles; + +import argo.utils.RequestManager; +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia AggregationProfileParser, collects data as described in + * the json received from web api aggregation profiles request + */ + +public class AggregationProfileParser implements Serializable { + private String id; + private String date; + private String name; + private String namespace; + private String endpointGroup; + private String metricOp; + private String profileOp; + private String[] metricProfile = new String[2]; + private ArrayList groups = new ArrayList<>(); + + private HashMap serviceOperations = new HashMap<>(); + private HashMap functionOperations = new HashMap<>(); + private HashMap> serviceFunctions = new HashMap<>(); + private final String url = "/aggregation_profiles"; + private JSONObject jsonObject; + public AggregationProfileParser() { + } + + + public AggregationProfileParser(String apiUri, String key, String proxy, String aggregationId, String dateStr) throws IOException, ParseException { + + String uri = apiUri + url + "/" + aggregationId; + if (dateStr != null) { + uri = uri + "?date=" + dateStr; + } + + loadAggrProfileInfo(uri, key, proxy); + } + + public AggregationProfileParser(JSONObject jsonObject) { + this.jsonObject = jsonObject; + readApiRequestResult(); + } + + + public void loadAggrProfileInfo(String uri, String key, String proxy) throws IOException, ParseException { + jsonObject = RequestManager.request(uri, key, proxy); + readApiRequestResult(); + } + public void readApiRequestResult(){ + JSONArray dataList = (JSONArray) jsonObject.get("data"); + + JSONObject dataObject = (JSONObject) dataList.get(0); + + id = (String) dataObject.get("id"); + date = (String) dataObject.get("date"); + name = (String) dataObject.get("name"); + namespace = (String) dataObject.get("namespace"); + endpointGroup = (String) dataObject.get("endpoint_group"); + metricOp = (String) dataObject.get("metric_operation"); + profileOp = (String) dataObject.get("profile_operation"); + + JSONObject metricProfileObject = (JSONObject) dataObject.get("metric_profile"); + + metricProfile[0] = (String) metricProfileObject.get("id"); + metricProfile[1] = (String) metricProfileObject.get("name"); + + JSONArray groupArray = (JSONArray) dataObject.get("groups"); + Iterator groupiterator = groupArray.iterator(); + + while (groupiterator.hasNext()) { + JSONObject groupObject=groupiterator.next(); + if (groupObject instanceof JSONObject) { + + String groupname = (String) groupObject.get("name"); + String groupoperation = (String) groupObject.get("operation"); + + functionOperations.put(groupname, groupoperation); + JSONArray serviceArray = (JSONArray) groupObject.get("services"); + Iterator serviceiterator = serviceArray.iterator(); + HashMap services = new HashMap<>(); + while (serviceiterator.hasNext()) { + JSONObject servObject = (JSONObject) serviceiterator.next(); + String servicename = (String) servObject.get("name"); + String serviceoperation = (String) servObject.get("operation"); + serviceOperations.put(servicename, serviceoperation); + services.put(servicename, serviceoperation); + + ArrayList serviceFunctionList = new ArrayList<>(); + if (serviceFunctions.get(servicename) != null) { + serviceFunctionList = 
serviceFunctions.get(servicename); + } + serviceFunctionList.add(groupname); + serviceFunctions.put(servicename, serviceFunctionList); + } + groups.add(new GroupOps(groupname, groupoperation, services)); + + } + } + } + + public JSONObject getJsonObject() { + return jsonObject; + } + + public void setJsonObject(JSONObject jsonObject) { + this.jsonObject = jsonObject; + } + + + public ArrayList retrieveServiceFunctions(String service) { + return serviceFunctions.get(service); + + } + + public String getServiceOperation(String service) { + return serviceOperations.get(service); + + } + + public String getFunctionOperation(String function) { + + return functionOperations.get(function); + + } + + public String getId() { + return id; + } + + public String getDate() { + return date; + } + + public String getName() { + return name; + } + + public String getNamespace() { + return namespace; + } + + public String getEndpointGroup() { + return endpointGroup; + } + + public String getMetricOp() { + return metricOp; + } + + public String getProfileOp() { + return profileOp; + } + + public String[] getMetricProfile() { + return metricProfile; + } + + public ArrayList getGroups() { + return groups; + } + + public HashMap getServiceOperations() { + return serviceOperations; + } + + public void setServiceOperations(HashMap serviceOperations) { + this.serviceOperations = serviceOperations; + } + public HashMap getFunctionOperations() { + return functionOperations; + } + + public void setFunctionOperations(HashMap functionOperations) { + this.functionOperations = functionOperations; + } + + public HashMap> getServiceFunctions() { + return serviceFunctions; + } + + public void setServiceFunctions(HashMap> serviceFunctions) { + this.serviceFunctions = serviceFunctions; + } + public static class GroupOps implements Serializable { + + private String name; + private String operation; + private HashMap services; + + public GroupOps(String name, String operation, HashMap services) { + this.name = name; + this.operation = operation; + this.services = services; + } + + public String getName() { + return name; + } + + public String getOperation() { + return operation; + } + + public HashMap getServices() { + return services; + } + + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/MetricProfileParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/MetricProfileParser.java new file mode 100644 index 00000000..224a87eb --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/MetricProfileParser.java @@ -0,0 +1,157 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
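For reference, readApiRequestResult() above expects the aggregation-profile payload returned by the web API to have roughly the following shape; the field names come from the parser code, while the concrete values are hypothetical:

    { "data": [ {
        "id": "...", "date": "2021-06-01", "name": "example-profile", "namespace": "example",
        "endpoint_group": "servicegroups", "metric_operation": "AND", "profile_operation": "AND",
        "metric_profile": { "id": "...", "name": "example-metric-profile" },
        "groups": [ { "name": "compute", "operation": "OR",
                      "services": [ { "name": "webportal", "operation": "OR" } ] } ]
    } ] }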
+ */ +package argo.profiles; + +import argo.utils.RequestManager; +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + * + * MetricProfileParser, collects data as described in the json received from web + * api metric profiles request + */ +public class MetricProfileParser implements Serializable{ + + private String id; + private String date; + private String name; + private String description; + private ArrayList services=new ArrayList<>(); + private HashMap> metricData=new HashMap<>(); + private final String url = "/metric_profiles"; + private JSONObject jsonObject; + public MetricProfileParser() { + } + + public MetricProfileParser(JSONObject jsonObject) { + this.jsonObject = jsonObject; + readApiRequestResult(); + } + + + public class Services implements Serializable{ + + private String service; + private ArrayList metrics; + + public Services(String service, ArrayList metrics) { + this.service = service; + this.metrics = metrics; + } + + public String getService() { + return service; + } + + public ArrayList getMetrics() { + return metrics; + } + } + + public MetricProfileParser(String apiUri, String key, String proxy, String metricId, String date) throws IOException, ParseException { + String uri = apiUri + url + "/" + metricId; + if (date != null) { + uri = uri + "?date=" + date; + } + loadMetricProfile(uri, key, proxy); + } + + private void loadMetricProfile(String uri, String key, String proxy) throws IOException, org.json.simple.parser.ParseException { + jsonObject = RequestManager.request(uri, key, proxy); + readApiRequestResult(); + } + public void readApiRequestResult(){ + + JSONArray data = (JSONArray) jsonObject.get("data"); + id = (String) jsonObject.get("id"); + date = (String) jsonObject.get("date"); + name = (String) jsonObject.get("name"); + description = (String) jsonObject.get("description"); + + Iterator dataIter = data.iterator(); + while (dataIter.hasNext()) { + Object dataobj = dataIter.next(); + if (dataobj instanceof JSONObject) { + JSONObject jsonDataObj = new JSONObject((Map) dataobj); + + JSONArray servicesArray = (JSONArray) jsonDataObj.get("services"); + + Iterator iterator = servicesArray.iterator(); + + while (iterator.hasNext()) { + Object obj = iterator.next(); + if (obj instanceof JSONObject) { + JSONObject servObj = new JSONObject((Map) obj); + String serviceName = (String) servObj.get("service"); + JSONArray metrics = (JSONArray) servObj.get("metrics"); + Iterator metrIter = metrics.iterator(); + ArrayList metricList = new ArrayList<>(); + + while (metrIter.hasNext()) { + Object metrObj = metrIter.next(); + metricList.add(metrObj.toString()); + } + + Services service = new Services(serviceName, metricList); + services.add(service); + metricData.put(serviceName, metricList); + } + } + } + } + } + + public boolean containsMetric(String service, String metric){ + + if(metricData.get(service)!=null && metricData.get(service).contains(metric)){ + return true; + } + return false; + } + + public JSONObject getJsonObject() { + return jsonObject; + } + + public void setJsonObject(JSONObject jsonObject) { + this.jsonObject = jsonObject; + } + + public String getId() { + return id; + } + + public String getDate() { + return date; + } + + public String getName() { + return name; + } + + public String 
getDescription() { + return description; + } + + public ArrayList getServices() { + return services; + } + + public HashMap> getMetricData() { + return metricData; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/OperationsParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/OperationsParser.java new file mode 100644 index 00000000..e18c4625 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/OperationsParser.java @@ -0,0 +1,352 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.profiles; + +import com.google.gson.Gson; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; +import java.io.Serializable; +import org.json.simple.JSONObject; + +public class OperationsParser implements Serializable { + + private static final Logger LOG = Logger.getLogger(OperationsParser.class.getName()); + + private HashMap states; + private HashMap ops; + private ArrayList revStates; + private ArrayList revOps; + + private int[][][] truthTable; + + private String defaultDownState; + private String defaultMissingState; + private String defaultUnknownState; + + private boolean order; + private final String url = "/operations_profiles"; + + public OperationsParser() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + + this.order = false; + + } + + public OperationsParser(JSONObject jsonObj, boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; + + this.truthTable = null; + JSONObject jsonObject = jsonObj; + // readApiRequestResult(); + } + + public OperationsParser(boolean _order) { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + this.order = _order; + + this.truthTable = null; + } + + + + public String getDefaultDown() { + return this.defaultDownState; + } + + public String getDefaultUnknown() { + return this.defaultUnknownState; + } + + public int getDefaultUnknownInt() { + return this.getIntStatus(this.defaultUnknownState); + } + + public int getDefaultDownInt() { + return this.getIntStatus(this.defaultDownState); + } + + public String getDefaultMissing() { + return this.defaultMissingState; + } + + public int getDefaultMissingInt() { + return this.getIntStatus(this.defaultMissingState); + } + + public void clear() { + this.states = new HashMap(); + this.ops = new HashMap(); + this.revStates = new ArrayList(); + this.revOps = new ArrayList(); + + this.truthTable = null; + } + + public int opInt(int op, int a, int b) { + int result = -1; + try { + result = this.truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + LOG.info(ex); + result = -1; + } + + return result; + } + + public int opInt(String op, String a, String b) { + + 
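+ // Map the operation and status names to the integer indices assigned while reading the
+ // operations profile (readJson), then look the combined status up in the truth table.
+ // For example, assuming "AND" is one of the available operations and OK/CRITICAL are among
+ // the available states, opInt("AND", "OK", "CRITICAL") returns truthTable[idx(AND)][idx(OK)][idx(CRITICAL)].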
int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.truthTable[opInt][aInt][bInt]; + } + + public String op(int op, int a, int b) { + return this.revStates.get(this.truthTable[op][a][b]); + } + + public String op(String op, String a, String b) { + int opInt = this.ops.get(op); + int aInt = this.states.get(a); + int bInt = this.states.get(b); + + return this.revStates.get(this.truthTable[opInt][aInt][bInt]); + } + + public String getStrStatus(int status) { + return this.revStates.get(status); + } + + public int getIntStatus(String status) { + return this.states.get(status); + } + + public String getStrOperation(int op) { + return this.revOps.get(op); + } + + public int getIntOperation(String op) { + return this.ops.get(op); + } + + public ArrayList availableStates() { + + return this.revStates; + } + + public ArrayList availableOps() { + return this.revOps; + } + + public void loadJson(File jsonFile) throws IOException { + // Clear data + this.clear(); + + BufferedReader br = null; + try { + br = new BufferedReader(new FileReader(jsonFile)); + + JsonParser json_parser = new JsonParser(); + JsonElement j_element = json_parser.parse(br); + readJson(j_element); + } catch (FileNotFoundException ex) { + LOG.error("Could not open file:" + jsonFile.getName()); + throw ex; + + } catch (JsonParseException ex) { + LOG.error("File is not valid json:" + jsonFile.getName()); + throw ex; + } finally { + // Close quietly without exceptions the buffered reader + IOUtils.closeQuietly(br); + } + + } + + public void readJson(JsonElement j_element) { + JsonObject j_obj = j_element.getAsJsonObject(); + JsonArray j_states = j_obj.getAsJsonArray("available_states"); + JsonArray j_ops = j_obj.getAsJsonArray("operations"); + this.defaultMissingState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("missing").getAsString(); + this.defaultDownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("down").getAsString(); + this.defaultUnknownState = j_obj.getAsJsonObject("defaults").getAsJsonPrimitive("unknown").getAsString(); + // Collect the available states + for (int i = 0; i < j_states.size(); i++) { + this.states.put(j_states.get(i).getAsString(), i); + this.revStates.add(j_states.get(i).getAsString()); + + } + + // Collect the available operations + int i = 0; + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + this.ops.put(jObjItem.getAsJsonPrimitive("name").getAsString(), i); + this.revOps.add(jObjItem.getAsJsonPrimitive("name").getAsString()); + i++; + } + // Initialize the truthtable + int num_ops = this.revOps.size(); + int num_states = this.revStates.size(); + this.truthTable = new int[num_ops][num_states][num_states]; + + for (int[][] surface : this.truthTable) { + for (int[] line : surface) { + Arrays.fill(line, -1); + } + } + + // Fill the truth table + for (JsonElement item : j_ops) { + JsonObject jObjItem = item.getAsJsonObject(); + String opname = jObjItem.getAsJsonPrimitive("name").getAsString(); + JsonArray tops = jObjItem.getAsJsonArray("truth_table"); + // System.out.println(tops); + + for (int j = 0; j < tops.size(); j++) { + // System.out.println(opname); + JsonObject row = tops.get(j).getAsJsonObject(); + + int a_val = this.states.get(row.getAsJsonPrimitive("a").getAsString()); + int b_val = this.states.get(row.getAsJsonPrimitive("b").getAsString()); + int x_val = this.states.get(row.getAsJsonPrimitive("x").getAsString()); + int op_val = this.ops.get(opname); + + // Fill in truth table + // 
Check if order sensitivity is off so to insert two truth + // values + // ...[a][b] and [b][a] + this.truthTable[op_val][a_val][b_val] = x_val; + if (!this.order) { + this.truthTable[op_val][b_val][a_val] = x_val; + } + } + } + + } + + public void loadJsonString(List opsJson) throws JsonParseException { + // Clear data + this.clear(); + + JsonParser json_parser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement j_element = json_parser.parse(opsJson.get(0)); + readJson(j_element); + } + + public int[][][] getTruthTable() { + return truthTable; + } + + public void setTruthTable(int[][][] truthTable) { + this.truthTable = truthTable; + } + + public HashMap getStates() { + return states; + } + + public void setStates(HashMap states) { + this.states = states; + } + + public HashMap getOps() { + return ops; + } + + public void setOps(HashMap ops) { + this.ops = ops; + } + + public ArrayList getRevStates() { + return revStates; + } + + public void setRevStates(ArrayList revStates) { + this.revStates = revStates; + } + + public ArrayList getRevOps() { + return revOps; + } + + public void setRevOps(ArrayList revOps) { + this.revOps = revOps; + } + + public String getDefaultDownState() { + return defaultDownState; + } + + public void setDefaultDownState(String defaultDownState) { + this.defaultDownState = defaultDownState; + } + + public String getDefaultMissingState() { + return defaultMissingState; + } + + public void setDefaultMissingState(String defaultMissingState) { + this.defaultMissingState = defaultMissingState; + } + + public String getDefaultUnknownState() { + return defaultUnknownState; + } + + public void setDefaultUnknownState(String defaultUnknownState) { + this.defaultUnknownState = defaultUnknownState; + } + + public boolean isOrder() { + return order; + } + + public void setOrder(boolean order) { + this.order = order; + } + + public static Logger getLOG() { + return LOG; + } + + public String getUrl() { + return url; + } + +} \ No newline at end of file diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/ProfilesLoader.java b/flink_jobs/status_trends/src/main/java/argo/profiles/ProfilesLoader.java new file mode 100644 index 00000000..7c452306 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/ProfilesLoader.java @@ -0,0 +1,102 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.profiles; +import argo.utils.RequestManager; +import com.google.gson.JsonElement; +import java.io.IOException; +import org.apache.flink.api.java.utils.ParameterTool; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + * + * ProfilesLoader, loads all the parser that will be used to collect the information from the web api + */ +public class ProfilesLoader { + + private ReportParser reportParser; + private TopologyEndpointParser topologyEndpointParser; + private MetricProfileParser metricProfileParser; + private OperationsParser operationParser; + private AggregationProfileParser aggregationProfileParser; + private TopologyGroupParser topolGroupParser; + + public ProfilesLoader() { + } + + + public ProfilesLoader(ParameterTool params) throws IOException, ParseException { + + reportParser = new ReportParser(params.getRequired("apiUri"), params.getRequired("key"), params.get("proxy"), params.getRequired("reportId")); + String[] reportInfo = reportParser.getTenantReport().getInfo(); + topolGroupParser = new TopologyGroupParser(params.getRequired("apiUri"), params.getRequired("key"), params.get("proxy"), params.getRequired("date"),reportInfo[0]); + topologyEndpointParser = new TopologyEndpointParser(params.getRequired("apiUri"), params.getRequired("key"), params.get("proxy"), params.getRequired("date"),reportInfo[0]); + + String aggregationId = reportParser.getAggregationReportId(); + String metricId = reportParser.getMetricReportId(); + String operationsId = reportParser.getOperationReportId(); + + aggregationProfileParser = new AggregationProfileParser(params.getRequired("apiUri"), params.getRequired("key"), params.get("proxy"), aggregationId, params.get("date")); + metricProfileParser = new MetricProfileParser(params.getRequired("apiUri"), params.getRequired("key"), params.get("proxy"), metricId, params.get("date")); + JsonElement opProfileJson=RequestManager.operationsProfileRequest(params.getRequired("apiUri"), operationsId, params.getRequired("key"), params.get("proxy"), params.get("date")); + + operationParser = new OperationsParser(); + operationParser.readJson(opProfileJson); + + } + + public ReportParser getReportParser() { + return reportParser; + } + + public void setReportParser(ReportParser reportParser) { + this.reportParser = reportParser; + } + + public TopologyEndpointParser getTopologyEndpointParser() { + return topologyEndpointParser; + } + + public void setTopologyEndpointParser(TopologyEndpointParser topologyEndpointParser) { + this.topologyEndpointParser = topologyEndpointParser; + } + + public MetricProfileParser getMetricProfileParser() { + return metricProfileParser; + } + + public void setMetricProfileParser(MetricProfileParser metricProfileParser) { + this.metricProfileParser = metricProfileParser; + } + + public OperationsParser getOperationParser() { + return operationParser; + } + + public void setOperationParser(OperationsParser operationParser) { + this.operationParser = operationParser; + } + + public AggregationProfileParser getAggregationProfileParser() { + return aggregationProfileParser; + } + + public void setAggregationProfileParser(AggregationProfileParser aggregationProfileParser) { + this.aggregationProfileParser = aggregationProfileParser; + } + + public TopologyGroupParser getTopolGroupParser() { + return topolGroupParser; + } + + public void setTopolGroupParser(TopologyGroupParser topolGroupParser) { + this.topolGroupParser = topolGroupParser; + } + + +} diff --git 
a/flink_jobs/status_trends/src/main/java/argo/profiles/ReportParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/ReportParser.java new file mode 100644 index 00000000..eecfe50b --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/ReportParser.java @@ -0,0 +1,338 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.profiles; + +import argo.utils.RequestManager; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + * + * ReportParser, collects data as described in the json received from web api report request, that corresponds to the specific tenant + */ +public class ReportParser { + + private TenantReport tenantReport; + private final String url = "/reports/"; + private JSONObject jsonObject; + + public ReportParser(String apiUri, String key, String proxy, String reportId) throws IOException, ParseException { + String uri = apiUri + url + reportId; + loadReportInfo(uri, key, proxy); + } + + public ReportParser(JSONObject jsonObject) { + this.jsonObject = jsonObject; + readApiRequestResult(); + } + + public void loadReportInfo(String uri, String key, String proxy) throws IOException, ParseException { + + jsonObject = RequestManager.request(uri, key, proxy); + readApiRequestResult(); + } + + + public void readApiRequestResult(){ + JSONArray dataList = (JSONArray) jsonObject.get("data"); + + Iterator iterator = dataList.iterator(); + + while (iterator.hasNext()) { + JSONObject dataObject = (JSONObject) iterator.next(); + + String id = (String) dataObject.get("id"); + String tenant = (String) dataObject.get("tenant"); + boolean disabled = (boolean) dataObject.get("disabled"); + + JSONObject infoObject = (JSONObject) dataObject.get("info"); + String[] info = new String[4]; + info[0] = (String) infoObject.get("name"); + info[1] = (String) infoObject.get("description"); + info[2] = (String) infoObject.get("created"); + info[3] = (String) infoObject.get("updated"); + + JSONObject topologyObject = (JSONObject) dataObject.get("topology_schema"); + JSONObject groupObject = (JSONObject) topologyObject.get("group"); + + String type = (String) groupObject.get("type"); + JSONObject subGroupObject = (JSONObject) groupObject.get("group"); + + String grouptype = (String) subGroupObject.get("type"); + + Topology group = new Topology(grouptype, null); + Topology topologyGroup = new Topology(type, group); + + JSONObject thresholdsObject = (JSONObject) dataObject.get("thresholds"); + + Threshold threshold = new Threshold((Long) thresholdsObject.get("availability"), (Long) thresholdsObject.get("reliability"), (Double) thresholdsObject.get("uptime"), + (Double) thresholdsObject.get("unknown"), (Double) thresholdsObject.get("downtime")); + + JSONArray profiles = (JSONArray) dataObject.get("profiles"); + + Iterator profileIter = profiles.iterator(); + ArrayList profileList = new ArrayList<>(); + while (profileIter.hasNext()) { + JSONObject profileObject = (JSONObject) profileIter.next(); + Profiles profile = new Profiles((String) profileObject.get("id"), (String) profileObject.get("name"), (String) profileObject.get("type")); + profileList.add(profile); + } + + JSONArray filters = (JSONArray) dataObject.get("filter_tags"); + Iterator filterIter = 
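+ // the report's filter_tags entries are turned into FilterTags(name, value, context) objects
+ // and stored, together with the profiles list, in the TenantReport built below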
filters.iterator(); + ArrayList filtersList = new ArrayList<>(); + while (filterIter.hasNext()) { + JSONObject filterObject = (JSONObject) filterIter.next(); + FilterTags filter = new FilterTags((String) filterObject.get("name"), (String) filterObject.get("value"), (String) filterObject.get("context")); + filtersList.add(filter); + } + + tenantReport = new TenantReport(id, tenant, disabled, info, group, threshold, profileList, filtersList); + } + + } + +// public String getProfileId(String profileName) { +// ArrayList profiles = tenantReport.getProfiles(); +// if (profiles != null) { +// for (Profiles profile : profiles) { +// if (profile.getType().equalsIgnoreCase(profileName)) { +// return profile.id; +// } +// } +// } +// return null; +// } + + public String getAggregationReportId() { + ArrayList profiles = tenantReport.getProfiles(); + if (profiles != null) { + for (Profiles profile : profiles) { + if (profile.getType().equalsIgnoreCase(ReportParser.ProfileType.AGGREGATION.name())) { + return profile.id; + } + } + } + return null; + } + + public String getMetricReportId() { + ArrayList profiles = tenantReport.getProfiles(); + if (profiles != null) { + for (Profiles profile : profiles) { + if (profile.getType().equalsIgnoreCase(ReportParser.ProfileType.METRIC.name())) { + return profile.id; + } + } + } + return null; + } + + public String getOperationReportId() { + ArrayList profiles = tenantReport.getProfiles(); + if (profiles != null) { + for (Profiles profile : profiles) { + if (profile.getType().equalsIgnoreCase(ReportParser.ProfileType.OPERATIONS.name())) { + return profile.id; + } + } + } + return null; + } + + public JSONObject getJsonObject() { + return jsonObject; + } + + public void setJsonObject(JSONObject jsonObject) { + this.jsonObject = jsonObject; + } + + public TenantReport getTenantReport() { + return tenantReport; + } + + public class Threshold { + + private Long availability; + private Long reliability; + private Double uptime; + private Double unknown; + private Double downtime; + + public Threshold(Long availability, Long reliability, Double uptime, Double unknown, Double downtime) { + this.availability = availability; + this.reliability = reliability; + this.uptime = uptime; + this.unknown = unknown; + this.downtime = downtime; + } + + public Long getAvailability() { + return availability; + } + + public Long getReliability() { + return reliability; + } + + public Double getUptime() { + return uptime; + } + + public Double getUnknown() { + return unknown; + } + + public Double getDowntime() { + return downtime; + } + + } + + private class Profiles { + + private String id; + private String name; + private String type; + + public Profiles(String id, String name, String type) { + this.id = id; + this.name = name; + this.type = type; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + } + + private class FilterTags { + + private String name; + private String value; + private String context; + + public FilterTags(String name, String value, String context) { + this.name = name; + this.value = value; + this.context = context; + } + + public String getName() { + return name; + } + + public String getValue() { + return value; + } + + public String getContext() { + return context; + } + + } + + public class Topology { + + private String type; + private Topology group; + + public Topology(String type, Topology group) { + this.type = type; + this.group = group; + } + + public 
String getType() { + return type; + } + + public Topology getGroup() { + return group; + } + + } + + public class TenantReport { + + private String id; + private String tenant; + private boolean disabled; + private String[] info; + private Topology group; + private Threshold threshold; + private ArrayList profiles; + private ArrayList filterTags; + + public TenantReport(String id, String tenant, boolean disabled, String[] info, Topology group, Threshold threshold, ArrayList profiles, ArrayList filterTags) { + this.id = id; + this.tenant = tenant; + this.disabled = disabled; + this.info = info; + this.group = group; + this.threshold = threshold; + this.profiles = profiles; + this.filterTags = filterTags; + + } + + public String getId() { + return id; + } + + public String getTenant() { + return tenant; + } + + public boolean isDisabled() { + return disabled; + } + + public String[] getInfo() { + return info; + } + + public Topology getGroup() { + return group; + } + + public Threshold getThreshold() { + return threshold; + } + + public ArrayList getProfiles() { + return profiles; + } + + public ArrayList getFilterTags() { + return filterTags; + } + + } + + public enum ProfileType { + + METRIC, + AGGREGATION, + OPERATIONS + + } +} diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyEndpointParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyEndpointParser.java new file mode 100644 index 00000000..80f91807 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyEndpointParser.java @@ -0,0 +1,199 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.profiles; + +import argo.utils.RequestManager; +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + * + * * TopologyEndpointParser, collects data as described in the json received + * from web api topology endpoint request + */ +public class TopologyEndpointParser implements Serializable{ + + private HashMap> topologyEndPointsPerType=new HashMap<>(); + + private HashMap> topologyEndpoint=new HashMap<>(); + private final String url = "/topology/endpoints/by_report"; +// private final String url = "/topology/endpoints"; + private JSONObject jsonObject; + public TopologyEndpointParser() { + } + + + public TopologyEndpointParser(String apiUri, String key, String proxy, String date, String reportname) throws IOException, ParseException { + // by_report/{report-name}?date=YYYY-MM-DD + String uri = apiUri + url + "/" + reportname; + // String uri = apiUri + url; + + if (date != null) { + uri = uri + "?date=" + date; + } + loadTopologyEndpoints(uri, key, proxy); + } + + public TopologyEndpointParser(JSONObject jsonObject) { + this.jsonObject = jsonObject; + readApiRequestResult(); + } + + + public HashMap getTopology(String type) throws IOException, org.json.simple.parser.ParseException { + + return topologyEndpoint.get(type); + } + + + private void loadTopologyEndpoints(String uri, String key, String proxy) throws IOException, ParseException { + jsonObject = RequestManager.request(uri, key, proxy); + readApiRequestResult(); + } + public void readApiRequestResult(){ + 
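+ // Every entry of the "data" array describes one monitored endpoint (group, hostname, service,
+ // type, tags). For each endpoint the key "hostname-service" is mapped to its group inside a
+ // per-type lookup map, which retrieveGroup(type, serviceEndpoint) later uses to resolve groups.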
JSONArray data = (JSONArray) jsonObject.get("data"); + + Iterator dataIter = data.iterator(); + while (dataIter.hasNext()) { + Object dataobj = dataIter.next(); + if (dataobj instanceof JSONObject) { + JSONObject jsonDataObj = new JSONObject((Map) dataobj); + String hostname = (String) jsonDataObj.get("hostname"); + String service = (String) jsonDataObj.get("service"); + String group = (String) jsonDataObj.get("group"); + String type = (String) jsonDataObj.get("type"); + JSONObject tagsObj = (JSONObject) jsonDataObj.get("tags"); + + String scope = (String) tagsObj.get("scope"); + String production = (String) tagsObj.get("production"); + String monitored = (String) tagsObj.get("monitored"); + Tags tag = new Tags(scope, production, monitored); + + String topologyEndpointKey = hostname + "-" + service; + + HashMap endpMap = new HashMap(); + if (topologyEndpoint.get(type) != null) { + endpMap = topologyEndpoint.get(type); + } + + endpMap.put(topologyEndpointKey, group); + topologyEndpoint.put(type, endpMap); + + EndpointGroup endpointGroup = new EndpointGroup(group, hostname, service, type, tag); + + ArrayList topologies = new ArrayList<>(); + if (topologyEndPointsPerType.get(type) != null) { + topologies = topologyEndPointsPerType.get(type); + } + topologies.add(endpointGroup); + } + } + + } + + public JSONObject getJsonObject() { + return jsonObject; + } + + public void setJsonObject(JSONObject jsonObject) { + this.jsonObject = jsonObject; + } + + public String retrieveGroup(String type, String serviceEndpoint){ + return topologyEndpoint.get(type).get(serviceEndpoint); + + } + public HashMap> getTopologyEndPointsPerType() { + return topologyEndPointsPerType; + } + + public void setTopologyEndPointsPerType(HashMap> topologyEndPointsPerType) { + this.topologyEndPointsPerType = topologyEndPointsPerType; + } + + public HashMap> getTopologyEndpoint() { + return topologyEndpoint; + } + + public void setTopologyEndpoint(HashMap> topologyEndpoint) { + this.topologyEndpoint = topologyEndpoint; + } + + public class EndpointGroup implements Serializable{ + + private String group; + + private String hostname; + private String service; + private String type; + private Tags tags; + + public EndpointGroup(String group, String hostname, String service, String type, Tags tags) { + this.group = group; + this.hostname = hostname; + this.service = service; + this.type = type; + this.tags = tags; + } + + public String getGroup() { + return group; + } + + public String getHostname() { + return hostname; + } + + public String getService() { + return service; + } + + public String getType() { + return type; + } + + public Tags getTags() { + return tags; + } + + } + + public class Tags { + + private String scope; + private String production; + private String monitored; + + public Tags(String scope, String production, String monitored) { + this.scope = scope; + this.production = production; + this.monitored = monitored; + } + + public String getScope() { + return scope; + } + + public String getProduction() { + return production; + } + + public String getMonitored() { + return monitored; + } + + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyGroupParser.java b/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyGroupParser.java new file mode 100644 index 00000000..ae4edfed --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/profiles/TopologyGroupParser.java @@ -0,0 +1,207 @@ +/* + * To change this license header, choose License Headers in Project Properties. 
+ * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.profiles; + +import argo.utils.RequestManager; +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + * + * TopologyGroupParser, collects data as described in the json received from web + * api topology group request + */ +public class TopologyGroupParser implements Serializable{ + + private HashMap> topologyGroupsPerType = new HashMap<>(); + private ArrayList topologyGroups = new ArrayList<>(); + private final String url = "/topology/groups/by_report"; + //private final String url = "/topology/groups"; + private JSONObject jsonObject; + public TopologyGroupParser() { + } + + + public TopologyGroupParser(String apiUri, String key, String proxy, String date, String reportname) throws IOException, ParseException { + String uri = apiUri + url + "/" + reportname; + // String uri = apiUri + url; + if (date != null) { + uri = uri + "?date=" + date; + } + loadTopologyGroups(uri, key, proxy); + + } + + public TopologyGroupParser(JSONObject jsonObject) { + this.jsonObject = jsonObject; + readApiRequestResult(); + } + + + public void loadTopologyGroups(String uri, String key, String proxy) throws IOException, ParseException { + + jsonObject = RequestManager.request(uri, key, proxy); + readApiRequestResult(); + } + public void readApiRequestResult(){ + JSONArray data = (JSONArray) jsonObject.get("data"); + + Iterator dataIter = data.iterator(); + while (dataIter.hasNext()) { + Object dataobj = dataIter.next(); + if (dataobj instanceof JSONObject) { + JSONObject jsonDataObj = new JSONObject((Map) dataobj); + String group = (String) jsonDataObj.get("group"); + String type = (String) jsonDataObj.get("type"); + String subgroup = (String) jsonDataObj.get("subgroup"); + topologyGroups.add(subgroup); + JSONObject tagsObj = (JSONObject) jsonDataObj.get("tags"); + Tags tag = null; + if (tagsObj != null) { + String scope = (String) tagsObj.get("scope"); + String production = (String) tagsObj.get("production"); + String monitored = (String) tagsObj.get("monitored"); + + tag = new Tags(scope, production, monitored); + } + Notifications notification = null; + JSONObject notificationsObj = (JSONObject) jsonDataObj.get("notifications"); + if (notificationsObj != null) { + String contacts = (String) notificationsObj.get("contacts"); + String enabled = (String) notificationsObj.get("enabled"); + notification = new Notifications(contacts, enabled); + + } + + TopologyGroup topologyGroup = new TopologyGroup(group, type, subgroup, tag, notification); + ArrayList groupList = new ArrayList<>(); + if (topologyGroupsPerType.get(type) != null) { + groupList = topologyGroupsPerType.get(type); + } + groupList.add(topologyGroup); + topologyGroupsPerType.put(type, groupList); + + } + + } + } + + public boolean containsGroup(String group){ + if(topologyGroups.contains(group)){ + return true; + } + return false; + } + public HashMap> getTopologyGroupsPerType() { + return topologyGroupsPerType; + } + + public void setTopologyGroupsPerType(HashMap> topologyGroupsPerType) { + this.topologyGroupsPerType = topologyGroupsPerType; + } + + public ArrayList getTopologyGroups() { + return topologyGroups; + } + + public void setTopologyGroups(ArrayList 
topologyGroups) { + this.topologyGroups = topologyGroups; + } + + public JSONObject getJsonObject() { + return jsonObject; + } + + public void setJsonObject(JSONObject jsonObject) { + this.jsonObject = jsonObject; + } + + public class TopologyGroup implements Serializable{ + private String group; + private String type; + private String subgroup; + + private Tags tags; + private Notifications notifications; + + public TopologyGroup(String group, String type, String subgroup, Tags tags, Notifications notifications) { + this.group = group; + this.type = type; + this.subgroup = subgroup; + this.tags = tags; + this.notifications = notifications; + } + + public String getGroup() { + return group; + } + + public String getType() { + return type; + } + + public String getSubgroup() { + return subgroup; + } + + public Tags getTags() { + return tags; + } + + public Notifications getNotifications() { + return notifications; + } + + } + + public class Tags implements Serializable{ + + private String scope; + private String infrastructure; + private String certification; + + public Tags(String scope, String infrastructure, String certification) { + this.scope = scope; + this.infrastructure = infrastructure; + this.certification = certification; + } + + public String getScope() { + return scope; + } + + public String getInfrastructure() { + return infrastructure; + } + + public String getCertification() { + return certification; + } + + } + + public class Notifications implements Serializable{ + + private String contacts; + private String enabled; + + public Notifications(String contacts, String enabled) { + this.contacts = contacts; + this.enabled = enabled; + } + + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/utils/EnumStatus.java b/flink_jobs/status_trends/src/main/java/argo/utils/EnumStatus.java new file mode 100644 index 00000000..d240d336 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/utils/EnumStatus.java @@ -0,0 +1,18 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package argo.utils; + +/** + * + * @author cthermolia + */ +public enum EnumStatus { + CRITICAL, + WARNING, + OK, + MISSING, + UNKNOWN +} diff --git a/flink_jobs/status_trends/src/main/java/argo/utils/RequestManager.java b/flink_jobs/status_trends/src/main/java/argo/utils/RequestManager.java new file mode 100644 index 00000000..48f1be4f --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/utils/RequestManager.java @@ -0,0 +1,101 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.utils; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import java.io.IOException; +import org.apache.http.client.fluent.Executor; +import org.apache.http.client.fluent.Request; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + */ +public class RequestManager { + + public static JSONObject request(String uri, String key, String proxy) throws ParseException { + JSONObject jsonresult = null; + + Request request = Request.Get(uri); + // add request headers + request.addHeader("x-api-key", key); + request.addHeader("Accept", "application/json"); + if (proxy != null) { + request = request.viaProxy(proxy); + } + String content = "{}"; + try { + CloseableHttpClient httpClient = HttpClients.custom().build(); + Executor executor = Executor.newInstance(httpClient); + content = executor.execute(request).returnContent().asString(); + + JSONParser parser = new JSONParser(); + jsonresult = (JSONObject) parser.parse(content); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return jsonresult; + } + + public static JsonElement callRequest(String uri, String key, String proxy) throws ParseException { + JsonElement j_element = null; + + Request request = Request.Get(uri); + // add request headers + request.addHeader("x-api-key", key); + request.addHeader("Accept", "application/json"); + if (proxy != null) { + request = request.viaProxy(proxy); + } + String content = "{}"; + try { + CloseableHttpClient httpClient = HttpClients.custom().build(); + Executor executor = Executor.newInstance(httpClient); + content = executor.execute(request).returnContent().asString(); + + JsonParser parser = new JsonParser(); + j_element = (JsonElement) parser.parse(content); + + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return j_element; + } + + public static JsonElement operationsProfileRequest(String apiUri, String operationsId, String key, String proxy, String dateStr) throws IOException, ParseException { + + String uri = apiUri + "/operations_profiles"; + if (dateStr == null) { + uri = uri + operationsId; + } else { + uri = uri + "?date=" + dateStr; + } + return loadOperationProfile(uri, key, proxy); + + } + + public static JsonElement loadOperationProfile(String uri, String key, String proxy) throws IOException, org.json.simple.parser.ParseException { + JsonElement jsonElement = RequestManager.callRequest(uri, key, proxy); + JsonObject jsonObj=jsonElement.getAsJsonObject(); + JsonArray dataObj = jsonObj.getAsJsonArray("data"); + JsonElement dataElement=dataObj.get(0); + + + return dataElement; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/argo/utils/Utils.java b/flink_jobs/status_trends/src/main/java/argo/utils/Utils.java new file mode 100644 index 00000000..abf810a8 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/argo/utils/Utils.java @@ -0,0 +1,151 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package argo.utils; + +import java.io.IOException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import org.apache.flink.api.java.utils.ParameterTool; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import argo.profiles.TopologyEndpointParser; +import java.util.TimeZone; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +/** + * + * @author cthermolia + */ +public class Utils { + + static Logger LOG = LoggerFactory.getLogger(Utils.class); + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + +// public static String getParameterDate(String format, String paramDate) throws ParseException { +// DateTime date = convertStringtoDate(format, paramDate); +// +// DateTime dt = new DateTime(date); +// DateTimeFormatter dtf = DateTimeFormat.forPattern(format); +// String profileDate = dt.toString(dtf); +// +// return profileDate; +// +// } + + public static String convertDateToString(String format, DateTime date) throws ParseException { + + //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + DateTimeFormatter dtf = DateTimeFormat.forPattern(format); + String dateString = date.toString(dtf); + return dateString; + + } + + public static DateTime convertStringtoDate(String format, String dateStr) throws ParseException { + + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; +// SimpleDateFormat sdf = new SimpleDateFormat(format); +// sdf.setTimeZone(TimeZone.getDefault()); +// Calendar cal = Calendar.getInstance(); +// cal.setTime(sdf.parse(dateStr)); +// cal.set(Calendar.MILLISECOND, 0); +// return new DateTime(cal.getTime()); + DateTimeFormatter formatter = DateTimeFormat.forPattern(format); + DateTime dt = formatter.parseDateTime(dateStr); + + + return dt; + } + + public static DateTime createDate(String format, Date dateStr, int hour, int min, int sec) throws ParseException { + + //String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + Calendar newCalendar = Calendar.getInstance(); + newCalendar.setTime(dateStr); + + newCalendar.set(Calendar.HOUR_OF_DAY, hour); + newCalendar.set(Calendar.MINUTE, min); + newCalendar.set(Calendar.SECOND, sec); + newCalendar.set(Calendar.MILLISECOND, 0); + return new DateTime( newCalendar.getTime()); + } + + public static boolean isPreviousDate(String format, Date nowDate, Date firstDate) throws ParseException { + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + + Calendar cal = Calendar.getInstance(); + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + cal.setTime(nowDate); + + Calendar calFirst = Calendar.getInstance(); + calFirst.setTime(firstDate); + + if (firstDate.before(nowDate)) { + return true; + } else { + return false; + } + } + + public static boolean checkParameters(ParameterTool params, String... 
vars) { + + for (String var : vars) { + + if (params.get(var) == null) { + LOG.error("Parameter : " + var + " is required but is missing!\n Program exits!"); + return false; + } + } + return true; + + } + + public static DateTime createDate(String format, int year, int month, int day, int hour, int min, int sec) throws ParseException { + + // String format = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + SimpleDateFormat sdf = new SimpleDateFormat(format); + sdf.setTimeZone(TimeZone.getDefault()); + Calendar newCalendar = Calendar.getInstance(); + newCalendar.set(Calendar.YEAR, year); + newCalendar.set(Calendar.MONTH, month); + newCalendar.set(Calendar.DAY_OF_MONTH, day); + + newCalendar.set(Calendar.HOUR_OF_DAY, hour); + newCalendar.set(Calendar.MINUTE, min); + newCalendar.set(Calendar.SECOND, sec); + newCalendar.set(Calendar.MILLISECOND, 0); + + return new DateTime(newCalendar.getTime()); + } + + public static HashMap getEndpoints(ArrayList endpointList) throws IOException, org.json.simple.parser.ParseException { + + HashMap jsonDataMap = new HashMap<>(); + + Iterator dataIter = endpointList.iterator(); + while (dataIter.hasNext()) { + TopologyEndpointParser.EndpointGroup dataobj = dataIter.next(); + + String hostname = dataobj.getHostname(); + String service = dataobj.getService(); + String group = dataobj.getGroup(); + jsonDataMap.put(hostname + "-" + service, group); + } + + return jsonDataMap; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/timelines/Timeline.java b/flink_jobs/status_trends/src/main/java/timelines/Timeline.java new file mode 100644 index 00000000..9616aac8 --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/timelines/Timeline.java @@ -0,0 +1,314 @@ +package timelines; + +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Timeline { + + private LocalDate date; + + static Logger LOG = LoggerFactory.getLogger(Timeline.class); + + private TreeMap samples; + + public Timeline() { + this.date = null; + this.samples = new TreeMap(); + + } + + public Timeline(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + } + + Timeline(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.date = tmp_date.toLocalDate(); + this.samples = new TreeMap(); + this.samples.put(tmp_date, state); + + } + + public int get(String timestamp) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + return this.samples.floorEntry(tmp_date).getValue(); + } + + public int get(DateTime point) { + if (this.samples.floorEntry(point) == null) { + + throw new RuntimeException("no item found in timeline, size of timeline:" + this.samples.size() + "," + point.toString()); + } + return this.samples.floorEntry(point).getValue(); + } + + public void insert(String timestamp, int status) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples.put(tmp_date, status); + } + + public void insert(DateTime date, int status) { + samples.put(date, status); + + } + + public void insertStringTimeStamps(TreeMap timestamps) { + for (String dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + + } + } + + public void insertDateTimeStamps(TreeMap timestamps) { + for (DateTime dt : timestamps.keySet()) { + int status = timestamps.get(dt); + this.insert(dt, status); + } + this.optimize(); + + } + + public void setFirst(String timestamp, int state) { + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp); + this.samples = new TreeMap(); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + this.samples.put(tmp_date, state); + this.optimize(); + } + + public void clear() { + this.samples.clear(); + } + + public void bulkInsert(Set> samples) { + this.samples.clear(); + for (Map.Entry entry : samples) { + this.samples.put(entry.getKey(), entry.getValue()); + } + } + + public Set> getSamples() { + return samples.entrySet(); + } + + public LocalDate getDate() { + return this.date; + } + + public int getLength() { + return this.samples.size(); + } + + public boolean isEmpty() { + return this.samples.isEmpty(); + } + + public void optimize() { + TreeMap optimal = new TreeMap(); + int prevstate = -1; + for (DateTime key : this.samples.keySet()) { + int value = this.samples.get(key); + if (prevstate == -1) { + 
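+ // the first sample is always kept; after that a timestamp is kept only when its status
+ // differs from the previous one, so runs of identical statuses collapse to a single entry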
+ optimal.put(key, value); + prevstate = value; + + } + if (prevstate != value) { + optimal.put(key, value); + prevstate = value; + } + } + + this.samples = optimal; + } +// + + public Set getPoints() { + return this.samples.keySet(); + } + + public void aggregate(Timeline second, int[][][] truthTable, int op) { + if (this.isEmpty()) { + this.bulkInsert(second.getSamples()); + // Optimize even when we have a single timeline for aggregation + this.optimize(); + return; + } + + Timeline result = new Timeline(); + + // Slice for first + for (DateTime point : this.getPoints()) { + result.insert(point, -1); + } + // Slice for second + for (DateTime point : second.getPoints()) { + result.insert(point, -1); + } + + // Iterate over result and ask + for (DateTime point : result.getPoints()) { + int a = this.get(point); + int b = second.get(point); + if (a != -1 && b != -1) { + int x = -1; + x = truthTable[op][a][b]; + if (x == -1) { + x = truthTable[op][b][a]; + } + + result.insert(point, x); + } + } + + result.optimize(); + + // Engrave the result in this timeline + this.clear(); + this.bulkInsert(result.getSamples()); + } + + public TreeMap buildStringTimeStampMap(ArrayList timestampList, ArrayList states) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + String time = timestamp[0]; + int status = states.indexOf(timestamp[1]); + timestampMap.put(time, status); + } + return timestampMap; + + } + + public TreeMap buildDateTimeStampMap(ArrayList timestampList, ArrayList states) { + + TreeMap timestampMap = new TreeMap(); + + for (String[] timestamp : timestampList) { + + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + tmp_date = fmt.parseDateTime(timestamp[0]); + int status = states.indexOf(timestamp[1]); + timestampMap.put(tmp_date, status); + } + return timestampMap; + + } + + public void removeTimeStamp(DateTime timestamp) { + + if (this.samples.containsKey(timestamp)) { + Iterator iter = this.samples.keySet().iterator(); + while (iter.hasNext()) { + DateTime tmpTimestamp = (DateTime) iter.next(); + if (tmpTimestamp.equals(timestamp)) { + iter.remove(); + break; + } + } + } + + } + + public int calcStatusChanges() { + + return this.samples.keySet().size() - 1; + } + + public void replacePreviousDateStatus(DateTime date, ArrayList availStates) { + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime firsTime = date; + firsTime = firsTime.withTime(0, 0, 0, 0); + + DateTime firstEntry = this.samples.floorKey(firsTime); + if (firstEntry != null && !firstEntry.equals(firsTime)) { + int previousStatus = this.samples.get(firstEntry); + this.samples.put(firsTime, previousStatus); + this.samples.remove(firstEntry); + } else if (firstEntry == null) { + this.samples.put(firsTime, availStates.indexOf("MISSING")); + } + + this.optimize(); + + } + + @Override + public int hashCode() { + int hash = 7; + hash = 83 * hash + Objects.hashCode(this.date); + hash = 83 * hash + Objects.hashCode(this.samples); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Timeline other = (Timeline) obj; + if (!Objects.equals(this.date, other.date)) { + return false; + } + if (!Objects.equals(this.samples, other.samples)) { + return false; + } + return true; + } + + public int opInt(int[][][] truthTable, int op, 
int a, int b) { + int result = -1; + try { + result = truthTable[op][a][b]; + } catch (IndexOutOfBoundsException ex) { + // LOG.info(ex); + result = -1; + } + + return result; + } + +} diff --git a/flink_jobs/status_trends/src/main/java/timelines/TimelineAggregator.java b/flink_jobs/status_trends/src/main/java/timelines/TimelineAggregator.java new file mode 100644 index 00000000..2795399b --- /dev/null +++ b/flink_jobs/status_trends/src/main/java/timelines/TimelineAggregator.java @@ -0,0 +1,123 @@ +package timelines; + + + +import java.text.ParseException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import scala.concurrent.ops; +public class TimelineAggregator { + + private Timeline output; + private Map inputs; + + public TimelineAggregator(String timestamp) throws ParseException + { + this.output = new Timeline(timestamp); + this.inputs = new HashMap(); + } + + public TimelineAggregator(){ + this.output = new Timeline(); + this.inputs = new HashMap(); + + } + + public TimelineAggregator(Map inputs) { + this.inputs = inputs; + this.output = new Timeline(); + } + + + public void clear(){ + this.output.clear(); + this.inputs.clear(); + } + + public String tsFromDate(String date){ + DateTime tmp_date = new DateTime(); + DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd"); + tmp_date = fmt.parseDateTime(date); + tmp_date = tmp_date.withTime(0, 0, 0, 0); + return tmp_date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")); + } + + public void createTimeline(String name, String timestamp, int prevState){ + Timeline temp = new Timeline(timestamp,prevState); + this.inputs.put(name, temp); + } + + public void insert(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) + { + Timeline temp = new Timeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).insert(timestamp, status); + } + + public void setFirst(String name, String timestamp, int status){ + // Check if timeline exists, if not create it + if (this.inputs.containsKey(name) == false) + { + Timeline temp = new Timeline(timestamp,status); + this.inputs.put(name, temp); + return; + } + + this.inputs.get(name).setFirst(timestamp, status); + } + + public LocalDate getDate(){ + return output.getDate(); + } + + public Set> getSamples(){ + return this.output.getSamples(); + } + + + public void clearAndSetDate(String timestamp) + { + this.output = new Timeline(timestamp); + this.inputs.clear(); + + } + + public void aggregate(int[][][] truthTable, int op ){ + if(this.output!=null){this.output.clear();} + + //Iterate through all available input timelines and aggregate + for (Timeline item : this.inputs.values()) { + this.output.aggregate(item, truthTable, op ); + } + + } + + public Timeline getOutput() { + return output; + } + + public void setOutput(Timeline output) { + this.output = output; + } + + public Map getInputs() { + return inputs; + } + + public void setInputs(Map inputs) { + this.inputs = inputs; + } + +} diff --git a/flink_jobs/status_trends/src/main/resources/calctimelines/aggregation.json b/flink_jobs/status_trends/src/main/resources/calctimelines/aggregation.json new file mode 100644 index 00000000..3b146665 --- /dev/null +++ 
b/flink_jobs/status_trends/src/main/resources/calctimelines/aggregation.json @@ -0,0 +1,882 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "00272336-7199-4ea4-bbe9-043aca02838c", + "date": "2021-01-01", + "name": "sla_test", + "namespace": "", + "endpoint_group": "servicegroups", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "ARGO_MON_CRITICAL", + "id": "5850d7fe-75f5-4aa4-bacc-9a5c10280b59" + }, + "groups": [ + { + "name": "compute", + "operation": "OR", + "services": [ + { + "name": "ARC-CE", + "operation": "OR" + }, + { + "name": "GRAM5", + "operation": "OR" + }, + { + "name": "QCG.Computing", + "operation": "OR" + }, + { + "name": "org.opensciencegrid.htcondorce", + "operation": "OR" + } + ] + }, + { + "name": "storage", + "operation": "OR", + "services": [ + { + "name": "SRM", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "Site-BDII", + "operation": "OR" + } + ] + }, + { + "name": "cloud", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.vm-management.occi", + "operation": "OR" + }, + { + "name": "org.openstack.nova", + "operation": "OR" + } + ] + } + ] + }, + { + "id": "963aa1ff-8372-4070-81a0-6d271a01b0cf", + "date": "2020-03-13", + "name": "ops-mon-critical", + "namespace": "", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "OPS_MONITOR_CRITICAL", + "id": "7e7ff9dd-3edf-47d0-9f77-6b2bda8b2a31" + }, + "groups": [ + { + "name": "gstat", + "operation": "OR", + "services": [ + { + "name": "egi.GSTAT", + "operation": "OR" + } + ] + }, + { + "name": "vosam", + "operation": "OR", + "services": [ + { + "name": "vo.SAM", + "operation": "OR" + } + ] + }, + { + "name": "ngisam", + "operation": "OR", + "services": [ + { + "name": "ngi.SAM", + "operation": "OR" + } + ] + }, + { + "name": "egisam", + "operation": "OR", + "services": [ + { + "name": "egi.SAM", + "operation": "OR" + } + ] + }, + { + "name": "brokering", + "operation": "OR", + "services": [ + { + "name": "egi.MSGBroker", + "operation": "OR" + } + ] + }, + { + "name": "egiportal", + "operation": "OR", + "services": [ + { + "name": "egi.Portal", + "operation": "OR" + } + ] + }, + { + "name": "egiopsportal", + "operation": "OR", + "services": [ + { + "name": "egi.OpsPortal", + "operation": "OR" + } + ] + }, + { + "name": "ngiopsportal", + "operation": "OR", + "services": [ + { + "name": "ngi.OpsPortal", + "operation": "OR" + } + ] + }, + { + "name": "egimetricsportal", + "operation": "OR", + "services": [ + { + "name": "egi.MetricsPortal", + "operation": "OR" + } + ] + }, + { + "name": "registry", + "operation": "OR", + "services": [ + { + "name": "egi.GOCDB", + "operation": "OR" + } + ] + }, + { + "name": "helpdesk", + "operation": "OR", + "services": [ + { + "name": "egi.GGUS", + "operation": "OR" + } + ] + }, + { + "name": "applications", + "operation": "OR", + "services": [ + { + "name": "egi.AppDB", + "operation": "OR" + } + ] + }, + { + "name": "authentication", + "operation": "OR", + "services": [ + { + "name": "egi.Perun", + "operation": "OR" + } + ] + }, + { + "name": "tpm", + "operation": "OR", + "services": [ + { + "name": "egi.TMP", + "operation": "OR" + } + ] + }, + { + "name": "apelrepository", + "operation": "OR", + "services": [ + { + "name": "egi.APELRepository", + "operation": "OR" + } + ] + }, + { + "name": "accountingportal", + "operation": "OR", + "services": [ + { + "name": 
"egi.AccountingPortal", + "operation": "OR" + } + ] + }, + { + "name": "argomon", + "operation": "OR", + "services": [ + { + "name": "argo.mon", + "operation": "OR" + } + ] + }, + { + "name": "argowebui", + "operation": "OR", + "services": [ + { + "name": "argo.webui", + "operation": "OR" + } + ] + }, + { + "name": "vomsservice", + "operation": "OR", + "services": [ + { + "name": "VOMS", + "operation": "OR" + } + ] + }, + { + "name": "cernvmfs", + "operation": "OR", + "services": [ + { + "name": "ch.cern.cvmfs.stratum.0", + "operation": "OR" + } + ] + }, + { + "name": "aiioidc", + "operation": "OR", + "services": [ + { + "name": "egi.aai.oidc", + "operation": "OR" + } + ] + }, + { + "name": "aiisaml", + "operation": "OR", + "services": [ + { + "name": "egi.aai.saml", + "operation": "OR" + } + ] + }, + { + "name": "aiitts", + "operation": "OR", + "services": [ + { + "name": "egi.aai.tts", + "operation": "OR" + } + ] + }, + { + "name": "onedataoneprovider", + "operation": "OR", + "services": [ + { + "name": "org.onedata.oneprovider", + "operation": "OR" + } + ] + }, + { + "name": "onedataonezone", + "operation": "OR", + "services": [ + { + "name": "org.onedata.onezone", + "operation": "OR" + } + ] + }, + { + "name": "jupyterhub", + "operation": "OR", + "services": [ + { + "name": "eu.egi.notebooks.jupyterhub", + "operation": "OR" + } + ] + } + ] + }, + { + "id": "ad5f3de0-0afa-4b32-bfd4-e3805df78147", + "date": "2021-01-01", + "name": "critical", + "namespace": "", + "endpoint_group": "sites", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "ARGO_MON_CRITICAL", + "id": "5850d7fe-75f5-4aa4-bacc-9a5c10280b59" + }, + "groups": [ + { + "name": "compute", + "operation": "OR", + "services": [ + { + "name": "ARC-CE", + "operation": "OR" + }, + { + "name": "GRAM5", + "operation": "OR" + }, + { + "name": "QCG.Computing", + "operation": "OR" + }, + { + "name": "org.opensciencegrid.htcondorce", + "operation": "OR" + } + ] + }, + { + "name": "storage", + "operation": "OR", + "services": [ + { + "name": "SRMv2", + "operation": "OR" + }, + { + "name": "SRM", + "operation": "OR" + } + ] + }, + { + "name": "information", + "operation": "OR", + "services": [ + { + "name": "Site-BDII", + "operation": "OR" + } + ] + }, + { + "name": "cloud", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.vm-management.occi", + "operation": "OR" + }, + { + "name": "org.openstack.nova", + "operation": "OR" + } + ] + }, + { + "name": "storewebdav", + "operation": "OR", + "services": [ + { + "name": "webdav", + "operation": "OR" + } + ] + } + ] + }, + { + "id": "ad698ba7-1a35-4338-8c9a-144b8cd0e40d", + "date": "2020-12-22", + "name": "TEST_PROFILE", + "namespace": "", + "endpoint_group": "servicegroups", + "metric_operation": "OR", + "profile_operation": "AND", + "metric_profile": { + "name": "ARGO_MON", + "id": "bd396d06-e061-4df7-bac2-2e3aad3aa654" + }, + "groups": [ + { + "name": "Group1", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "CREAM-CE", + "operation": "AND" + } + ] + }, + { + "name": "Group2", + "operation": "AND", + "services": [ + { + "name": "ARC-CE", + "operation": "OR" + }, + { + "name": "egi.aai.oidc", + "operation": "OR" + }, + { + "name": "eu.egi.notebooks.jupyterhub", + "operation": "OR" + } + ] + }, + { + "name": "Group4", + "operation": "AND", + "services": [ + { + "name": "APEL", + 
"operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + } + ] + }, + { + "name": "Group5", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.compss", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + } + ] + }, + { + "name": "Group6", + "operation": "AND", + "services": [ + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "MyProxy", + "operation": "AND" + }, + { + "name": "org.onedata.onezone", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.compss", + "operation": "AND" + }, + { + "name": "egi.aai.oidc", + "operation": "AND" + }, + { + "name": "org.opensciencegrid.htcondorce", + "operation": "AND" + } + ] + }, + { + "name": "Group7", + "operation": "AND", + "services": [ + { + "name": "egi.aai.oidc", + "operation": "AND" + }, + { + "name": "egi.aai.saml", + "operation": "AND" + }, + { + "name": "CREAM-CE", + "operation": "AND" + }, + { + "name": "APEL", + "operation": "AND" + } + ] + }, + { + "name": "Group8", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + } + ] + }, + { + "name": "Group9", + "operation": "AND", + "services": [ + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.compss", + "operation": "AND" + } + ] + }, + { + "name": "Group10", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "Top-BDII", + "operation": "AND" + }, + { + "name": "org.opensciencegrid.htcondorce", + "operation": "AND" + } + ] + }, + { + "name": "Group11", + "operation": "OR", + "services": [ + { + "name": "eu.egi.cloud.accounting", + "operation": "AND" + } + ] + }, + { + "name": "Group12", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "CREAM-CE", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.compss", + "operation": "AND" + }, + { + "name": "egi.aai.tts", + "operation": "AND" + } + ] + }, + { + "name": "Group13", + "operation": "AND", + "services": [ + { + "name": "egi.aai.oidc", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "egi.aai.saml", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.accounting", + "operation": "AND" + }, + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "egi.aai.oidc", + "operation": "AND" + }, + { + "name": "org.onedata.onezone", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "org.openstack.nova", + "operation": "AND" + } + ] + }, + { + "name": "Group14", + "operation": "AND", + "services": [ + { + "name": "APEL", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "globus-GSISSHD", + "operation": "AND" + }, + { + "name": "GRAM5", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + } + ] + }, + { + "name": "Group15", + "operation": "AND", + "services": [ + { + "name": "eu.egi.cloud.accounting", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "globus-GSISSHD", + "operation": "AND" + }, + { + "name": "Top-BDII", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + 
"name": "WMS", + "operation": "AND" + }, + { + "name": "egi.aai.oidc", + "operation": "AND" + } + ] + }, + { + "name": "Group16", + "operation": "AND", + "services": [ + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.proprietary.slipstream", + "operation": "AND" + } + ] + }, + { + "name": "Group17", + "operation": "AND", + "services": [ + { + "name": "eu.egi.cloud.broker.vmdirac", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.vm-management.occi", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.proprietary.slipstream", + "operation": "AND" + }, + { + "name": "org.opensciencegrid.htcondorce", + "operation": "AND" + }, + { + "name": "org.openstack.swift", + "operation": "AND" + }, + { + "name": "egi.aai.saml", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.compss", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.proprietary.slipstream", + "operation": "AND" + } + ] + }, + { + "name": "Group18", + "operation": "AND", + "services": [ + { + "name": "ARC-CE", + "operation": "AND" + }, + { + "name": "Central-LFC", + "operation": "AND" + }, + { + "name": "egi.aai.tts", + "operation": "AND" + }, + { + "name": "egi.aai.saml", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.accounting", + "operation": "AND" + }, + { + "name": "APEL", + "operation": "AND" + } + ] + }, + { + "name": "Group19", + "operation": "AND", + "services": [ + { + "name": "eu.egi.cloud.accounting", + "operation": "AND" + }, + { + "name": "egi.aai.tts", + "operation": "AND" + }, + { + "name": "eu.egi.cloud.broker.proprietary.slipstream", + "operation": "AND" + }, + { + "name": "ARC-CE", + "operation": "AND" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/status_trends/src/main/resources/calctimelines/operations.json b/flink_jobs/status_trends/src/main/resources/calctimelines/operations.json new file mode 100644 index 00000000..2a40a8aa --- /dev/null +++ b/flink_jobs/status_trends/src/main/resources/calctimelines/operations.json @@ -0,0 +1,248 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "8ce59c4d-3761-4f25-a364-f019e394bf8b", + "date": "2015-01-01", + "name": "egi_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": 
"MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/status_trends/src/main/resources/log4j.properties b/flink_jobs/status_trends/src/main/resources/log4j.properties new file mode 100644 index 00000000..da32ea0f --- /dev/null +++ b/flink_jobs/status_trends/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +log4j.rootLogger=INFO, console + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/flink_jobs/status_trends/src/main/resources/timelines/timeline.json b/flink_jobs/status_trends/src/main/resources/timelines/timeline.json new file mode 100644 index 00000000..30b368fd --- /dev/null +++ b/flink_jobs/status_trends/src/main/resources/timelines/timeline.json @@ -0,0 +1,362 @@ +{ "data":{ + "operations":["AND","OR"], + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "operation":"AND", + "inputs": [ + { + "name":"timeline1", + "timestamps":[ + + {"timestamp": "2021-01-15T00:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T01:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T02:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T03:15:50Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T15:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T20:16:50Z", + "status": "WARNING" + } + + ] + }, { + "name":"timeline2", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T00:05:00Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T12:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T14:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T23:05:00Z", + "status": "WARNING" + } + ] + }, { + "name":"timeline3", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T00:05:00Z", + "status": "UNKNOWN" + }, + + {"timestamp": "2021-01-15T02:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T03:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T06:00:00Z", + "status": "OK" + } + ] + }, { + "name":"timeline4", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "OK" + }, + {"timestamp": "2021-01-15T20:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T21:00:00Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T22:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T23:00:00Z", + "status": "OK" + } + ] + } + +], + "output":{ + "name":"merged", + "timestamps":[ + {"timestamp": "2021-01-15T00:00:00Z" , "status": "MISSING" + }, + {"timestamp": "2021-01-15T00:15:50Z", + "status": "UNKNOWN" + }, + {"timestamp": "2021-01-15T02:00:00Z", + "status": "WARNING" + }, + {"timestamp": "2021-01-15T03:00:00Z", + "status": "OK" + }, + + {"timestamp": "2021-01-15T03:15:50Z", + "status": "WARNING" + }, + + {"timestamp": "2021-01-15T15:15:50Z", + "status": "OK" + }, + {"timestamp": "2021-01-15T20:00:00Z", + "status": "CRITICAL" + } + , + {"timestamp": "2021-01-15T21:00:00Z", + "status": "WARNING" + }, + + {"timestamp": "2021-01-15T22:00:00Z", + "status": "CRITICAL" + }, + + {"timestamp": "2021-01-15T23:00:00Z", + "status": "WARNING" + } + ] + }, "operation_truth_table": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": 
"WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] +} + + +} diff --git a/flink_jobs/status_trends/src/test/java/timelines/TimelineAggregatorTest.java b/flink_jobs/status_trends/src/test/java/timelines/TimelineAggregatorTest.java new file mode 100644 index 00000000..89dd4556 --- /dev/null +++ b/flink_jobs/status_trends/src/test/java/timelines/TimelineAggregatorTest.java @@ -0,0 +1,498 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + +import argo.utils.Utils; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import org.joda.time.DateTime; +import org.joda.time.LocalDate; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * + * @author cthermolia + */ +public class TimelineAggregatorTest { + + public TimelineAggregatorTest() { + } + + @BeforeClass + public static void setUpClass() { + } + + @AfterClass + public static void tearDownClass() { + } + + @Before + public void setUp() { + } + + @After + public void tearDown() { + } + + /** + * Test of clear method, of class TimelineAggregator. + */ + @Test + public void testClear() { + System.out.println("clear"); + TimelineAggregator instance = new TimelineAggregator(); + instance.clear(); + // TODO review the generated test code and remove the default call to fail. + + } + + /** + * Test of tsFromDate method, of class TimelineAggregator. + */ +// @Test +// public void testTsFromDate() { +// System.out.println("tsFromDate"); +// String date = ""; +// TimelineAggregator instance = new TimelineAggregator(); +// String expResult = ""; +// String result = instance.tsFromDate(date); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// +// } + /** + * Test of createTimeline method, of class TimelineAggregator. + */ + @Test + public void testCreateTimeline() throws ParseException { + System.out.println("createTimeline"); + String name = "test"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + int prevState = 0; + TimelineAggregator instance = new TimelineAggregator(); + instance.createTimeline(name, timestamp, prevState); + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + expRes.put(name, exptimeline); + + assertEquals(expRes.toString(), instance.getInputs().toString()); + // TODO review the generated test code and remove the default call to fail. + + } + + /** + * Test of insert method, of class TimelineAggregator. + */ + @Test + public void testInsert() throws ParseException { + System.out.println("insert"); + String name = "test"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + + int status = 0; + TimelineAggregator instance = new TimelineAggregator(); + instance.insert(name, timestamp, status); + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + expRes.put(name, exptimeline); + + assertEquals(expRes.toString(), instance.getInputs().toString()); + + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of setFirst method, of class TimelineAggregator. 
+ */ + @Test + public void testSetFirst() throws ParseException { + System.out.println("setFirst"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + String name2 = "test2"; + String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0)); + HashMap map = new HashMap(); + map.put(name, new Timeline(timestamp)); + map.put(name2, new Timeline(timestamp2)); + + int status = 0; + TimelineAggregator instance = new TimelineAggregator(map); + instance.insert(name, timestamp, status); + instance.setFirst(name2, timestamp2, status); + // TODO review the generated test code and remove the default call to fail. + + HashMap expRes = new HashMap<>(); + Timeline exptimeline = new Timeline(timestamp); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 0, 0, 0), 0); + Timeline exptimeline2 = new Timeline(timestamp); + + exptimeline2.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0); + expRes.put(name2, exptimeline); + expRes.put(name, exptimeline2); + + assertEquals(expRes, instance.getInputs()); + } + + /** + * Test of getDate method, of class TimelineAggregator. + */ + @Test + public void testGetDate() throws ParseException { + System.out.println("getDate"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + int status = 0; + TimelineAggregator instance = new TimelineAggregator(timestamp); + instance.insert(name, timestamp, status); + + LocalDate expResult = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0).toLocalDate(); + LocalDate result = instance.getDate(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getSamples method, of class TimelineAggregator. + */ + @Test + public void testGetSamples() throws ParseException { + System.out.println("getSamples"); + String name = "test1"; + String timestamp = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0)); + String name2 = "test2"; + String timestamp2 = Utils.convertDateToString("yyyy-MM-dd'T'HH:mm:ss'Z'", Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 21, 50, 0)); + HashMap map = new HashMap(); + map.put(name, new Timeline(timestamp)); + map.put(name2, new Timeline(timestamp2)); + + TimelineAggregator instance = new TimelineAggregator(map); + instance.aggregate(createTruthTable(), 0); + TreeMap expRes = new TreeMap<>(); + Timeline exptimeline = new Timeline(); + exptimeline.insert(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 31, 12, 50, 0), 0); + Set> expResult = expRes.entrySet(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of clearAndSetDate method, of class TimelineAggregator. 
+ */ +// @Test +// public void testClearAndSetDate() { +// System.out.println("clearAndSetDate"); +// String timestamp = ""; +// TimelineAggregator instance = new TimelineAggregator(); +// instance.clearAndSetDate(timestamp); +// // TODO review the generated test code and remove the default call to fail. +// +// } + /** + * Test of aggregate method, of class TimelineAggregator. + */ + @Test + public void testAggregate() throws IOException, FileNotFoundException, org.json.simple.parser.ParseException, ParseException { + System.out.println("aggregate"); + TimelineUtils timelineUtils = new TimelineUtils(); + TimelineUtils.TimelineJson timelinejson = timelineUtils.readTimelines(); + + ArrayList inputTimelines = timelinejson.getInputTimelines(); + int op = timelinejson.getOperation(); + int[][][] truthTable = timelinejson.getTruthTable(); + ArrayList states = timelinejson.getStates(); + + TimelineAggregator instance = new TimelineAggregator(); + + HashMap inputs = new HashMap(); + int counter = 1; + for (TreeMap map : inputTimelines) { + Timeline timeline = new Timeline(); + checkForMissingMidnightStatus(map, states.indexOf("MISSING")); + + timeline.insertDateTimeStamps(map); + inputs.put(timeline + "_" + counter, timeline); + counter++; + } + instance.setInputs(inputs); + + instance.aggregate(truthTable, op); + + Set> expRes = timelinejson.getOutputTimeline().entrySet(); + Set> res = instance.getOutput().getSamples(); + assertEquals(expRes, res); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getOutput method, of class TimelineAggregator. + */ + @Test + public void testGetOutput() { + System.out.println("getOutput"); + TimelineAggregator instance = new TimelineAggregator(); + Timeline expResult = null; + Timeline result = instance.getOutput(); + //assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setOutput method, of class TimelineAggregator. + */ + @Test + public void testSetOutput() { + System.out.println("setOutput"); + Timeline output = null; + TimelineAggregator instance = new TimelineAggregator(); + instance.setOutput(output); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getInputs method, of class TimelineAggregator. + */ + @Test + public void testGetInputs() { + System.out.println("getInputs"); + TimelineAggregator instance = new TimelineAggregator(); + Map expResult = null; + Map result = instance.getInputs(); +// assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setInputs method, of class TimelineAggregator. + */ + @Test + public void testSetInputs() { + System.out.println("setInputs"); + Map inputs = null; + TimelineAggregator instance = new TimelineAggregator(); + instance.setInputs(inputs); + // TODO review the generated test code and remove the default call to fail. 
+ //fail("The test case is a prototype."); + } + + private int[][][] createTruthTable() { + + int[][][] truthtable = new int[2][6][6]; + + truthtable[0][0][0] = 0; + truthtable[0][0][1] = 0; + truthtable[0][0][2] = 0; + truthtable[0][0][3] = 0; + truthtable[0][0][4] = 0; + truthtable[0][0][5] = 0; + + truthtable[0][1][0] = -1; + truthtable[0][1][1] = 1; + truthtable[0][1][2] = 1; + truthtable[0][1][3] = 1; + truthtable[0][1][4] = 1; + truthtable[0][1][5] = 1; + + truthtable[0][2][0] = -1; + truthtable[0][2][1] = -1; + truthtable[0][2][2] = 2; + truthtable[0][2][3] = 2; + truthtable[0][2][4] = 4; + truthtable[0][2][5] = 2; + + truthtable[0][3][0] = -1; + truthtable[0][3][1] = -1; + truthtable[0][3][2] = -1; + truthtable[0][3][3] = 3; + truthtable[0][3][4] = 4; + truthtable[0][3][5] = 5; + + truthtable[0][4][0] = -1; + truthtable[0][4][1] = -1; + truthtable[0][4][2] = -1; + truthtable[0][4][3] = -1; + truthtable[0][4][4] = 4; + truthtable[0][4][5] = 5; + + truthtable[0][5][0] = -1; + truthtable[0][5][1] = -1; + truthtable[0][5][2] = -1; + truthtable[0][5][3] = -1; + truthtable[0][5][4] = -1; + truthtable[0][5][5] = 5; + + truthtable[1][0][0] = 0; + truthtable[1][0][1] = 1; + truthtable[1][0][2] = 2; + truthtable[1][0][3] = 3; + truthtable[1][0][4] = 4; + truthtable[1][0][5] = 5; + + truthtable[1][1][0] = -1; + truthtable[1][1][1] = 1; + truthtable[1][1][2] = 2; + truthtable[1][1][3] = 3; + truthtable[1][1][4] = 4; + truthtable[1][1][5] = 5; + + truthtable[1][2][0] = -1; + truthtable[1][2][1] = -1; + truthtable[1][2][2] = 2; + truthtable[1][2][3] = 3; + truthtable[1][2][4] = 4; + truthtable[1][2][5] = 5; + + truthtable[1][3][0] = -1; + truthtable[1][3][1] = -1; + truthtable[1][3][2] = -1; + truthtable[1][3][3] = 3; + truthtable[1][3][4] = 4; + truthtable[1][3][5] = 5; + + truthtable[1][4][0] = -1; + truthtable[1][4][1] = -1; + truthtable[1][4][2] = -1; + truthtable[1][4][3] = -1; + truthtable[1][4][4] = 4; + truthtable[1][4][5] = 4; + + truthtable[1][5][0] = -1; + truthtable[1][5][1] = -1; + truthtable[1][5][2] = -1; + truthtable[1][5][3] = -1; + truthtable[1][5][4] = -1; + truthtable[1][5][5] = 5; + + return truthtable; + + } + + private void checkForMissingMidnightStatus(TreeMap map, int missingStatus) throws ParseException { + DateTime midnight = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00); + if (!map.containsKey(midnight)) { + map.put(midnight, missingStatus); + } + } + + private TreeMap buildTimeline1() throws ParseException { + System.out.println("timeline 1 :"); + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 15, 50), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 15, 15, 50), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 20, 16, 50), 1); + for (Entry entry : map.entrySet()) { + System.out.println(entry.getKey() + " --- " + entry.getValue()); + } + return map; + } + + private TreeMap buildTimeline2() throws ParseException { + System.out.println("timeline 2 :"); + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 
2021, 0, 15, 0, 5, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 12, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 14, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 23, 05, 00), 1); + for (Entry entry : map.entrySet()) { + System.out.println(entry.getKey() + " --- " + entry.getValue()); + } + return map; + } + + private TreeMap buildTimeline3() throws ParseException { + System.out.println("timeline 3 :"); + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 5, 00), 2); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 6, 00, 00), 0); + for (Entry entry : map.entrySet()) { + System.out.println(entry.getKey() + " --- " + entry.getValue()); + } + return map; + } + + private TreeMap buildTimeline4() throws ParseException { + System.out.println("timeline 4 :"); + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 20, 0, 00), 4); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 22, 0, 00), 4); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 23, 00, 00), 0); + for (Entry entry : map.entrySet()) { + System.out.println(entry.getKey() + " --- " + entry.getValue()); + } + return map; + } + + private TreeMap buildAggregated() throws ParseException { + System.out.println("timeline all :"); + TreeMap map = new TreeMap(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 5, 00), 2); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 15, 50), 1); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 15, 15, 50), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 20, 0, 00), 4); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 22, 0, 00), 4); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 23, 00, 00), 1); + + for (Entry entry : map.entrySet()) { + System.out.println(entry.getKey() + " --- " + entry.getValue()); + } + return map; + } + + private TreeMap buildAggregated2() throws ParseException { + + TreeMap map = new TreeMap(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 5, 00), 2); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 0, 00), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 15, 50), 1); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 15, 15, 50), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 20, 0, 00), 4); + 
map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 0, 00), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 22, 0, 00), 4); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 23, 00, 00), 1); + return map; + } +} diff --git a/flink_jobs/status_trends/src/test/java/timelines/TimelineTest.java b/flink_jobs/status_trends/src/test/java/timelines/TimelineTest.java new file mode 100644 index 00000000..54265731 --- /dev/null +++ b/flink_jobs/status_trends/src/test/java/timelines/TimelineTest.java @@ -0,0 +1,577 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. + */ +package timelines; + +import argo.profiles.OperationsParser; +import argo.utils.Utils; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.LocalDate; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * + * @author cthermolia + */ +public class TimelineTest { + + public TimelineTest() { + } + + @BeforeClass + public static void setUpClass() { + } + + @AfterClass + public static void tearDownClass() { + } + + @Before + public void setUp() { + } + + @After + public void tearDown() { + } + + /** + * Test of get method, of class Timeline. + */ + @Test + public void testGet_String() throws ParseException { + System.out.println("get"); + + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + String timestampStr = timestamp.toString(dtf); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 1; + int result = instance.get(timestampStr); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of get method, of class Timeline. + */ + @Test + public void testGet_DateTime() throws ParseException { + System.out.println("get"); + DateTime point = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 1; + int result = instance.get(point); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of insert method, of class Timeline. + */ + @Test + public void testInsert_String_int() throws ParseException { + System.out.println("insert"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + String timestampStr = timestamp.toString(dtf); + + int status = 1; + Timeline instance = new Timeline(); + instance.insert(timestampStr, status); + // TODO review the generated test code and remove the default call to fail. 
+ //fail("The test case is a prototype."); + } + + /** + * Test of insert method, of class Timeline. + */ + @Test + public void testInsert_DateTime_int() throws ParseException { + System.out.println("insert"); + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1); + + int status = 0; + Timeline instance = new Timeline(); + instance.insert(timestamp, status); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of insertStringTimeStamps method, of class Timeline. + */ + @Test + public void testInsertStringTimeStamps() throws ParseException { + System.out.println("insertStringTimeStamps"); + TreeMap timestamps = createStringTimestampList(); + Timeline instance = new Timeline(); + instance.insertStringTimeStamps(timestamps); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of insertDateTimeStamps method, of class Timeline. + */ + @Test + public void testInsertDateTimeStamps() throws ParseException { + System.out.println("insertDateTimeStamps"); + TreeMap timestamps = createTimestampList(); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(timestamps); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of setFirst method, of class Timeline. + */ + @Test + public void testSetFirst() throws ParseException { + System.out.println("setFirst"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + DateTime timestamp = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 0, 0, 0, 1); + String timestampStr = timestamp.toString(dtf); + + int state = 0; + Timeline instance = new Timeline(); + instance.setFirst(timestampStr, state); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of clear method, of class Timeline. + */ + @Test + public void testClear() throws ParseException { + System.out.println("clear"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + instance.clear(); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of bulkInsert method, of class Timeline. + */ + @Test + public void testBulkInsert() throws ParseException { + System.out.println("bulkInsert"); + Set> samples = createTimestampList().entrySet(); + Timeline instance = new Timeline(); + instance.bulkInsert(samples); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getSamples method, of class Timeline. + */ + @Test + public void testGetSamples() throws ParseException { + System.out.println("getSamples"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + Set> expResult = instance.getSamples(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getDate method, of class Timeline. 
+ */ + @Test + public void testGetDate() throws ParseException { + System.out.println("getDate"); + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + Timeline instance = new Timeline(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0).toString(dtf)); + + LocalDate expResult = new LocalDate(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 1, 0, 0, 0, 0)); + LocalDate result = instance.getDate(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + //fail("The test case is a prototype."); + } + + /** + * Test of getLength method, of class Timeline. + */ + @Test + public void testGetLength() throws ParseException { + System.out.println("getLength"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 2; + int result = instance.getLength(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of isEmpty method, of class Timeline. + */ + @Test + public void testIsEmpty() throws ParseException { + System.out.println("isEmpty"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + + boolean expResult = false; + boolean result = instance.isEmpty(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of optimize method, of class Timeline. + */ + @Test + public void testOptimize() throws ParseException { + System.out.println("optimize"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + instance.optimize(); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of getPoints method, of class Timeline. + */ + @Test + public void testGetPoints() throws ParseException { + System.out.println("getPoints"); + Timeline instance = new Timeline(); + TreeMap map = createTimestampList(); + instance.insertDateTimeStamps(map); + Set expResult = new TreeSet<>(); + expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15)); + expResult.add(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23)); + + Set result = instance.getPoints(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of aggregate method, of class Timeline. + */ + @Test + public void testAggregate() throws ParseException { + System.out.println("aggregate"); + Timeline second = new Timeline(); + second.insertDateTimeStamps(createSecondTimeline()); + int[][][] truthTable = createTruthTable(); + int op = 0; + Timeline instance = new Timeline(); + instance.aggregate(second, truthTable, op); + Set> expResult = createMerged().entrySet(); + Set> result = instance.getSamples(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of buildStringTimeStampMap method, of class Timeline. 
+ */ +// @Test +// public void testBuildStringTimeStampMap() { +// System.out.println("buildStringTimeStampMap"); +// ArrayList timestampList = null; +// OperationsParser op = null; +// Timeline instance = new Timeline(); +// TreeMap expResult = null; +// TreeMap result = instance.buildStringTimeStampMap(timestampList, op); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// //fail("The test case is a prototype."); +// } +// +// /** +// * Test of buildDateTimeStampMap method, of class Timeline. +// */ +// @Test +// public void testBuildDateTimeStampMap() { +// System.out.println("buildDateTimeStampMap"); +// ArrayList timestampList = null; +// OperationsParser op = null; +// Timeline instance = new Timeline(); +// TreeMap expResult = null; +// TreeMap result = instance.buildDateTimeStampMap(timestampList, op); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); +// } +// +// /** +// * Test of removeTimeStamp method, of class Timeline. +// */ +// @Test +// public void testRemoveTimeStamp() { +// System.out.println("removeTimeStamp"); +// DateTime timestamp = null; +// Timeline instance = new Timeline(); +// instance.removeTimeStamp(timestamp); +// // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); +// } + /** + * Test of calcStatusChanges method, of class Timeline. + */ + @Test + public void testCalcStatusChanges() throws ParseException { + System.out.println("calcStatusChanges"); + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 1; + int result = instance.calcStatusChanges(); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of replacePreviousDateStatus method, of class Timeline. + */ + @Test + public void testReplacePreviousDateStatus() throws ParseException { + System.out.println("replacePreviousDateStatus"); + DateTime date = Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 0, 0); + OperationsParser opsMgr = null; + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + ArrayList availStates = new ArrayList<>(); + availStates.add("OK"); + availStates.add("WARNING"); + availStates.add("UNKNOWN"); + availStates.add("MISSING"); + availStates.add("CRITICAL"); + availStates.add("DOWNTIME"); + + instance.replacePreviousDateStatus(date, availStates); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + /** + * Test of hashCode method, of class Timeline. + */ +// @Test +// public void testHashCode() { +// System.out.println("hashCode"); +// Timeline instance = new Timeline(); +// int expResult = 0; +// int result = instance.hashCode(); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); +// } + /** + * Test of equals method, of class Timeline. 
+ */ +// @Test +// public void testEquals() { +// System.out.println("equals"); +// Object obj = null; +// Timeline instance = new Timeline(); +// boolean expResult = false; +// boolean result = instance.equals(obj); +// assertEquals(expResult, result); +// // TODO review the generated test code and remove the default call to fail. +// fail("The test case is a prototype."); +// } + /** + * Test of opInt method, of class Timeline. + */ + @Test + public void testOpInt() throws ParseException { + System.out.println("opInt"); + int[][][] truthTable = createTruthTable(); + int op = 0; + int a = 0; + int b = 0; + Timeline instance = new Timeline(); + instance.insertDateTimeStamps(createTimestampList()); + int expResult = 0; + int result = instance.opInt(truthTable, op, a, b); + assertEquals(expResult, result); + // TODO review the generated test code and remove the default call to fail. + // fail("The test case is a prototype."); + } + + private TreeMap createTimestampList() throws ParseException { + TreeMap map = new TreeMap<>(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0); + return map; +// + } + + private TreeMap createStringTimestampList() throws ParseException { + TreeMap map = new TreeMap<>(); + + DateTimeFormatter dtf = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 12, 23).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 1, 5, 10).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15).toString(dtf), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4).toString(dtf), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5).toString(dtf), 0); + return map; +// + } + + private TreeMap createSecondTimeline() throws ParseException { + TreeMap map = new TreeMap<>(); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 4, 31, 1), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 21, 3, 5), 0); + return map; +// + } + + private int[][][] createTruthTable() { + + int[][][] truthtable = new int[2][6][6]; + + truthtable[0][0][0] = 0; + truthtable[0][0][1] = 0; + truthtable[0][0][2] = 0; + truthtable[0][0][3] = 0; + truthtable[0][0][4] = 0; + truthtable[0][0][5] = 0; + + truthtable[0][1][0] = -1; + truthtable[0][1][1] = 1; + truthtable[0][1][2] = 1; + truthtable[0][1][3] = 1; + truthtable[0][1][4] = 1; + truthtable[0][1][5] = 1; + + truthtable[0][2][0] = -1; + truthtable[0][2][1] = -1; + truthtable[0][2][2] = 2; + truthtable[0][2][3] = 2; + truthtable[0][2][4] = 4; + 
truthtable[0][2][5] = 2; + + truthtable[0][3][0] = -1; + truthtable[0][3][1] = -1; + truthtable[0][3][2] = -1; + truthtable[0][3][3] = 3; + truthtable[0][3][4] = 4; + truthtable[0][3][5] = 5; + + truthtable[0][4][0] = -1; + truthtable[0][4][1] = -1; + truthtable[0][4][2] = -1; + truthtable[0][4][3] = -1; + truthtable[0][4][4] = 4; + truthtable[0][4][5] = 5; + + truthtable[0][5][0] = -1; + truthtable[0][5][1] = -1; + truthtable[0][5][2] = -1; + truthtable[0][5][3] = -1; + truthtable[0][5][4] = -1; + truthtable[0][5][5] = 5; + + truthtable[1][0][0] = 0; + truthtable[1][0][1] = 1; + truthtable[1][0][2] = 2; + truthtable[1][0][3] = 3; + truthtable[1][0][4] = 4; + truthtable[1][0][5] = 5; + + truthtable[1][1][0] = -1; + truthtable[1][1][1] = 1; + truthtable[1][1][2] = 2; + truthtable[1][1][3] = 3; + truthtable[1][1][4] = 4; + truthtable[1][1][5] = 5; + + truthtable[1][2][0] = -1; + truthtable[1][2][1] = -1; + truthtable[1][2][2] = 2; + truthtable[1][2][3] = 3; + truthtable[1][2][4] = 4; + truthtable[1][2][5] = 5; + + truthtable[1][3][0] = -1; + truthtable[1][3][1] = -1; + truthtable[1][3][2] = -1; + truthtable[1][3][3] = 3; + truthtable[1][3][4] = 4; + truthtable[1][3][5] = 5; + + truthtable[1][4][0] = -1; + truthtable[1][4][1] = -1; + truthtable[1][4][2] = -1; + truthtable[1][4][3] = -1; + truthtable[1][4][4] = 4; + truthtable[1][4][5] = 4; + + truthtable[1][5][0] = -1; + truthtable[1][5][1] = -1; + truthtable[1][5][2] = -1; + truthtable[1][5][3] = -1; + truthtable[1][5][4] = -1; + truthtable[1][5][5] = 5; + + return truthtable; + + } + + private TreeMap createMerged() throws ParseException { + TreeMap map = new TreeMap(); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 0, 15, 50), 1); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 2, 5, 10), 0); + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 3, 50, 4), 1); + + map.put(Utils.createDate("yyyy-MM-dd'T'HH:mm:ss'Z'", 2021, 0, 15, 5, 20, 15), 0); + return map; + } + +} diff --git a/flink_jobs/status_trends/src/test/java/timelines/TimelineUtils.java b/flink_jobs/status_trends/src/test/java/timelines/TimelineUtils.java new file mode 100644 index 00000000..a281c684 --- /dev/null +++ b/flink_jobs/status_trends/src/test/java/timelines/TimelineUtils.java @@ -0,0 +1,189 @@ +/* + * To change this license header, choose License Headers in Project Properties. + * To change this template file, choose Tools | Templates + * and open the template in the editor. 
+ */ +package timelines; + +import argo.utils.Utils; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.TreeMap; +import org.joda.time.DateTime; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; + +/** + * + * @author cthermolia + */ +public class TimelineUtils { + + public TimelineJson readTimelines() throws IOException, FileNotFoundException, ParseException, java.text.ParseException { + + JSONObject timelineJSONObj = readJsonFromFile(TimelineUtils.class.getResource("/timelines/timeline.json").getFile()); + TimelineJson timelinejson = buildTimelines(timelineJSONObj); + return timelinejson; + } + + private JSONObject readJsonFromFile(String path) throws FileNotFoundException, IOException, org.json.simple.parser.ParseException { + JSONParser parser = new JSONParser(); + URL url = TimelineUtils.class.getResource(path); + Object obj = parser.parse(new FileReader(path)); + + JSONObject jsonObject = (JSONObject) obj; + + return jsonObject; + } + + public TimelineJson buildTimelines(JSONObject jsonObject) throws java.text.ParseException { + + ArrayList states = new ArrayList<>(); + ArrayList operations = new ArrayList<>(); + ArrayList inputTimelines = new ArrayList<>(); + TreeMap outputTimeline = new TreeMap(); + JSONObject dataObject = (JSONObject) jsonObject.get("data"); + + JSONArray stateList = (JSONArray) dataObject.get("available_states"); + JSONArray operationList = (JSONArray) dataObject.get("operations"); + String operation = (String) dataObject.get("operation"); + Iterator operationsIter = operationList.iterator(); + + while (operationsIter.hasNext()) { + String op = operationsIter.next(); + operations.add(op); + } + JSONArray inputs = (JSONArray) dataObject.get("inputs"); + JSONObject output = (JSONObject) dataObject.get("output"); + Iterator stateIter = stateList.iterator(); + while (stateIter.hasNext()) { + String state = stateIter.next(); + states.add(state); + } + + Iterator inputIter = inputs.iterator(); + while (inputIter.hasNext()) { + JSONObject timelineJSONObj = inputIter.next(); + JSONArray timestampList = (JSONArray) timelineJSONObj.get("timestamps"); + Iterator timeIter = timestampList.iterator(); + TreeMap map = new TreeMap(); + while (timeIter.hasNext()) { + JSONObject timestatus = (JSONObject) timeIter.next(); + String time = (String) timestatus.get("timestamp"); + String status = (String) timestatus.get("status"); + map.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status)); + } + // add each input timeline to the list once, after all of its timestamps have been parsed + inputTimelines.add(map); + + } + + JSONArray timestampList = (JSONArray) output.get("timestamps"); + Iterator timeIter = timestampList.iterator(); + + while (timeIter.hasNext()) { + JSONObject timestatus = (JSONObject) timeIter.next(); + String time = (String) timestatus.get("timestamp"); + String status = (String) timestatus.get("status"); + outputTimeline.put(Utils.convertStringtoDate("yyyy-MM-dd'T'HH:mm:ss'Z'", time), states.indexOf(status)); + + } + + JSONArray opTruthTable = (JSONArray) dataObject.get("operation_truth_table"); + + Iterator opTruthTableIter = opTruthTable.iterator(); + int[][][] table = new int[operations.size()][states.size()][states.size()]; + for (int[][] surface : table) { + for (int[] line : surface) { + Arrays.fill(line, -1); + 
} + while (opTruthTableIter.hasNext()) { + JSONObject truthOperationObj = (JSONObject) opTruthTableIter.next(); + String truthOp = (String) truthOperationObj.get("name"); + int truthOpInt = operations.indexOf(truthOp); + JSONArray truthTable = (JSONArray) truthOperationObj.get("truth_table"); + Iterator truthTableIter = truthTable.iterator(); + while (truthTableIter.hasNext()) { + + JSONObject truthTableObj = (JSONObject) truthTableIter.next(); + String a = (String) truthTableObj.get("a"); + int aInt = states.indexOf(a); + String b = (String) truthTableObj.get("b"); + int bInt = states.indexOf(b); + String x = (String) truthTableObj.get("b"); + int xInt = states.indexOf(x); + table[truthOpInt][aInt][bInt] = xInt; + + } + } + TimelineJson timelineJsonObject = new TimelineJson(inputTimelines, outputTimeline, operations.indexOf(operation),table,states); + return timelineJsonObject; + } + + public class TimelineJson { + + private ArrayList inputTimelines; + private TreeMap outputTimeline; + private Integer operation; + private int[][][] truthTable; + private ArrayList states; + + public TimelineJson(ArrayList inputTimelines, TreeMap outputTimeline, Integer operation, int[][][] truthTable, ArrayList states) { + this.inputTimelines = inputTimelines; + this.outputTimeline = outputTimeline; + this.operation = operation; + this.truthTable = truthTable; + this.states = states; + } + + public ArrayList getInputTimelines() { + return inputTimelines; + } + + public void setInputTimelines(ArrayList inputTimelines) { + this.inputTimelines = inputTimelines; + } + + public TreeMap getOutputTimeline() { + return outputTimeline; + } + + public void setOutputTimeline(TreeMap outputTimeline) { + this.outputTimeline = outputTimeline; + } + + public Integer getOperation() { + return operation; + } + + public void setOperation(Integer operation) { + this.operation = operation; + } + + public int[][][] getTruthTable() { + return truthTable; + } + + public void setTruthTable(int[][][] truthTable) { + this.truthTable = truthTable; + } + + public ArrayList getStates() { + return states; + } + + public void setStates(ArrayList states) { + this.states = states; + } + } + + +} diff --git a/flink_jobs/stream_status/.gitignore b/flink_jobs/stream_status/.gitignore index 108010ca..78fae2e1 100644 --- a/flink_jobs/stream_status/.gitignore +++ b/flink_jobs/stream_status/.gitignore @@ -3,3 +3,5 @@ .settings/ .classpath/ .classpath +/nbproject +/nbactions.xml diff --git a/flink_jobs/stream_status/pom.xml b/flink_jobs/stream_status/pom.xml index 41873d89..a7095a81 100644 --- a/flink_jobs/stream_status/pom.xml +++ b/flink_jobs/stream_status/pom.xml @@ -109,17 +109,17 @@ org.apache.httpcomponents httpclient - 4.5.2 + 4.5.13 org.apache.httpcomponents fluent-hc - 4.5.2 + 4.5.13 junit junit - 4.11 + 4.13.1 test @@ -132,7 +132,7 @@ xerces xercesImpl - 2.7.1 + 2.12.0 org.mongodb @@ -140,6 +140,13 @@ 3.2.2 compile + + + com.github.tomakehurst + wiremock + 1.58 + test + @@ -197,6 +204,16 @@ 3.2.2 compile + + org.apache.httpcomponents + httpclient + 4.5.13 + + + org.apache.httpcomponents + fluent-hc + 4.5.13 + diff --git a/flink_jobs/stream_status/src/main/java/argo/amr/ApiResource.java b/flink_jobs/stream_status/src/main/java/argo/amr/ApiResource.java new file mode 100644 index 00000000..d8cb13b5 --- /dev/null +++ b/flink_jobs/stream_status/src/main/java/argo/amr/ApiResource.java @@ -0,0 +1,5 @@ +package argo.amr; + +public enum ApiResource { + CONFIG, OPS, METRIC, AGGREGATION, THRESHOLDS, TOPOENDPOINTS, TOPOGROUPS, WEIGHTS, DOWNTIMES, 
RECOMPUTATIONS +} \ No newline at end of file diff --git a/flink_jobs/stream_status/src/main/java/argo/amr/ApiResourceManager.java b/flink_jobs/stream_status/src/main/java/argo/amr/ApiResourceManager.java new file mode 100644 index 00000000..c4375701 --- /dev/null +++ b/flink_jobs/stream_status/src/main/java/argo/amr/ApiResourceManager.java @@ -0,0 +1,644 @@ +package argo.amr; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.net.ssl.SSLContext; + +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.fluent.Executor; +import org.apache.http.client.fluent.Request; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustSelfSignedStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + + +/** + * APIResourceManager class fetches remote argo-web-api resources such as + * report configuration, profiles, topology, weights in JSON format + */ + + +public class ApiResourceManager { + + private EnumMap data = new EnumMap<>(ApiResource.class); + + private String endpoint; + private String token; + private String reportID; + private String date; + private String proxy; + + private String metricID; + private String aggregationID; + private String opsID; + private String threshID; + private String reportName; + private String weightsID; + private boolean verify; + + + public ApiResourceManager(String endpoint, String token) { + this.endpoint = endpoint; + this.token = token; + this.metricID = ""; + this.aggregationID = ""; + this.opsID = ""; + this.threshID = ""; + this.reportName = ""; + this.reportID = ""; + this.date = ""; + this.proxy = ""; + this.weightsID = ""; + this.verify = true; + + } + + public boolean getVerify() { + return verify; + } + + public void setVerify(boolean verify) { + this.verify = verify; + } + + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + + public String getReportID() { + return reportID; + } + + public void setReportID(String reportID) { + this.reportID = reportID; + } + + public String getReportName() { + return this.reportName; + } + + public String getOpsID() { + return this.opsID; + } + + + public String getAggregationID() { + return this.aggregationID; + } + + public String getMetricID() { + return this.metricID; + } + + public String getThresholdsID() { + return this.threshID; + } + + + public String getDate() { + return date; + } + + public void setDate(String date) { + this.date = date; + } + + public String getProxy() { + return proxy; + } + + public void setProxy(String proxy) { + this.proxy = proxy; + } + + 
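+    /*
+     * Typical usage, sketched for illustration only (the endpoint, token and report id below are
+     * hypothetical placeholders; the calls mirror how the streaming job drives this class):
+     *
+     *   ApiResourceManager amr = new ApiResourceManager("api.example.org", "s3cr3t-t0k3n");
+     *   amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2");
+     *   amr.getRemoteAll();
+     *   String opsJSON = amr.getResourceJSON(ApiResource.OPS);
+     *   MetricProfile[] metrics = amr.getListMetrics();
+     */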
public String getWeightsID() { + return weightsID; + } + + public void setWeightsID(String weightsID) { + this.weightsID = weightsID; + } + + /** + * Create an SSL Connection Socket Factory with a strategy to trust self signed + * certificates + */ + private SSLConnectionSocketFactory selfSignedSSLF() + throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + SSLContextBuilder sslBuild = new SSLContextBuilder(); + sslBuild.loadTrustMaterial(null, new TrustSelfSignedStrategy()); + return new SSLConnectionSocketFactory(sslBuild.build(), NoopHostnameVerifier.INSTANCE); + } + + /** + * Contacts remote argo-web-api based on the full url of a resource its content (expected in json format) + * + * @param fullURL String containing the full url representation of the argo-web-api resource + * @return A string representation of the resource json content + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + private String getResource(String fullURL) { + + + Request r = Request.Get(fullURL).addHeader("Accept", "application/json").addHeader("Content-type", + "application/json").addHeader("x-api-key",this.token); + if (!this.proxy.isEmpty()) { + r = r.viaProxy(proxy); + } + + r = r.connectTimeout(1000).socketTimeout(1000); + + String content = "{}"; + + try { + if (this.verify == false) { + CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(selfSignedSSLF()).build(); + Executor executor = Executor.newInstance(httpClient); + content = executor.execute(r).returnContent().asString(); + } else { + + content = r.execute().returnContent().asString(); + } + } catch (KeyManagementException | NoSuchAlgorithmException | KeyStoreException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + return content; + } + + /** + * Retrieves the remote report configuration based on reportID main class attribute and + * stores the content in the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteConfig() { + String path = "https://%s/api/v2/reports/%s"; + String fullURL = String.format(path, this.endpoint, this.reportID); + String content = getResource(fullURL); + this.data.put(ApiResource.CONFIG, getJsonData(content, false)); + } + + + /** + * Retrieves the metric profile content based on the metric_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteMetric() { + + String path = "https://%s/api/v2/metric_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.metricID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.METRIC, getJsonData(content, false)); + } + + /** + * Retrieves the aggregation profile content based on the aggreagation_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteAggregation() { + + String path = "https://%s/api/v2/aggregation_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.aggregationID, this.date); + String 
content = getResource(fullURL); + this.data.put(ApiResource.AGGREGATION, getJsonData(content, false)); + } + + /** + * Retrieves the ops profile content based on the ops_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteOps() { + + String path = "https://%s/api/v2/operations_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.opsID, this.date); + + String content = getResource(fullURL); + this.data.put(ApiResource.OPS, getJsonData(content, false)); + } + + /** + * Retrieves the thresholds profile content based on the thresh_id attribute and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteThresholds() { + + String path = "https://%s/api/v2/thresholds_profiles/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.threshID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.THRESHOLDS, getJsonData(content, false)); + } + + /** + * Retrieves the topology endpoint content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoEndpoints() { + String path = "https://%s/api/v2/topology/endpoints/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOENDPOINTS, getJsonData(content, true)); + } + + /** + * Retrieves the topology groups content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteTopoGroups() { + String path = "https://%s/api/v2/topology/groups/by_report/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.reportName, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.TOPOGROUPS, getJsonData(content, true)); + } + + /** + * Retrieves the weights content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteWeights() { + String path = "https://%s/api/v2/weights/%s?date=%s"; + String fullURL = String.format(path, this.endpoint, this.weightsID, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.WEIGHTS, getJsonData(content, false)); + } + + /** + * Retrieves the downtimes content and stores it to the enum map + * + * @throws ClientProtocolException + * @throws IOException + * @throws KeyStoreException + * @throws NoSuchAlgorithmException + * @throws KeyManagementException + */ + public void getRemoteDowntimes() { + String path = "https://%s/api/v2/downtimes?date=%s"; + String fullURL = String.format(path, this.endpoint, this.date); + String content = getResource(fullURL); + this.data.put(ApiResource.DOWNTIMES, getJsonData(content, false)); + } + + public void getRemoteRecomputations() { + String path = "https://%s/api/v2/recomputations?date=%s"; + String 
fullURL = String.format(path, this.endpoint, this.date);
+        String content = getResource(fullURL);
+        this.data.put(ApiResource.RECOMPUTATIONS, getJsonData(content, true));
+    }
+
+    /**
+     * Returns local resource content (after it has been retrieved) based on resource type
+     *
+     * @param res
+     * @return The extracted items JSON value as string
+     */
+    public String getResourceJSON(ApiResource res) {
+        return (String) this.data.get(res);
+    }
+
+    /**
+     * Executes all steps to retrieve the complete set of available profile,
+     * topology, weights and downtime information from argo-web-api
+     *
+     * @throws ClientProtocolException
+     * @throws IOException
+     * @throws KeyStoreException
+     * @throws NoSuchAlgorithmException
+     * @throws KeyManagementException
+     */
+    public void getRemoteAll() {
+        // Start with report and configuration
+        this.getRemoteConfig();
+        // parse remote report config to be able to get the other profiles
+        this.parseReport();
+        // Go on to the profiles
+        this.getRemoteMetric();
+        this.getRemoteOps();
+        this.getRemoteAggregation();
+        if (!this.threshID.equals("")) this.getRemoteThresholds();
+        // Go to topology
+        this.getRemoteTopoEndpoints();
+        this.getRemoteTopoGroups();
+        // get weights
+        if (!this.weightsID.equals("")) this.getRemoteWeights();
+        // get downtimes
+        this.getRemoteDowntimes();
+        // get recomputations
+        this.getRemoteRecomputations();
+
+    }
+
+    /**
+     * Parses the report content to extract the report's name and the various profile IDs
+     */
+    public void parseReport() {
+        // check if report configuration has been retrieved
+        if (!this.data.containsKey(ApiResource.CONFIG))
+            return;
+
+        String content = (String) this.data.get(ApiResource.CONFIG);
+        JsonParser jsonParser = new JsonParser();
+        JsonElement jElement = jsonParser.parse(content);
+        JsonObject jRoot = jElement.getAsJsonObject();
+        JsonArray jProfiles = jRoot.get("profiles").getAsJsonArray();
+
+        JsonObject jInfo = jRoot.get("info").getAsJsonObject();
+        this.reportName = jInfo.get("name").getAsString();
+
+        // for each profile iterate and store its id in the profile manager for later
+        // reference
+        for (int i = 0; i < jProfiles.size(); i++) {
+            JsonObject jProf = jProfiles.get(i).getAsJsonObject();
+            String profType = jProf.get("type").getAsString();
+            String profID = jProf.get("id").getAsString();
+            if (profType.equalsIgnoreCase("metric")) {
+                this.metricID = profID;
+            } else if (profType.equalsIgnoreCase("aggregation")) {
+                this.aggregationID = profID;
+            } else if (profType.equalsIgnoreCase("operations")) {
+                this.opsID = profID;
+            } else if (profType.equalsIgnoreCase("thresholds")) {
+                this.threshID = profID;
+            }
+
+        }
+
+    }
+
+    /**
+     * Parses the Downtime content retrieved from argo-web-api and provides a list of Downtime avro objects
+     * to be used in the next steps of the pipeline
+     */
+    public Downtime[] getListDowntimes() {
+        List<Downtime> results = new ArrayList<Downtime>();
+        if (!this.data.containsKey(ApiResource.DOWNTIMES)) {
+            // nothing has been retrieved yet: return an empty array instead of falling through
+            Downtime[] rArr = new Downtime[results.size()];
+            rArr = results.toArray(rArr);
+            return rArr;
+        }
+
+
+        String content = (String) this.data.get(ApiResource.DOWNTIMES);
+        JsonParser jsonParser = new JsonParser();
+        JsonElement jElement = jsonParser.parse(content);
+        JsonObject jRoot = jElement.getAsJsonObject();
+        JsonArray jElements = jRoot.get("endpoints").getAsJsonArray();
+        for (int i = 0; i < jElements.size(); i++) {
+            JsonObject jItem = jElements.get(i).getAsJsonObject();
+            String hostname = jItem.get("hostname").getAsString();
+            String service = jItem.get("service").getAsString();
+            String startTime =
jItem.get("start_time").getAsString(); + String endTime = jItem.get("end_time").getAsString(); + + Downtime d = new Downtime(hostname,service,startTime,endTime); + results.add(d); + } + + Downtime[] rArr = new Downtime[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Topology endpoint content retrieved from argo-web-api and provides a list of GroupEndpoint avro objects + * to be used in the next steps of the pipeline + */ + public GroupEndpoint[] getListGroupEndpoints() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.TOPOENDPOINTS)) { + GroupEndpoint[] rArr = new GroupEndpoint[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.TOPOENDPOINTS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String service = jItem.get("service").getAsString(); + String hostname = jItem.get("hostname").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags); + results.add(ge); + } + + GroupEndpoint[] rArr = new GroupEndpoint[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Topology Groups content retrieved from argo-web-api and provides a list of GroupGroup avro objects + * to be used in the next steps of the pipeline + */ + public GroupGroup[] getListGroupGroups() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.TOPOGROUPS)){ + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + String content = this.data.get(ApiResource.TOPOGROUPS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String subgroup = jItem.get("subgroup").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupGroup gg = new GroupGroup(gType,group,subgroup,tags); + results.add(gg); + } + + GroupGroup[] rArr = new GroupGroup[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Weights content retrieved from argo-web-api and provides a list of Weights avro objects + * to be used in the next steps of the pipeline + */ + public Weight[] getListWeights() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.WEIGHTS)) { + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.WEIGHTS); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String wType = jRoot.get("weight_type").getAsString(); + JsonArray jElements = 
jRoot.get("groups").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String group = jItem.get("name").getAsString(); + String weight = jItem.get("value").getAsString(); + + Weight w = new Weight(wType,group,weight); + results.add(w); + } + + Weight[] rArr = new Weight[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Parses the Metric profile content retrieved from argo-web-api and provides a list of MetricProfile avro objects + * to be used in the next steps of the pipeline + */ + public MetricProfile[] getListMetrics() { + List results = new ArrayList(); + if (!this.data.containsKey(ApiResource.METRIC)) { + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + + String content = this.data.get(ApiResource.METRIC); + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String profileName = jRoot.get("name").getAsString(); + JsonArray jElements = jRoot.get("services").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String service = jItem.get("service").getAsString(); + JsonArray jMetrics = jItem.get("metrics").getAsJsonArray(); + for (int j=0; j < jMetrics.size(); j++) { + String metric = jMetrics.get(j).getAsString(); + + Map tags = new HashMap(); + MetricProfile mp = new MetricProfile(profileName,service,metric,tags); + results.add(mp); + } + + } + + MetricProfile[] rArr = new MetricProfile[results.size()]; + rArr = results.toArray(rArr); + return rArr; + } + + /** + * Extract first JSON item from data JSON array in api response + * + * @param content JSON content of the full repsonse (status + data) + * @return First available item in data array as JSON string representation + * + */ + private String getJsonData(String content, boolean asArray) { + JsonParser jsonParser = new JsonParser(); + // Grab the first - and only line of json from ops data + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + // Get the data array and the first item + if (asArray) { + return jRoot.get("data").toString(); + } + JsonArray jData = jRoot.get("data").getAsJsonArray(); + JsonElement jItem = jData.get(0); + return jItem.toString(); + } + +} diff --git a/flink_jobs/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java b/flink_jobs/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java index 6e9b9614..79bd436f 100644 --- a/flink_jobs/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java +++ b/flink_jobs/stream_status/src/main/java/argo/streaming/AmsStreamStatus.java @@ -4,7 +4,9 @@ import java.io.IOException; import java.net.URISyntaxException; import java.text.ParseException; +import java.time.Instant; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.Map; import java.util.Properties; @@ -41,6 +43,8 @@ import com.google.gson.JsonObject; import com.google.gson.JsonParser; +import argo.amr.ApiResource; +import argo.amr.ApiResourceManager; import argo.avro.Downtime; import argo.avro.GroupEndpoint; import argo.avro.MetricData; @@ -162,7 +166,23 @@ public static void main(String[] args) throws Exception { String token = parameterTool.getRequired("ams.token"); String project = parameterTool.getRequired("ams.project"); String subMetric = 
parameterTool.getRequired("ams.sub.metric"); - String subSync = parameterTool.getRequired("ams.sub.sync"); + + String apiEndpoint = parameterTool.getRequired("api.endpoint"); + String apiToken = parameterTool.getRequired("api.token"); + String reportID = parameterTool.getRequired("report.id"); + int apiInterval = parameterTool.getInt("api.interval"); + + ApiResourceManager amr = new ApiResourceManager(apiEndpoint,apiToken); + + // fetch + + // set params + if (parameterTool.has("api.proxy")) { + amr.setProxy(parameterTool.get("api.proxy")); + } + + amr.setReportID(reportID); + amr.getRemoteAll(); // set ams client batch and interval to default values @@ -179,31 +199,31 @@ public static void main(String[] args) throws Exception { // Establish the metric data AMS stream // Ingest sync avro encoded data from AMS endpoint ArgoMessagingSource amsMetric = new ArgoMessagingSource(endpoint, port, token, project, subMetric, batch, interval); - ArgoMessagingSource amsSync = new ArgoMessagingSource(endpoint, port, token, project, subSync, batch, interval); + ArgoApiSource apiSync = new ArgoApiSource(apiEndpoint,apiToken,reportID,apiInterval,interval); if (parameterTool.has("ams.verify")) { boolean verify = parameterTool.getBoolean("ams.verify"); amsMetric.setVerify(verify); - amsSync.setVerify(verify); + } if (parameterTool.has("ams.proxy")) { String proxyURL = parameterTool.get("ams.proxy"); amsMetric.setProxy(proxyURL); - amsSync.setProxy(proxyURL); + } DataStream metricAMS = see.addSource(amsMetric).setParallelism(1); - // Establish the sync data AMS stream - DataStream syncAMS = see.addSource(amsSync).setParallelism(1); + // Establish the sync stream from argowebapi + DataStream> syncAMS = see.addSource(apiSync).setParallelism(1); // Forward syncAMS data to two paths // - one with parallelism 1 to connect in the first processing step and // - one with max parallelism for status event generation step // (scalable) - DataStream syncA = syncAMS.forward(); - DataStream syncB = syncAMS.broadcast(); + DataStream> syncA = syncAMS.forward(); + DataStream> syncB = syncAMS.broadcast(); DataStream> groupMdata = metricAMS.connect(syncA) .flatMap(new MetricDataWithGroup(conf)).setParallelism(1); @@ -259,11 +279,8 @@ public static void main(String[] args) throws Exception { jobTitleSB.append(port); jobTitleSB.append("/v1/projects/"); jobTitleSB.append(project); - jobTitleSB.append("/subscriptions/["); + jobTitleSB.append("/subscriptions/"); jobTitleSB.append(subMetric); - jobTitleSB.append(","); - jobTitleSB.append(subSync); - jobTitleSB.append("]"); // Execute flink dataflow see.execute(jobTitleSB.toString()); @@ -273,7 +290,7 @@ public static void main(String[] args) throws Exception { * MetricDataWithGroup implements a map function that adds group information to * the metric data message */ - private static class MetricDataWithGroup extends RichCoFlatMapFunction> { + private static class MetricDataWithGroup extends RichCoFlatMapFunction, Tuple2> { private static final long serialVersionUID = 1L; @@ -297,10 +314,18 @@ public MetricDataWithGroup(StatusConfig config) { @Override public void open(Configuration parameters) throws IOException, ParseException, URISyntaxException { - SyncData sd = new SyncData(); + ApiResourceManager amr = new ApiResourceManager(config.apiEndpoint,config.apiToken); + + if (config.apiProxy != null) { + amr.setProxy(config.apiProxy); + } + + amr.setReportID(config.reportID); + amr.getRemoteAll(); + - ArrayList mpsList = sd.readMetricProfile(config.mps); - ArrayList egpList = 
sd.readGroupEndpoint(config.egp); + ArrayList mpsList = (ArrayList) (Arrays.asList(amr.getListMetrics())); + ArrayList egpList = (ArrayList) (Arrays.asList(amr.getListGroupEndpoints())); mps = new MetricProfileManager(); mps.loadFromList(mpsList); @@ -343,7 +368,7 @@ public void flatMap1(String value, Collector> out) byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); // Decode from avro DatumReader avroReader = new SpecificDatumReader(MetricData.getClassSchema(), - MetricDataOld.getClassSchema(), new SpecificData()); + MetricData.getClassSchema(), new SpecificData()); Decoder decoder = DecoderFactory.get().binaryDecoder(decoded64, null); MetricData item; @@ -379,30 +404,18 @@ public void flatMap1(String value, Collector> out) } - public void flatMap2(String value, Collector> out) + public void flatMap2(Tuple2 value, Collector> out) throws IOException, ParseException { - JsonParser jsonParser = new JsonParser(); - // parse the json root object - JsonElement jRoot = jsonParser.parse(value); - // parse the json field "data" and read it as string - // this is the base64 string payload - String data = jRoot.getAsJsonObject().get("data").getAsString(); - // Decode from base64 - byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); - JsonElement jAttr = jRoot.getAsJsonObject().get("attributes"); - Map attr = SyncParse.parseAttributes(jAttr); - if (attr.containsKey("type")) { - - String sType = attr.get("type"); - if (sType.equalsIgnoreCase("metric_profile")) { + + if (value.f0.equalsIgnoreCase("metric_profile")) { // Update mps - ArrayList mpsList = SyncParse.parseMetricProfile(decoded64); + ArrayList mpsList = SyncParse.parseMetricJSON(value.f1); mps = new MetricProfileManager(); mps.loadFromList(mpsList); - } else if (sType.equals("group_endpoint")) { + } else if (value.f0.equalsIgnoreCase("group_endpoints")) { // Update egp - ArrayList egpList = SyncParse.parseGroupEndpoint(decoded64); + ArrayList egpList = SyncParse.parseGroupEndpointJSON(value.f1); egp = new EndpointGroupManagerV2(); String validMetricProfile = mps.getProfiles().get(0); @@ -416,7 +429,7 @@ public void flatMap2(String value, Collector> out) } } } - } + } @@ -427,7 +440,7 @@ public void flatMap2(String value, Collector> out) * for all entities in topology and for each received metric generates the * appropriate status events */ - private static class StatusMap extends RichCoFlatMapFunction, String, String> { + private static class StatusMap extends RichCoFlatMapFunction, Tuple2, String> { private static final long serialVersionUID = 1L; @@ -460,14 +473,24 @@ public void open(Configuration parameters) throws IOException, ParseException, U pID = Integer.toString(getRuntimeContext().getIndexOfThisSubtask()); - SyncData sd = new SyncData(); - String opsJSON = sd.readText(config.ops); - String apsJSON = sd.readText(config.aps); - ArrayList downList = sd.readDowntime(config.downtime); - ArrayList mpsList = sd.readMetricProfile(config.mps); - ArrayList egpListFull = sd.readGroupEndpoint(config.egp); + + ApiResourceManager amr = new ApiResourceManager(config.apiEndpoint,config.apiToken); + + if (config.apiProxy != null) { + amr.setProxy(config.apiProxy); + } + + amr.setReportID(config.reportID); + amr.getRemoteAll(); + + String opsJSON = amr.getResourceJSON(ApiResource.OPS); + String apsJSON = amr.getResourceJSON(ApiResource.AGGREGATION); + ArrayList downList = (ArrayList)(Arrays.asList(amr.getListDowntimes())); + ArrayList mpsList = (ArrayList)(Arrays.asList(amr.getListMetrics())); + ArrayList egpListFull = 
(ArrayList)(Arrays.asList(amr.getListGroupEndpoints())); + // create a new status manager sm = new StatusManager(); sm.setTimeout(config.timeout); @@ -534,32 +557,20 @@ public void flatMap1(Tuple2 value, Collector out) } } - public void flatMap2(String value, Collector out) throws IOException, ParseException { + public void flatMap2(Tuple2 value, Collector out) throws IOException, ParseException { - JsonParser jsonParser = new JsonParser(); - // parse the json root object - JsonElement jRoot = jsonParser.parse(value); - // parse the json field "data" and read it as string - // this is the base64 string payload - String data = jRoot.getAsJsonObject().get("data").getAsString(); - // Decode from base64 - byte[] decoded64 = Base64.decodeBase64(data.getBytes("UTF-8")); - JsonElement jAttr = jRoot.getAsJsonObject().get("attributes"); - Map attr = SyncParse.parseAttributes(jAttr); - // The sync dataset should have a type and report attribute and report should be the job's report - if (attr.containsKey("type") && attr.containsKey("report") && attr.get("report") == config.report ) { + - String sType = attr.get("type"); - LOG.info("Accepted " + sType + " for report: " + attr.get("report")); - if (sType.equalsIgnoreCase("metric_profile")) { + + if (value.f0.equalsIgnoreCase("metric_profile")) { // Update mps - ArrayList mpsList = SyncParse.parseMetricProfile(decoded64); + ArrayList mpsList = SyncParse.parseMetricJSON(value.f1); sm.mps = new MetricProfileManager(); sm.mps.loadFromList(mpsList); - } else if (sType.equals("group_endpoints")) { + } else if (value.f0.equals("group_endpoints")) { // Update egp - ArrayList egpList = SyncParse.parseGroupEndpoint(decoded64); + ArrayList egpList = SyncParse.parseGroupEndpointJSON(value.f1); String validMetricProfile = sm.mps.getProfiles().get(0); ArrayList validServices = sm.mps.getProfileServices(validMetricProfile); @@ -571,17 +582,24 @@ public void flatMap2(String value, Collector out) throws IOException, Pa egpTrim.add(egpItem); } } - sm.egp = new EndpointGroupManagerV2(); - sm.egp.loadFromList(egpTrim); - } else if (sType.equals("downtimes") && attr.containsKey("partition_date")) { - String pDate = attr.get("partition_date"); - ArrayList downList = SyncParse.parseDowntimes(decoded64); + // load next topology into a temporary endpoint group manager + EndpointGroupManagerV2 egpNext = new EndpointGroupManagerV2(); + egpNext.loadFromList(egpTrim); + + // Use existing topology manager inside status manager to make a comparison + // with the new topology stored in the temp endpoint group manager + // update topology also sets the next topology manager as status manager current + // topology manager only after removal of decomissioned items + sm.updateTopology(egpNext); + + + } else if (value.f0.equalsIgnoreCase("downtimes")) { + String pDate = Instant.now().toString().split("T")[0]; + ArrayList downList = SyncParse.parseDowntimesJSON(value.f1); // Update downtime cache in status manager sm.addDowntimeSet(pDate, downList); } - } else { - LOG.info("Declined " + attr.get("type") + "for report: " + attr.get("report")); - } + } diff --git a/flink_jobs/stream_status/src/main/java/argo/streaming/ArgoApiSource.java b/flink_jobs/stream_status/src/main/java/argo/streaming/ArgoApiSource.java new file mode 100644 index 00000000..f39b313d --- /dev/null +++ b/flink_jobs/stream_status/src/main/java/argo/streaming/ArgoApiSource.java @@ -0,0 +1,148 @@ +package argo.streaming; + + +import java.time.Duration; +import java.time.Instant; + +import 
org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import argo.amr.ApiResource; +import argo.amr.ApiResourceManager; + +/** + * Custom source to connect to ArgoWebApi service. Uses API Resource Manager + */ +public class ArgoApiSource extends RichSourceFunction> { + + private static final long serialVersionUID = 1L; + + // setup logger + static Logger LOG = LoggerFactory.getLogger(ArgoApiSource.class); + + private String endpoint = null; + private String token = null; + private String reportID = null; + private int hourCheck = 24; + private long interval = 100L; + private boolean verify = true; + private boolean useProxy = false; + private String proxyURL = ""; + private transient Object rateLck; // lock for waiting to establish rate + + private volatile boolean isRunning = true; + + private ApiResourceManager client = null; + private Instant timeSnapshot = null; + + + + + + public ArgoApiSource(String endpoint, String token, String reportID, int hourCheck, Long interval) { + this.endpoint = endpoint; + this.token = token; + this.reportID = reportID; + this.hourCheck = hourCheck; + this.interval = interval; + this.verify = true; + + } + + /** + * Set verify to true or false. If set to false AMS client will be able to contact AMS endpoints that use self-signed certificates + */ + public void setVerify(boolean verify) { + this.verify=verify; + } + /** + * Set proxy details for AMS client + */ + public void setProxy(String proxyURL) { + this.useProxy = true; + this.proxyURL = proxyURL; + } + + /** + * Unset proxy details for AMS client + */ + public void unsetProxy(String proxyURL) { + this.useProxy = false; + this.proxyURL = ""; + } + + + @Override + public void cancel() { + isRunning = false; + + } + + @Override + public void run(SourceContext> ctx) throws Exception { + // This is the main run logic + while (isRunning) { + + // check if interval in hours has passed to make a move + Instant ti = Instant.now(); + Duration td = Duration.between(this.timeSnapshot,ti); + // interval has passed do consume from api + if (td.toHours() > this.hourCheck) { + this.timeSnapshot = ti; + // retrieve info from api + this.client.getRemoteAll(); + + Tuple2 mt = new Tuple2("metric_profile",client.getResourceJSON(ApiResource.METRIC)); + Tuple2 gt = new Tuple2("group_endpoints",client.getResourceJSON(ApiResource.TOPOENDPOINTS)); + Tuple2 dt = new Tuple2("downtimes",client.getResourceJSON(ApiResource.DOWNTIMES)); + + ctx.collect(mt); + ctx.collect(gt); + ctx.collect(dt); + + + + } + synchronized (rateLck) { + rateLck.wait(this.interval); + } + + } + + } + + /** + * Argo-web-api Source initialization + */ + @Override + public void open(Configuration parameters) throws Exception { + // init rate lock + rateLck = new Object(); + + + this.timeSnapshot = Instant.now(); + + this.client = new ApiResourceManager(this.endpoint, this.token); + client.setReportID(this.reportID); + client.setVerify(this.verify); + if (this.useProxy) { + client.setProxy(this.proxyURL); + } + + + } + + @Override + public void close() throws Exception { + if (this.client != null) { + this.client = null; + } + synchronized (rateLck) { + rateLck.notify(); + } + } + +} diff --git a/flink_jobs/stream_status/src/main/java/argo/streaming/StatusConfig.java b/flink_jobs/stream_status/src/main/java/argo/streaming/StatusConfig.java index af1e355b..add15650 100644 --- 
a/flink_jobs/stream_status/src/main/java/argo/streaming/StatusConfig.java +++ b/flink_jobs/stream_status/src/main/java/argo/streaming/StatusConfig.java @@ -24,6 +24,14 @@ public class StatusConfig implements Serializable { public String report; + public String apiEndpoint; + public String apiToken; + public String apiProxy; + public Boolean apiVerify = false; + public int hourCheck = 24; + public String reportID; + public long interval = 100L; + // Sync files public String aps; public String mps; @@ -46,6 +54,8 @@ public StatusConfig(ParameterTool pt){ this.amsPort = pt.getRequired("ams.port"); this.amsToken = pt.getRequired("ams.token"); this.amsProject = pt.getRequired("ams.project"); + this.apiEndpoint = pt.getRequired("api.endpoint"); + this.aps = pt.getRequired("sync.apr"); this.mps = pt.getRequired("sync.mps"); @@ -70,6 +80,16 @@ public StatusConfig(ParameterTool pt){ } // Optional set daily parameter + + this.apiEndpoint = pt.getRequired("api.endpoint"); + this.apiToken = pt.getRequired("apiToken"); + this.reportID = pt.getRequired("reportID"); + + if (pt.has("api.proxy")) this.apiProxy = pt.get("api.proxy",""); + if (pt.has("api.verify")) this.apiVerify = pt.getBoolean("api.verify",false); + if (pt.has("api.interval")) this.hourCheck = pt.getInt("api.interval",24); + if (pt.has("ams.interval")) this.interval = pt.getLong("interval",100L); + this.daily = pt.getBoolean("daily",false); } diff --git a/flink_jobs/stream_status/src/main/java/argo/streaming/SyncParse.java b/flink_jobs/stream_status/src/main/java/argo/streaming/SyncParse.java index ff2726c8..fe5161a2 100644 --- a/flink_jobs/stream_status/src/main/java/argo/streaming/SyncParse.java +++ b/flink_jobs/stream_status/src/main/java/argo/streaming/SyncParse.java @@ -3,6 +3,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.Map.Entry; @@ -13,8 +14,12 @@ import org.apache.avro.specific.SpecificData; import org.apache.avro.specific.SpecificDatumReader; +import com.google.gson.JsonArray; import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import argo.amr.ApiResource; import argo.avro.Downtime; import argo.avro.GroupEndpoint; import argo.avro.MetricProfile; @@ -96,5 +101,82 @@ public static Map parseAttributes(JsonElement jAttr) throws IOExc return result; } + public static ArrayList parseMetricJSON(String content) { + ArrayList results = new ArrayList(); + + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + String profileName = jRoot.get("name").getAsString(); + JsonArray jElements = jRoot.get("services").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String service = jItem.get("service").getAsString(); + JsonArray jMetrics = jItem.get("metrics").getAsJsonArray(); + for (int j=0; j < jMetrics.size(); j++) { + String metric = jMetrics.get(j).getAsString(); + + Map tags = new HashMap(); + MetricProfile mp = new MetricProfile(profileName,service,metric,tags); + results.add(mp); + } + + } + + return results; + } + + public static ArrayList parseDowntimesJSON (String content) { + + ArrayList results = new ArrayList(); + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonObject jRoot = jElement.getAsJsonObject(); + JsonArray jElements = 
jRoot.get("endpoints").getAsJsonArray(); + for (int i = 0; i < jElements.size(); i++) { + JsonObject jItem= jElements.get(i).getAsJsonObject(); + String hostname = jItem.get("hostname").getAsString(); + String service = jItem.get("service").getAsString(); + String startTime = jItem.get("start_time").getAsString(); + String endTime = jItem.get("end_time").getAsString(); + + Downtime d = new Downtime(hostname,service,startTime,endTime); + results.add(d); + } + + return results; + + } + + public static ArrayList parseGroupEndpointJSON (String content) { + + ArrayList results = new ArrayList(); + + JsonParser jsonParser = new JsonParser(); + JsonElement jElement = jsonParser.parse(content); + JsonArray jRoot = jElement.getAsJsonArray(); + for (int i = 0; i < jRoot.size(); i++) { + JsonObject jItem= jRoot.get(i).getAsJsonObject(); + String group = jItem.get("group").getAsString(); + String gType = jItem.get("type").getAsString(); + String service = jItem.get("service").getAsString(); + String hostname = jItem.get("hostname").getAsString(); + JsonObject jTags = jItem.get("tags").getAsJsonObject(); + Map tags = new HashMap(); + for ( Entry kv : jTags.entrySet()) { + tags.put(kv.getKey(), kv.getValue().getAsString()); + } + GroupEndpoint ge = new GroupEndpoint(gType,group,service,hostname,tags); + results.add(ge); + } + + return results; + + } + + + } diff --git a/flink_jobs/stream_status/src/main/java/status/StatusEvent.java b/flink_jobs/stream_status/src/main/java/status/StatusEvent.java index 709ef57e..aec06845 100644 --- a/flink_jobs/stream_status/src/main/java/status/StatusEvent.java +++ b/flink_jobs/stream_status/src/main/java/status/StatusEvent.java @@ -28,6 +28,14 @@ public class StatusEvent{ private @SerializedName("status_service") String statusService[]; private @SerializedName("status_endpoint") String statusEndpoint[]; private @SerializedName("status_metric") String statusMetric[]; + // Record statuses of the other groups + private @SerializedName("group_statuses") String groupStatuses[]; + private @SerializedName("group_endpoints") String groupEndpoints[]; + private @SerializedName("group_services") String groupServices[]; + + // Record all statuses of endpoint's metrics + private @SerializedName("metric_statuses") String metricStatuses[]; + private @SerializedName("metric_names") String metricNames[]; public StatusEvent() { @@ -51,6 +59,11 @@ public StatusEvent() { this.statusService = new String[0]; this.statusEndpoint = new String[0]; this.statusMetric = new String[0]; + this.groupEndpoints = new String[0]; + this.groupServices= new String[0]; + this.groupStatuses = new String[0]; + this.metricStatuses = new String[0]; + this.metricNames = new String[0]; } @@ -76,6 +89,12 @@ public StatusEvent (String report, String type, String dt, String group,String s this.statusService = null; this.statusEndpoint = null; this.statusMetric = null; + this.groupEndpoints = null; + this.groupServices = null; + this.groupStatuses = null; + this.metricStatuses = null; + this.metricNames = null; + } @@ -109,7 +128,47 @@ public void setStatusMetric(String[] statusMetric ) { this.statusMetric = statusMetric; } + public void setGroupStatuses(String[] groupStatuses) { + this.groupStatuses = groupStatuses; + } + + public void setGroupEndpoints(String[] groupEndpoints) { + this.groupEndpoints = groupEndpoints; + } + + + public void setGroupServices(String[] groupServices) { + this.groupServices = groupServices; + } + + public String[] getGroupStatuses() { + return this.groupStatuses; + } + + public 
String[] getGroupServices() { + return this.groupServices; + } + + public String[] getGroupEndpoints() { + return this.groupEndpoints; + } + public String[] getMetricStatuses() { + return this.metricStatuses; + } + + public String[] getMetricNames() { + return this.metricNames; + } + + + public void setMetricNames(String[] metricNames) { + this.metricNames = metricNames; + } + + public void setMetricStatuses(String[] metricStatuses) { + this.metricStatuses = metricStatuses; + } public String getReport() { return report; } diff --git a/flink_jobs/stream_status/src/main/java/status/StatusManager.java b/flink_jobs/stream_status/src/main/java/status/StatusManager.java index 689dd800..5b05448c 100644 --- a/flink_jobs/stream_status/src/main/java/status/StatusManager.java +++ b/flink_jobs/stream_status/src/main/java/status/StatusManager.java @@ -13,7 +13,7 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.hadoop.hdfs.tools.DFSAdmin; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -21,20 +21,17 @@ import sync.AggregationProfileManager; import sync.DowntimeCache; -import sync.DowntimeManager; import sync.EndpointGroupManagerV2; import sync.EndpointGroupManagerV2.EndpointItem; import sync.MetricProfileManager; import ops.OpsManager; +import com.esotericsoftware.minlog.Log; import com.google.gson.Gson; import argo.avro.Downtime; import argo.avro.GroupEndpoint; import argo.avro.MetricProfile; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.codec.binary.Base64; /** @@ -166,7 +163,76 @@ public StatusNode(String type, int defStatus, Date defTs, StatusNode parent) { } } + + + public void removeEndpoint(String endpointDef) { + String[] tokens = endpointDef.split(","); + if (tokens.length != 4) return; //endpoint definition must split to 4 tokens + String etype = tokens[0]; + String group = tokens[1]; + String service = tokens[2]; + String hostname = tokens[3]; + + if (!this.groups.containsKey(group)) return; + StatusNode groupNode = this.groups.get(group); + + if (!groupNode.children.containsKey(service)) return; + StatusNode serviceNode = groupNode.children.get(service); + + if (!serviceNode.children.containsKey(hostname)) return; + + // Remove endpoint + serviceNode.children.remove(hostname); + Log.info("Removed endpoint:" + hostname + " from the tree"); + // if service node contains other items return + if (!serviceNode.children.isEmpty()) return; + groupNode.children.remove(service); + Log.info("Removed service:" + service + " from the tree"); + + // if group node contains other items return + if (!groupNode.children.isEmpty()) return; + this.groups.remove(group); + Log.info("Removed group:" + group + " from the tree"); + + + } + + public void updateTopology(EndpointGroupManagerV2 egpNext) { + // find a list of lost items to remove them from status tree + ArrayList lostItems = this.egp.compareToBeRemoved(egpNext); + + // removal performed + for (String item : lostItems) { + removeEndpoint(item); + } + + // set the next topology as status manager's current topology + this.egp = egpNext; + + } + public boolean hasEndpoint(String group, String service, String hostname) { + if (hasService(group, service)) { + StatusNode groupNode = this.groups.get(group); + StatusNode serviceNode = groupNode.children.get(service); + return serviceNode.children.containsKey(hostname); + } + + return false; + } + + /** + * Checks if this status manager handles the specific endpoint group service + */ + public boolean 
hasService(String group, String service) { + if (hasGroup(group)) { + StatusNode groupNode = this.groups.get(group); + return groupNode.children.containsKey(service); + } + + return false; + } + /** * Checks if this status manager handles the specific endpoint group */ @@ -486,6 +552,10 @@ public String toZulu(Date ts) throws ParseException { utcFormat.setTimeZone(TimeZone.getTimeZone("UTC")); return utcFormat.format(ts); } + + + + /** * For all entities in the topology generate status events @@ -553,6 +623,7 @@ public ArrayList dumpStatus(String tsStr) throws ParseException { evtEndpoint.setStatusMetric(statusMetric); evtEndpoint.setStatusEndpoint(statusEndpoint); + results.add(eventToString(evtEndpoint)); } // Generate service status event @@ -611,6 +682,90 @@ public boolean hasDowntime(String timestamp, String hostname, String service ) { // else everything is ok and timestamp belongs inside element's downtime period return true; } + + + /** + * getMetricStatuses receives a StatusNode of type "endpoint" iterates over the + * nested children nodes and captures information about all metric nodes included in the group + * + * @param egroup + * StatusNode input object of type "endpoint" + * @param ops + * OpsManager reference object to translate status ids to string names + * + * @return Map> a hashmap of two string arraylists keyed: "metrics", "statuses" + * + */ + public Map> getMetricStatuses(StatusNode endpoint, OpsManager ops) { + Map> results = new HashMap>(); + + ArrayList metrics = new ArrayList(); + ArrayList statuses = new ArrayList(); + + results.put("metrics", metrics); + results.put("statuses", statuses); + // check if StatusNode is indeed of endpoint group type + if (endpoint.type.equalsIgnoreCase("endpoint") == false) { + return results; + } + + + for (Entry metricEntry : endpoint.children.entrySet()) { + String metricName = metricEntry.getKey(); + StatusNode metric = metricEntry.getValue(); + // Add endpoint information to results + results.get("metrics").add(metricName); + results.get("statuses").add(ops.getStrStatus(metric.item.status)); + } + + + + return results; + } + + + /** + * getGroupEndpointStatuses receives a StatusNode of type "endpoint_group" iterates over the + * nested children nodes and captures information about all endpoint nodes included in the group + * + * @param egroup + * StatusNode input object of type "endpoint group" + * @param ops + * OpsManager reference object to translate status ids to string names + * + * @return Map> a hashmap of three string arraylists keyed: "endpoints", "services", "statuses" + * + */ + public Map> getGroupEndpointStatuses(StatusNode egroup, OpsManager ops) { + Map> results = new HashMap>(); + ArrayList endpoints = new ArrayList(); + ArrayList services = new ArrayList(); + ArrayList statuses = new ArrayList(); + results.put("endpoints", endpoints); + results.put("services", services); + results.put("statuses", statuses); + // check if StatusNode is indeed of endpoint group type + if (egroup.type.equalsIgnoreCase("group") == false) { + return results; + } + + for (Entry serviceEntry : egroup.children.entrySet()) { + String serviceName = serviceEntry.getKey(); + StatusNode service = serviceEntry.getValue(); + for (Entry endpointEntry : service.children.entrySet()) { + String endpointName = endpointEntry.getKey(); + StatusNode endpoint = endpointEntry.getValue(); + // Add endpoint information to results + results.get("endpoints").add(endpointName); + results.get("services").add(serviceName); + 
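+                // note: the "endpoints", "services" and "statuses" lists are kept index-aligned, so
+                // position i across the three of them describes a single endpoint of the group; the
+                // caller copies them verbatim into the group_* fields of the generated status event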
results.get("statuses").add(ops.getStrStatus(endpoint.item.status)); + } + } + + + + return results; + } /** * setStatus accepts an incoming metric event and checks which entities are @@ -645,8 +800,6 @@ public ArrayList setStatus(String group, String service, String hostname Date ts = fromZulu(tsStr); - - // Set StatusNodes StatusNode groupNode = null; StatusNode serviceNode = null; @@ -751,24 +904,32 @@ public ArrayList setStatus(String group, String service, String hostname int endpNewStatus = aggregate("", endpointNode, ts); // check if status changed boolean repeat = hasTimeDiff(ts,endpointNode.item.genTs,this.timeout); - if (true) { + - // generate event - evtEndpoint = genEvent("endpoint", group, service, hostname, metric, - ops.getStrStatus(endpNewStatus), monHost, ts, - ops.getStrStatus(oldEndpointStatus), oldEndpointTS,repeat,summary,message); - - // Create metric,endpoint status level object - statusEndpoint = new String[] {evtEndpoint.getStatus(),evtEndpoint.getPrevStatus(), evtEndpoint.getTsMonitored(), evtEndpoint.getPrevTs()}; - - evtEndpoint.setStatusMetric(statusMetric); - evtEndpoint.setStatusEndpoint(statusEndpoint); - results.add(eventToString(evtEndpoint)); - - endpointNode.item.status = endpNewStatus; - endpointNode.item.genTs = ts; - updEndpoint = true; - } + // generate event + evtEndpoint = genEvent("endpoint", group, service, hostname, metric, + ops.getStrStatus(endpNewStatus), monHost, ts, + ops.getStrStatus(oldEndpointStatus), oldEndpointTS,repeat,summary,message); + + // Create metric,endpoint status level object + statusEndpoint = new String[] {evtEndpoint.getStatus(),evtEndpoint.getPrevStatus(), evtEndpoint.getTsMonitored(), evtEndpoint.getPrevTs()}; + + evtEndpoint.setStatusMetric(statusMetric); + evtEndpoint.setStatusEndpoint(statusEndpoint); + + // generate group endpoint information + Map> metricStatuses = getMetricStatuses(endpointNode,ops); + evtEndpoint.setMetricNames( metricStatuses.get("metrics").toArray(new String[0])); + evtEndpoint.setMetricStatuses( metricStatuses.get("statuses").toArray(new String[0])); + + results.add(eventToString(evtEndpoint)); + + + + endpointNode.item.status = endpNewStatus; + endpointNode.item.genTs = ts; + updEndpoint = true; + } } @@ -778,27 +939,27 @@ public ArrayList setStatus(String group, String service, String hostname int servNewStatus = aggregate(service, serviceNode, ts); // check if status changed boolean repeat = hasTimeDiff(ts,groupNode.item.genTs,this.timeout); - if (true) { + - // generate event - evtService = genEvent("service", group, service, hostname, metric, ops.getStrStatus(servNewStatus), - monHost, ts, ops.getStrStatus(oldServiceStatus), oldServiceTS,repeat,summary,message); - - - // Create metric, endpoint, service status metric objects - statusService = new String[] {evtService.getStatus(),evtService.getPrevStatus(), evtService.getTsMonitored(), evtService.getPrevTs()}; - - evtService.setStatusMetric(statusMetric); - evtService.setStatusEndpoint(statusEndpoint); - evtService.setStatusService(statusService); - - - results.add(eventToString(evtService)); - serviceNode.item.status = servNewStatus; - serviceNode.item.genTs=ts; - updService = true; + // generate event + evtService = genEvent("service", group, service, hostname, metric, ops.getStrStatus(servNewStatus), + monHost, ts, ops.getStrStatus(oldServiceStatus), oldServiceTS,repeat,summary,message); + + + // Create metric, endpoint, service status metric objects + statusService = new String[] {evtService.getStatus(),evtService.getPrevStatus(), 
evtService.getTsMonitored(), evtService.getPrevTs()}; + + evtService.setStatusMetric(statusMetric); + evtService.setStatusEndpoint(statusEndpoint); + evtService.setStatusService(statusService); + + + results.add(eventToString(evtService)); + serviceNode.item.status = servNewStatus; + serviceNode.item.genTs=ts; + updService = true; - } + } } @@ -808,28 +969,34 @@ public ArrayList setStatus(String group, String service, String hostname int groupNewStatus = aggregate(group, groupNode, ts); // check if status changed boolean repeat = hasTimeDiff(ts,groupNode.item.genTs,this.timeout); - if (true){ - - // generate event - - evtEgroup = genEvent("endpoint_group", group, service, hostname, metric, ops.getStrStatus(groupNewStatus), - monHost, ts, ops.getStrStatus(oldGroupStatus), oldGroupTS,repeat,summary,message); - - // Create metric, endpoint, service, egroup status metric objects - statusEgroup = new String[] {evtEgroup.getStatus(),evtEgroup.getPrevStatus(), evtEgroup.getTsMonitored(), evtEgroup.getPrevTs()}; - - - evtEgroup.setStatusMetric(statusMetric); - evtEgroup.setStatusEndpoint(statusEndpoint); - evtEgroup.setStatusService(statusService); - evtEgroup.setStatusEgroup(statusEgroup); - - results.add(eventToString(evtEgroup)); + - groupNode.item.status = groupNewStatus; - groupNode.item.genTs = ts; + // generate event + + evtEgroup = genEvent("endpoint_group", group, service, hostname, metric, ops.getStrStatus(groupNewStatus), + monHost, ts, ops.getStrStatus(oldGroupStatus), oldGroupTS,repeat,summary,message); + + // Create metric, endpoint, service, egroup status metric objects + statusEgroup = new String[] {evtEgroup.getStatus(),evtEgroup.getPrevStatus(), evtEgroup.getTsMonitored(), evtEgroup.getPrevTs()}; + + // generate group endpoint information + Map> groupStatuses = getGroupEndpointStatuses(groupNode,ops); + evtEgroup.setGroupEndpoints( groupStatuses.get("endpoints").toArray(new String[0])); + evtEgroup.setGroupServices( groupStatuses.get("services").toArray(new String[0])); + evtEgroup.setGroupStatuses( groupStatuses.get("statuses").toArray(new String[0])); + + + evtEgroup.setStatusMetric(statusMetric); + evtEgroup.setStatusEndpoint(statusEndpoint); + evtEgroup.setStatusService(statusService); + evtEgroup.setStatusEgroup(statusEgroup); + + results.add(eventToString(evtEgroup)); + + groupNode.item.status = groupNewStatus; + groupNode.item.genTs = ts; - } + } } // If service host combination has downtime clear result set @@ -954,6 +1121,8 @@ public int aggregate(String itemName, StatusNode node, Date ts) { res = ops.opInt(gOp, res, b.item.status); aGroups.put(groupName, res); + } else { // if groupname doesn't exist add it + aGroups.put(groupName, b.item.status); } } diff --git a/flink_jobs/stream_status/src/main/java/sync/EndpointGroupManagerV2.java b/flink_jobs/stream_status/src/main/java/sync/EndpointGroupManagerV2.java index b1f2289a..38f8e201 100644 --- a/flink_jobs/stream_status/src/main/java/sync/EndpointGroupManagerV2.java +++ b/flink_jobs/stream_status/src/main/java/sync/EndpointGroupManagerV2.java @@ -6,10 +6,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; - +import java.util.Set; import org.apache.avro.Schema; import org.apache.avro.file.DataFileReader; @@ -62,6 +63,10 @@ public EndpointItem(String type, String group, String service, String hostname, public String getService() { return service; } public String getHostname() { return 
hostname; } + public String toString() { + return type+","+group+","+service+","+hostname; + } + } public Map> getList(){ @@ -75,6 +80,38 @@ public EndpointGroupManagerV2() { } + + public Set getEndpointSet() { + Set curItems = new HashSet(); + for (String groupKey:this.groupIndex.keySet()) { + ArrayList eList = this.groupIndex.get(groupKey); + for (EndpointItem item: eList) { + curItems.add(item.toString()); + } + } + return curItems; + } + + public ArrayList getGroupList() { + ArrayList results = new ArrayList(); + results.addAll(this.groupIndex.keySet()); + return results; + } + + public ArrayList compareToBeRemoved(EndpointGroupManagerV2 egp) { + + ArrayList results = new ArrayList(); + + Set curItems = this.getEndpointSet(); + Set futurItems = egp.getEndpointSet(); + + // lost items is cur items minus future set + curItems.removeAll(futurItems); + + results.addAll(curItems); + + return results; + } public int insert(String type, String group, String service, String hostname, HashMap tags) { EndpointItem itemNew = new EndpointItem(type, group, service, hostname, tags); diff --git a/flink_jobs/stream_status/src/main/resources/amr/agg_profile.json b/flink_jobs/stream_status/src/main/resources/amr/agg_profile.json new file mode 100644 index 00000000..66f9474d --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/agg_profile.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "date": "2020-06-24", + "name": "test-agg2", + "namespace": "", + "endpoint_group": "servicegroups", + "metric_operation": "AND", + "profile_operation": "AND", + "metric_profile": { + "name": "test-mon", + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4" + }, + "groups": [ + { + "name": "webportal", + "operation": "AND", + "services": [ + { + "name": "WebPortal", + "operation": "OR" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_AGGREGATION.json b/flink_jobs/stream_status/src/main/resources/amr/data_AGGREGATION.json new file mode 100644 index 00000000..f0f40f2c --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_AGGREGATION.json @@ -0,0 +1 @@ +{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","date":"2020-06-24","name":"test-agg2","namespace":"","endpoint_group":"servicegroups","metric_operation":"AND","profile_operation":"AND","metric_profile":{"name":"test-mon","id":"92fa5d74-015c-4122-b8b9-7b344f3154d4"},"groups":[{"name":"webportal","operation":"AND","services":[{"name":"WebPortal","operation":"OR"}]}]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_CONFIG.json b/flink_jobs/stream_status/src/main/resources/amr/data_CONFIG.json new file mode 100644 index 00000000..8220787f --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_CONFIG.json @@ -0,0 +1 @@ +{"id":"f29eeb59-ab38-4aa0-b372-5d3c0709dfb2","tenant":"demo","disabled":false,"info":{"name":"Critical","description":"test report","created":"2020-09-24 12:05:04","updated":"2020-10-08 
09:32:46"},"thresholds":{"availability":80,"reliability":85,"uptime":0.8,"unknown":0.1,"downtime":0.1},"topology_schema":{"group":{"type":"PROJECT","group":{"type":"SERVICEGROUPS"}}},"profiles":[{"id":"92fa5d74-015c-4122-b8b9-7b344f3154d4","name":"test-mon","type":"metric"},{"id":"2744247f-40f8-4dd6-b22c-76a3b38334d8","name":"test-agg2","type":"aggregation"},{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","name":"demo_ops","type":"operations"},{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","name":"endpoint_example","type":"thresholds"}],"filter_tags":[]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_DOWNTIMES.json b/flink_jobs/stream_status/src/main/resources/amr/data_DOWNTIMES.json new file mode 100644 index 00000000..b7d181aa --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_DOWNTIMES.json @@ -0,0 +1 @@ +{"date":"2020-11-10","endpoints":[{"hostname":"hostA.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortal","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"},{"hostname":"hostB.foo","service":"WebPortald","start_time":"2020-11-10T00:00:00Z","end_time":"2020-11-10T23:59:00Z"}]} \ No newline at end of file diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_METRIC.json b/flink_jobs/stream_status/src/main/resources/amr/data_METRIC.json new file mode 100644 index 00000000..b4681fcb --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_METRIC.json @@ -0,0 +1 @@ +{"id":"392fa5d74-015c-4122-b8b9-7b344f3154d4","date":"2020-09-24","name":"test-mon","description":"Generic monitoring profile","services":[{"service":"WebPortal","metrics":["org.nagios.WebCheck"]}]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_OPS.json b/flink_jobs/stream_status/src/main/resources/amr/data_OPS.json new file mode 100644 index 00000000..ff505f0a --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_OPS.json @@ -0,0 +1 @@ 
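The operations profile fixtures used by these tests (data_OPS.json just below, ops_profile.json further down) define AND/OR truth tables over the available states. As a rough illustration of how a table of that shape can be applied; the class and method names here are made up, and this is not the job's actual operations-manager code:

import java.util.HashMap;
import java.util.Map;

// Minimal truth-table lookup over states, as defined in an operations profile.
public class TruthTableSketch {

    private final Map<String, String> table = new HashMap<>();

    // register a rule symmetrically so (a,b) and (b,a) resolve the same way
    void addRule(String a, String b, String x) {
        table.put(a + "|" + b, x);
        table.put(b + "|" + a, x);
    }

    String apply(String a, String b) {
        return table.getOrDefault(a + "|" + b, "UNKNOWN"); // fallback is an assumption
    }

    public static void main(String[] args) {
        TruthTableSketch and = new TruthTableSketch();
        // a few rows taken from the AND table in the fixture below
        and.addRule("OK", "OK", "OK");
        and.addRule("OK", "WARNING", "WARNING");
        and.addRule("OK", "CRITICAL", "CRITICAL");
        and.addRule("WARNING", "CRITICAL", "CRITICAL");
        System.out.println(and.apply("WARNING", "OK"));       // WARNING
        System.out.println(and.apply("CRITICAL", "WARNING")); // CRITICAL
    }
}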
+{"id":"ea62ff1e-c6e1-438b-83c7-9262b3a4f179","date":"2020-06-24","name":"demo_ops","available_states":["OK","WARNING","UNKNOWN","MISSING","CRITICAL","DOWNTIME"],"defaults":{"down":"DOWNTIME","missing":"MISSING","unknown":"UNKNOWN"},"operations":[{"name":"AND","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"WARNING"},{"a":"OK","b":"UNKNOWN","x":"UNKNOWN"},{"a":"OK","b":"MISSING","x":"MISSING"},{"a":"OK","b":"CRITICAL","x":"CRITICAL"},{"a":"OK","b":"DOWNTIME","x":"DOWNTIME"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"UNKNOWN"},{"a":"WARNING","b":"MISSING","x":"MISSING"},{"a":"WARNING","b":"CRITICAL","x":"CRITICAL"},{"a":"WARNING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"MISSING"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"DOWNTIME"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]},{"name":"OR","truth_table":[{"a":"OK","b":"OK","x":"OK"},{"a":"OK","b":"WARNING","x":"OK"},{"a":"OK","b":"UNKNOWN","x":"OK"},{"a":"OK","b":"MISSING","x":"OK"},{"a":"OK","b":"CRITICAL","x":"OK"},{"a":"OK","b":"DOWNTIME","x":"OK"},{"a":"WARNING","b":"WARNING","x":"WARNING"},{"a":"WARNING","b":"UNKNOWN","x":"WARNING"},{"a":"WARNING","b":"MISSING","x":"WARNING"},{"a":"WARNING","b":"CRITICAL","x":"WARNING"},{"a":"WARNING","b":"DOWNTIME","x":"WARNING"},{"a":"UNKNOWN","b":"UNKNOWN","x":"UNKNOWN"},{"a":"UNKNOWN","b":"MISSING","x":"UNKNOWN"},{"a":"UNKNOWN","b":"CRITICAL","x":"CRITICAL"},{"a":"UNKNOWN","b":"DOWNTIME","x":"UNKNOWN"},{"a":"MISSING","b":"MISSING","x":"MISSING"},{"a":"MISSING","b":"CRITICAL","x":"CRITICAL"},{"a":"MISSING","b":"DOWNTIME","x":"DOWNTIME"},{"a":"CRITICAL","b":"CRITICAL","x":"CRITICAL"},{"a":"CRITICAL","b":"DOWNTIME","x":"CRITICAL"},{"a":"DOWNTIME","b":"DOWNTIME","x":"DOWNTIME"}]}]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_RECOMPUTATIONS.json b/flink_jobs/stream_status/src/main/resources/amr/data_RECOMPUTATIONS.json new file mode 100644 index 00000000..052b03aa --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_RECOMPUTATIONS.json @@ -0,0 +1 @@ +[{"id":"56db4f1a-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-01-21T23:01:00Z","end_time":"2018-01-23T12:01:00Z","report":"Critical","exclude":["SITE-1","SITE-2"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]},{"id":"66db4f55-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-05-21T23:01:00Z","end_time":"2018-05-23T12:01:00Z","report":"Critical","exclude":["SITE-3","SITE-4"],"status":"done","timestamp":"2018-06-17 17:03:55","history":[{"status":"pending","timestamp":"2018-06-30T11:41:26Z"}]},{"id":"76db4444-f331-46ca-b0fd-4555b4aa1cfc","requester_name":"john foo","requester_email":"foo1@email.com","reason":"ggus-reason01","start_time":"2018-09-10T23:01:00Z","end_time":"2018-09-15T12:01:00Z","report":"Critical","exclude":["SITE-6","SITE-7","SITE-8"],"status":"done","timestamp":"2018-03-17 17:03:55","history":[{"status":"pending","timestamp":"2018-01-30T11:41:26Z"}]}] diff --git 
a/flink_jobs/stream_status/src/main/resources/amr/data_THRESHOLDS.json b/flink_jobs/stream_status/src/main/resources/amr/data_THRESHOLDS.json new file mode 100644 index 00000000..453e5bdf --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_THRESHOLDS.json @@ -0,0 +1 @@ +{"id":"3345c3c1-322a-47f1-982c-1d9df1fc065e","date":"2015-01-01","name":"endpoint_example","rules":[{"host":"host1.foo.bar","metric":"service.freshness","thresholds":"freshness=1s;;0:;"}]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_TOPOENDPOINTS.json b/flink_jobs/stream_status/src/main/resources/amr/data_TOPOENDPOINTS.json new file mode 100644 index 00000000..10dd42cf --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_TOPOENDPOINTS.json @@ -0,0 +1 @@ +[{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host1.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupB","type":"SERVICEGROUPS","service":"webPortal","hostname":"host3.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}},{"date":"2020-11-10","group":"groupA","type":"SERVICEGROUPS","service":"webPortal","hostname":"host2.foo.bar","tags":{"monitored":"1","production":"1","scope":"FOO"}}] diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_TOPOGROUPS.json b/flink_jobs/stream_status/src/main/resources/amr/data_TOPOGROUPS.json new file mode 100644 index 00000000..1c8e4316 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_TOPOGROUPS.json @@ -0,0 +1 @@ +[{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-101","tags":{"monitored":"0","scope":"Local"}},{"date":"2020-11-11","group":"ORG-A","type":"PROJECT","subgroup":"GROUP-202","tags":{"monitored":"1","scope":"Local"}}] diff --git a/flink_jobs/stream_status/src/main/resources/amr/data_WEIGHTS.json b/flink_jobs/stream_status/src/main/resources/amr/data_WEIGHTS.json new file mode 100644 index 00000000..399c31c1 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/data_WEIGHTS.json @@ -0,0 +1 @@ +{"id":"3b9602ed-49ec-42f3-8df7-7c35331ebf69","date":"2020-09-02","name":"demo","weight_type":"computationpower","group_type":"SERVICEGROUPS","groups":[{"name":"GROUP-A","value":366},{"name":"GROUP-B","value":4000},{"name":"GROUP-C","value":19838},{"name":"GROUP-D","value":19838}]} diff --git a/flink_jobs/stream_status/src/main/resources/amr/downtimes.json b/flink_jobs/stream_status/src/main/resources/amr/downtimes.json new file mode 100644 index 00000000..7bf3adee --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/downtimes.json @@ -0,0 +1,31 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "endpoints": [ + { + "hostname": "hostA.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + }, + { + "hostname": "hostB.foo", + "service": "WebPortal", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + }, + { + "hostname": "hostB.foo", + "service": "WebPortald", + "start_time": "2020-11-10T00:00:00Z", + "end_time": "2020-11-10T23:59:00Z" + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/metric_profile.json b/flink_jobs/stream_status/src/main/resources/amr/metric_profile.json new file mode 100644 index 00000000..7ea5a470 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/metric_profile.json @@ -0,0 +1,22 @@ 
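The pretty-printed resources in this test tree (downtimes.json above, metric_profile.json and the rest below) mirror the web API envelope of {"status": {...}, "data": [...]}, while the single-line data_*.json fixtures hold only the extracted "data" payload: the first element for single-profile resources, the whole array for list resources such as topology or recomputations. A rough sketch of pulling that payload out with Gson, which these tests already use; the class and method names are illustrative, not the ApiResourceManager implementation:

import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

// Extract the first "data" element from an API-style response envelope.
public class EnvelopeSketch {

    static String firstDataElement(String envelopeJson) {
        JsonParser parser = new JsonParser();
        JsonObject root = parser.parse(envelopeJson).getAsJsonObject();
        // "data" is always an array in these fixtures; take its first element
        return root.get("data").getAsJsonArray().get(0).toString();
    }

    public static void main(String[] args) {
        String envelope = "{\"status\":{\"message\":\"Success\",\"code\":\"200\"},"
                + "\"data\":[{\"name\":\"test-mon\"}]}";
        System.out.println(firstDataElement(envelope)); // {"name":"test-mon"}
    }
}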
+{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "392fa5d74-015c-4122-b8b9-7b344f3154d4", + "date": "2020-09-24", + "name": "test-mon", + "description": "Generic monitoring profile", + "services": [ + { + "service": "WebPortal", + "metrics": [ + "org.nagios.WebCheck" + ] + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/ops_profile.json b/flink_jobs/stream_status/src/main/resources/amr/ops_profile.json new file mode 100644 index 00000000..9b00f14b --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/ops_profile.json @@ -0,0 +1,248 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "date": "2020-06-24", + "name": "demo_ops", + "available_states": [ + "OK", + "WARNING", + "UNKNOWN", + "MISSING", + "CRITICAL", + "DOWNTIME" + ], + "defaults": { + "down": "DOWNTIME", + "missing": "MISSING", + "unknown": "UNKNOWN" + }, + "operations": [ + { + "name": "AND", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "OK", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": "CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + }, + { + "name": "OR", + "truth_table": [ + { + "a": "OK", + "b": "OK", + "x": "OK" + }, + { + "a": "OK", + "b": "WARNING", + "x": "OK" + }, + { + "a": "OK", + "b": "UNKNOWN", + "x": "OK" + }, + { + "a": "OK", + "b": "MISSING", + "x": "OK" + }, + { + "a": "OK", + "b": "CRITICAL", + "x": "OK" + }, + { + "a": "OK", + "b": "DOWNTIME", + "x": "OK" + }, + { + "a": "WARNING", + "b": "WARNING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "UNKNOWN", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "MISSING", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "CRITICAL", + "x": "WARNING" + }, + { + "a": "WARNING", + "b": "DOWNTIME", + "x": "WARNING" + }, + { + "a": "UNKNOWN", + "b": "UNKNOWN", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "MISSING", + "x": "UNKNOWN" + }, + { + "a": "UNKNOWN", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "UNKNOWN", + "b": "DOWNTIME", + "x": "UNKNOWN" + }, + { + "a": "MISSING", + "b": "MISSING", + "x": "MISSING" + }, + { + "a": "MISSING", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "MISSING", + "b": "DOWNTIME", + "x": "DOWNTIME" + }, + { + "a": "CRITICAL", + "b": "CRITICAL", + "x": "CRITICAL" + }, + { + "a": "CRITICAL", + "b": "DOWNTIME", + "x": 
"CRITICAL" + }, + { + "a": "DOWNTIME", + "b": "DOWNTIME", + "x": "DOWNTIME" + } + ] + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/recomputations.json b/flink_jobs/stream_status/src/main/resources/amr/recomputations.json new file mode 100644 index 00000000..b597ad09 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/recomputations.json @@ -0,0 +1,72 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "56db4f1a-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-01-21T23:01:00Z", + "end_time": "2018-01-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-1", + "SITE-2" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + }, + { + "id": "66db4f55-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-05-21T23:01:00Z", + "end_time": "2018-05-23T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-3", + "SITE-4" + ], + "status": "done", + "timestamp": "2018-06-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-06-30T11:41:26Z" + } + ] + }, + { + "id": "76db4444-f331-46ca-b0fd-4555b4aa1cfc", + "requester_name": "john foo", + "requester_email": "foo1@email.com", + "reason": "ggus-reason01", + "start_time": "2018-09-10T23:01:00Z", + "end_time": "2018-09-15T12:01:00Z", + "report": "Critical", + "exclude": [ + "SITE-6", + "SITE-7", + "SITE-8" + ], + "status": "done", + "timestamp": "2018-03-17 17:03:55", + "history": [ + { + "status": "pending", + "timestamp": "2018-01-30T11:41:26Z" + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/report.json b/flink_jobs/stream_status/src/main/resources/amr/report.json new file mode 100644 index 00000000..fa5a5f65 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/report.json @@ -0,0 +1,57 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "f29eeb59-ab38-4aa0-b372-5d3c0709dfb2", + "tenant": "demo", + "disabled": false, + "info": { + "name": "Critical", + "description": "test report", + "created": "2020-09-24 12:05:04", + "updated": "2020-10-08 09:32:46" + }, + "thresholds": { + "availability": 80, + "reliability": 85, + "uptime": 0.8, + "unknown": 0.1, + "downtime": 0.1 + }, + "topology_schema": { + "group": { + "type": "PROJECT", + "group": { + "type": "SERVICEGROUPS" + } + } + }, + "profiles": [ + { + "id": "92fa5d74-015c-4122-b8b9-7b344f3154d4", + "name": "test-mon", + "type": "metric" + }, + { + "id": "2744247f-40f8-4dd6-b22c-76a3b38334d8", + "name": "test-agg2", + "type": "aggregation" + }, + { + "id": "ea62ff1e-c6e1-438b-83c7-9262b3a4f179", + "name": "demo_ops", + "type": "operations" + }, + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "name": "endpoint_example", + "type": "thresholds" + } + ], + "filter_tags": [] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/thresholds.json b/flink_jobs/stream_status/src/main/resources/amr/thresholds.json new file mode 100644 index 00000000..1c1ac3fb --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/thresholds.json @@ -0,0 +1,20 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3345c3c1-322a-47f1-982c-1d9df1fc065e", + "date": 
"2015-01-01", + "name": "endpoint_example", + "rules": [ + { + "host": "host1.foo.bar", + "metric": "service.freshness", + "thresholds": "freshness=1s;;0:;" + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/topoendpoints.json b/flink_jobs/stream_status/src/main/resources/amr/topoendpoints.json new file mode 100644 index 00000000..2b1cfed5 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/topoendpoints.json @@ -0,0 +1,44 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host1.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupB", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host3.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + }, + { + "date": "2020-11-10", + "group": "groupA", + "type": "SERVICEGROUPS", + "service": "webPortal", + "hostname": "host2.foo.bar", + "tags": { + "monitored": "1", + "production": "1", + "scope": "FOO" + } + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/topogroups.json b/flink_jobs/stream_status/src/main/resources/amr/topogroups.json new file mode 100644 index 00000000..6286cc55 --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/topogroups.json @@ -0,0 +1,28 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-101", + "tags": { + "monitored": "0", + "scope": "Local" + } + }, + { + "date": "2020-11-11", + "group": "ORG-A", + "type": "PROJECT", + "subgroup": "GROUP-202", + "tags": { + "monitored": "1", + "scope": "Local" + } + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/amr/weights.json b/flink_jobs/stream_status/src/main/resources/amr/weights.json new file mode 100644 index 00000000..fc1dea3f --- /dev/null +++ b/flink_jobs/stream_status/src/main/resources/amr/weights.json @@ -0,0 +1,33 @@ +{ + "status": { + "message": "Success", + "code": "200" + }, + "data": [ + { + "id": "3b9602ed-49ec-42f3-8df7-7c35331ebf69", + "date": "2020-09-02", + "name": "demo", + "weight_type": "computationpower", + "group_type": "SERVICEGROUPS", + "groups": [ + { + "name": "GROUP-A", + "value": 366 + }, + { + "name": "GROUP-B", + "value": 4000 + }, + { + "name": "GROUP-C", + "value": 19838 + }, + { + "name": "GROUP-D", + "value": 19838 + } + ] + } + ] +} diff --git a/flink_jobs/stream_status/src/main/resources/avro/gp_day01.avro b/flink_jobs/stream_status/src/main/resources/avro/gp_day01.avro new file mode 100644 index 00000000..acd7170c Binary files /dev/null and b/flink_jobs/stream_status/src/main/resources/avro/gp_day01.avro differ diff --git a/flink_jobs/stream_status/src/main/resources/avro/gp_day02.avro b/flink_jobs/stream_status/src/main/resources/avro/gp_day02.avro new file mode 100644 index 00000000..5c9631d6 Binary files /dev/null and b/flink_jobs/stream_status/src/main/resources/avro/gp_day02.avro differ diff --git a/flink_jobs/stream_status/src/main/resources/avro/gp_day03.avro b/flink_jobs/stream_status/src/main/resources/avro/gp_day03.avro new file mode 100644 index 00000000..c4b9f742 Binary files /dev/null and b/flink_jobs/stream_status/src/main/resources/avro/gp_day03.avro differ diff --git a/flink_jobs/stream_status/src/test/java/argo/amr/ApiResourceManagerTest.java 
b/flink_jobs/stream_status/src/test/java/argo/amr/ApiResourceManagerTest.java new file mode 100644 index 00000000..4a384ada --- /dev/null +++ b/flink_jobs/stream_status/src/test/java/argo/amr/ApiResourceManagerTest.java @@ -0,0 +1,288 @@ +package argo.amr; + +import static org.junit.Assert.*; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.text.ParseException; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.http.client.ClientProtocolException; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.junit.WireMockRule; + +import argo.avro.Downtime; +import argo.avro.GroupEndpoint; +import argo.avro.GroupGroup; +import argo.avro.MetricProfile; +import argo.avro.Weight; + +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.configureFor; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; + + +public class ApiResourceManagerTest { + + public static String loadResJSON(String resURL) { + + InputStream jsonInputStream + = ApiResourceManagerTest.class.getResourceAsStream(resURL); + String content = new BufferedReader( + new InputStreamReader(jsonInputStream, StandardCharsets.UTF_8)) + .lines() + .collect(Collectors.joining("\n")); + return content; + + } + + @Rule + public WireMockRule wireMockRule = new WireMockRule(wireMockConfig().httpsPort(8443)); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/report.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/metric_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/agg_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/ops_profile.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/thresholds.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topoendpoints.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/topogroups.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/downtimes.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/weights.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/recomputations.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_CONFIG.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_METRIC.json")); + assertNotNull("Test 
file missing", ApiResourceManagerTest.class.getResource("/amr/data_AGGREGATION.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_OPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_THRESHOLDS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOENDPOINTS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_TOPOGROUPS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_DOWNTIMES.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_WEIGHTS.json")); + assertNotNull("Test file missing", ApiResourceManagerTest.class.getResource("/amr/data_RECOMPUTATIONS.json")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + // load mock api response content + String jsonReport = loadResJSON("/amr/report.json"); + String jsonMetric = loadResJSON("/amr/metric_profile.json"); + String jsonAgg = loadResJSON("/amr/agg_profile.json"); + String jsonOps = loadResJSON("/amr/ops_profile.json"); + String jsonThresholds = loadResJSON("/amr/thresholds.json"); + String jsonTopoEnd = loadResJSON("/amr/topoendpoints.json"); + String jsonTopoGroups = loadResJSON("/amr/topogroups.json"); + String jsonDowntimes = loadResJSON("/amr/downtimes.json"); + String jsonWeights = loadResJSON("/amr/weights.json"); + String jsonRecomp = loadResJSON("/amr/recomputations.json"); + + // get json data items + + String dataConfig = loadResJSON("/amr/data_CONFIG.json"); + String dataMetric = loadResJSON("/amr/data_METRIC.json"); + String dataAggr = loadResJSON("/amr/data_AGGREGATION.json"); + String dataOps = loadResJSON("/amr/data_OPS.json"); + String dataThresh = loadResJSON("/amr/data_THRESHOLDS.json"); + String dataTopoEnd = loadResJSON("/amr/data_TOPOENDPOINTS.json"); + String dataTopoGroup = loadResJSON("/amr/data_TOPOGROUPS.json"); + String dataDown = loadResJSON("/amr/data_DOWNTIMES.json"); + String dataWeights = loadResJSON("/amr/data_WEIGHTS.json"); + String dataRecomp = loadResJSON("/amr/data_RECOMPUTATIONS.json"); + + + + + stubFor(get(urlEqualTo("/api/v2/reports/f29eeb59-ab38-4aa0-b372-5d3c0709dfb2")) + .willReturn(aResponse().withBody(jsonReport))); + stubFor(get(urlEqualTo("/api/v2/metric_profiles/92fa5d74-015c-4122-b8b9-7b344f3154d4?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonMetric))); + stubFor(get(urlEqualTo("/api/v2/aggregation_profiles/2744247f-40f8-4dd6-b22c-76a3b38334d8?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonAgg))); + stubFor(get(urlEqualTo("/api/v2/operations_profiles/ea62ff1e-c6e1-438b-83c7-9262b3a4f179?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonOps))); + stubFor(get(urlEqualTo("/api/v2/thresholds_profiles/3345c3c1-322a-47f1-982c-1d9df1fc065e?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonThresholds))); + stubFor(get(urlEqualTo("/api/v2/topology/endpoints/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoEnd))); + stubFor(get(urlEqualTo("/api/v2/topology/groups/by_report/Critical?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonTopoGroups))); + stubFor(get(urlEqualTo("/api/v2/downtimes?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonDowntimes))); + stubFor(get(urlEqualTo("/api/v2/weights/3b9602ed-49ec-42f3-8df7-7c35331ebf69?date=2020-11-01")) + 
.willReturn(aResponse().withBody(jsonWeights))); + stubFor(get(urlEqualTo("/api/v2/recomputations?date=2020-11-01")) + .willReturn(aResponse().withBody(jsonRecomp))); + + ApiResourceManager amr = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr.setDate("2020-11-01"); + amr.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr.setToken("s3cr3t"); + amr.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr.setVerify(false); + + // Get the report configuration first and parse it + amr.getRemoteConfig(); + amr.parseReport(); + + assertEquals("report name retrieved","Critical",amr.getReportName()); + assertEquals("metric id retrieved","92fa5d74-015c-4122-b8b9-7b344f3154d4",amr.getMetricID()); + assertEquals("ops id retrieved","ea62ff1e-c6e1-438b-83c7-9262b3a4f179",amr.getOpsID()); + assertEquals("aggregations id retrieved","2744247f-40f8-4dd6-b22c-76a3b38334d8",amr.getAggregationID()); + assertEquals("thresholds id retrieved","3345c3c1-322a-47f1-982c-1d9df1fc065e",amr.getThresholdsID()); + + assertEquals("retrieved config data",dataConfig,amr.getResourceJSON(ApiResource.CONFIG)); + + + // get the profiles metric, aggregation, ops and thresholds + amr.getRemoteMetric(); + amr.getRemoteAggregation(); + amr.getRemoteOps(); + amr.getRemoteThresholds(); + + assertEquals("retrieved metric profile data",dataMetric,amr.getResourceJSON(ApiResource.METRIC)); + assertEquals("retrieved aggregation profile data",dataAggr,amr.getResourceJSON(ApiResource.AGGREGATION)); + assertEquals("retrieved ops profile data",dataOps,amr.getResourceJSON(ApiResource.OPS)); + assertEquals("retrieved thresholds profile data",dataThresh,amr.getResourceJSON(ApiResource.THRESHOLDS)); + + // get remote topology + + amr.getRemoteTopoEndpoints(); + amr.getRemoteTopoGroups(); + + assertEquals("retrieved topology endpoints",dataTopoEnd,amr.getResourceJSON(ApiResource.TOPOENDPOINTS)); + assertEquals("retrieved topology groups",dataTopoGroup,amr.getResourceJSON(ApiResource.TOPOGROUPS)); + + + // get remote downtimes + amr.getRemoteDowntimes(); + assertEquals("retrieved downtimes",dataDown,amr.getResourceJSON(ApiResource.DOWNTIMES)); + + // get weights + amr.getRemoteWeights(); + assertEquals("retrieved weights",dataWeights,amr.getResourceJSON(ApiResource.WEIGHTS)); + + // get recomputations + amr.getRemoteRecomputations(); + assertEquals("retrieved recomputations",dataRecomp,amr.getResourceJSON(ApiResource.RECOMPUTATIONS)); + + // initiate a second amr and check the getRemoteAll routine + + + ApiResourceManager amr2 = new ApiResourceManager("localhost:8443", "s3cr3t"); + amr2.setDate("2020-11-01"); + amr2.setReportID("f29eeb59-ab38-4aa0-b372-5d3c0709dfb2"); + amr2.setToken("s3cr3t"); + amr2.setWeightsID("3b9602ed-49ec-42f3-8df7-7c35331ebf69"); + amr2.setVerify(false); + + amr2.getRemoteAll(); + + // test amr2 downtime list + Downtime[] dtl = amr2.getListDowntimes(); + assertEquals("downtime list size", 3, dtl.length); + assertEquals("downtime data", "WebPortal", dtl[0].getService()); + assertEquals("downtime data", "hostA.foo", dtl[0].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[0].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[0].getEndTime()); + assertEquals("downtime data", "WebPortal", dtl[1].getService()); + assertEquals("downtime data", "hostB.foo", dtl[1].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[1].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[1].getEndTime()); + assertEquals("downtime
data", "WebPortald", dtl[2].getService()); + assertEquals("downtime data", "hostB.foo", dtl[2].getHostname()); + assertEquals("downtime data", "2020-11-10T00:00:00Z", dtl[2].getStartTime()); + assertEquals("downtime data", "2020-11-10T23:59:00Z", dtl[2].getEndTime()); + + // test amr2 group endpoint list + GroupEndpoint[] gel = amr2.getListGroupEndpoints(); + assertEquals("group endpoint list size", 3, gel.length); + assertEquals("group endpoint data", "SERVICEGROUPS", gel[0].getType()); + assertEquals("group endpoint data", "groupA", gel[0].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[0].getService()); + assertEquals("group endpoint data", "host1.foo.bar", gel[0].getHostname()); + assertEquals("group endpoint data", "1", gel[0].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[0].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[0].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[1].getType()); + assertEquals("group endpoint data", "groupB", gel[1].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[1].getService()); + assertEquals("group endpoint data", "host3.foo.bar", gel[1].getHostname()); + assertEquals("group endpoint data", "1", gel[1].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[1].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[1].getTags().get("scope")); + + assertEquals("group endpoint data", "SERVICEGROUPS", gel[2].getType()); + assertEquals("group endpoint data", "groupA", gel[2].getGroup()); + assertEquals("group endpoint data", "webPortal", gel[2].getService()); + assertEquals("group endpoint data", "host2.foo.bar", gel[2].getHostname()); + assertEquals("group endpoint data", "1", gel[2].getTags().get("monitored")); + assertEquals("group endpoint data", "1", gel[2].getTags().get("production")); + assertEquals("group endpoint data", "FOO", gel[2].getTags().get("scope")); + + // test amr2 group groups list + GroupGroup[] ggl = amr2.getListGroupGroups(); + assertEquals("group endpoint list size", 2, ggl.length); + assertEquals("group endpoint data", "PROJECT", ggl[0].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[0].getGroup()); + assertEquals("group endpoint data", "GROUP-101", ggl[0].getSubgroup()); + assertEquals("group endpoint data", "0", ggl[0].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[0].getTags().get("scope")); + + assertEquals("group endpoint data", "PROJECT", ggl[1].getType()); + assertEquals("group endpoint data", "ORG-A", ggl[1].getGroup()); + assertEquals("group endpoint data", "GROUP-202", ggl[1].getSubgroup()); + assertEquals("group endpoint data", "1", ggl[1].getTags().get("monitored")); + assertEquals("group endpoint data", "Local", ggl[1].getTags().get("scope")); + + // test amr2 weights list + Weight[] wl = amr2.getListWeights(); + assertEquals("group endpoint list size", 4, wl.length); + assertEquals("group endpoint data", "computationpower", wl[0].getType()); + assertEquals("group endpoint data", "GROUP-A", wl[0].getSite()); + assertEquals("group endpoint data", "366", wl[0].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[1].getType()); + assertEquals("group endpoint data", "GROUP-B", wl[1].getSite()); + assertEquals("group endpoint data", "4000", wl[1].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[2].getType()); + assertEquals("group endpoint data", 
"GROUP-C", wl[2].getSite()); + assertEquals("group endpoint data", "19838", wl[2].getWeight()); + + assertEquals("group endpoint data", "computationpower", wl[3].getType()); + assertEquals("group endpoint data", "GROUP-D", wl[3].getSite()); + assertEquals("group endpoint data", "19838", wl[3].getWeight()); + + // test amr2 metric profile list + MetricProfile[] mpl = amr2.getListMetrics(); + assertEquals("group endpoint list size", 1, mpl.length); + assertEquals("group endpoint data", "test-mon", mpl[0].getProfile()); + assertEquals("group endpoint data", "WebPortal", mpl[0].getService()); + assertEquals("group endpoint data", "org.nagios.WebCheck", mpl[0].getMetric()); + assertEquals("group endpoint data", 0, mpl[0].getTags().size()); + + + + + } + +} diff --git a/flink_jobs/stream_status/src/test/java/status/StatusManagerDecomissionTest.java b/flink_jobs/stream_status/src/test/java/status/StatusManagerDecomissionTest.java new file mode 100644 index 00000000..53d9bab1 --- /dev/null +++ b/flink_jobs/stream_status/src/test/java/status/StatusManagerDecomissionTest.java @@ -0,0 +1,140 @@ +package status; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Date; + + + +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.gson.Gson; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; + +import status.StatusManager.StatusNode; +import sync.EndpointGroupManagerV2; +import sync.EndpointGroupManagerV2Test; + +public class StatusManagerDecomissionTest { + + + public JsonObject getJSON (String jsonSTR) { + + + // Gather message from json + JsonParser jsonParser = new JsonParser(); + // parse the json root object + JsonObject jRoot = jsonParser.parse(jsonSTR).getAsJsonObject(); + return jRoot; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Assert that files are present + assertNotNull("Test file missing", StatusManagerDecomissionTest.class.getResource("/ops/ap1.json")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/group_endpoints_v2.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day01.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day02.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day03.avro")); + } + + @Test + public void test() throws URISyntaxException, IOException, ParseException { + + + + // Prepare Resource File + URL resAPSJsonFile = StatusManagerDecomissionTest.class.getResource("/ops/ap1.json"); + File jsonAPSFile = new File(resAPSJsonFile.toURI()); + + URL resOPSJsonFile = StatusManagerDecomissionTest.class.getResource("/ops/EGI-algorithm.json"); + File jsonOPSFile = new File(resOPSJsonFile.toURI()); + + URL resEGPAvroFile = StatusManagerDecomissionTest.class.getResource("/avro/gp_day01.avro"); + File avroEGPFile = new File(resEGPAvroFile.toURI()); + + + + URL resMPSAvroFile = StatusManagerDecomissionTest.class.getResource("/avro/poem_sync_2017_03_02.avro"); + File avroMPSFile = new File(resMPSAvroFile.toURI()); + + URL resDownAvroFile = StatusManagerDecomissionTest.class.getResource("/avro/downtimes_03.avro"); + File avroDownFile = new File(resDownAvroFile.toURI()); + + StatusManager sm = 
new StatusManager(); + sm.setReport("Critical"); + sm.loadAllFiles("2019-06-01", avroDownFile, avroEGPFile, avroMPSFile, jsonAPSFile, jsonOPSFile); + + // Prepare Resource File + URL resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/gp_day01.avro"); + File avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 geDay01 = new EndpointGroupManagerV2(); + // Load the topology file for day 1 + geDay01.loadAvro(avroFile); + + Date ts = sm.fromZulu("2017-06-01T00:00:00Z"); + int status = sm.ops.getIntStatus("OK"); + + ArrayList<String> groupList = geDay01.getGroupList(); + for (String group : groupList) { + sm.addNewGroup(group, status, ts); + } + + + + + // Prepare Resource File + resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/gp_day03.avro"); + avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 geDay03 = new EndpointGroupManagerV2(); + // Load the topology file for day 3 - topology change + geDay03.loadAvro(avroFile); + + ArrayList<String> lost = geDay01.compareToBeRemoved(geDay03); + + + // Check that the items to be removed exist in the current status tree + for (String item : lost) { + String[] tokens = item.split(","); + // check only for services "CREAM-CE" "ARC-CE" + String group = tokens[1]; + String service = tokens[2]; + String hostname = tokens[3]; + if (service.equals("ARC-CE") || service.contentEquals("CREAM-CE")){ + assertTrue(sm.hasEndpoint(group, service, hostname)); + } + } + + sm.updateTopology(geDay03); + + // Check that the items to be removed don't exist in the tree now + for (String item : lost) { + String[] tokens = item.split(","); + // check only for services "CREAM-CE" "ARC-CE" + String group = tokens[1]; + String service = tokens[2]; + String hostname = tokens[3]; + if (service.equals("ARC-CE") || service.contentEquals("CREAM-CE")){ + assertFalse(sm.hasEndpoint(group, service, hostname)); + } + } + + + } + +} diff --git a/flink_jobs/stream_status/src/test/java/status/StatusManagerTest.java b/flink_jobs/stream_status/src/test/java/status/StatusManagerTest.java index 9ca33b4e..b8c048fb 100644 --- a/flink_jobs/stream_status/src/test/java/status/StatusManagerTest.java +++ b/flink_jobs/stream_status/src/test/java/status/StatusManagerTest.java @@ -64,6 +64,8 @@ public void test() throws URISyntaxException, IOException, ParseException { sm.setReport("Critical"); sm.loadAllFiles("2017-03-03", avroDownFile, avroEGPFile, avroMPSFile, jsonAPSFile, jsonOPSFile); + System.out.println(""); + + Date ts1 = sm.fromZulu("2017-03-03T00:00:00Z"); sm.addNewGroup("GR-01-AUTH",sm.ops.getIntStatus("OK"), ts1); @@ -275,7 +277,7 @@ public void test() throws URISyntaxException, IOException, ParseException { ArrayList<String> elist07 = sm.setStatus("UKI-LT2-IC-HEP", "CREAM-CE", "ceprod05.grid.hep.ph.ic.ac.uk", "emi.cream.CREAMCE-JobCancel", "OK", "mon01.argo.eu", "2017-03-03T22:30:00Z","",""); - + assertTrue(elist07.size()==4); j01 = getJSON(elist07.get(0)); @@ -283,7 +285,14 @@ public void test() throws URISyntaxException, IOException, ParseException { j03 = getJSON(elist07.get(2)); j04 = getJSON(elist07.get(3)); + // check list of metric statuses and metric names included in the endpoint + assertTrue(j02.get("metric_statuses").toString().equals("[\"OK\",\"OK\",\"OK\",\"OK\",\"OK\",\"OK\"]")); +
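// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the assertions below verify the
// new array fields carried by the enriched events (metric_names/metric_statuses
// on endpoint events, group_endpoints/group_services/group_statuses on
// endpoint_group events). A downstream consumer could read them back with Gson
// roughly as follows; the event string here is trimmed to just those fields and
// is an assumption, not the full event schema.
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

class EventFieldSketch {
    public static void main(String[] args) {
        String endpointEvent = "{\"type\":\"endpoint\","
                + "\"metric_names\":[\"emi.cream.CREAMCE-JobCancel\"],"
                + "\"metric_statuses\":[\"OK\"]}";
        JsonObject j = new JsonParser().parse(endpointEvent).getAsJsonObject();
        JsonArray names = j.get("metric_names").getAsJsonArray();
        JsonArray statuses = j.get("metric_statuses").getAsJsonArray();
        for (int i = 0; i < names.size(); i++) {
            // each metric name lines up with its status at the same index
            System.out.println(names.get(i).getAsString() + " -> " + statuses.get(i).getAsString());
        }
    }
}
// ---------------------------------------------------------------------------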
assertTrue(j02.get("metric_names").toString().equals("[\"emi.cream.CREAMCE-ServiceInfo\",\"emi.cream.CREAMCE-JobCancel\",\"hr.srce.CREAMCE-CertLifetime\",\"eu.egi.CREAM-IGTF\",\"emi.cream.CREAMCE-JobPurge\",\"emi.cream.CREAMCE-AllowedSubmission\"]")); + // check if endpoint groups have been captured + assertTrue(j04.get("group_endpoints").toString().equals("[\"cetest01.grid.hep.ph.ic.ac.uk\",\"cetest02.grid.hep.ph.ic.ac.uk\",\"bdii.grid.hep.ph.ic.ac.uk\",\"ceprod08.grid.hep.ph.ic.ac.uk\",\"ceprod06.grid.hep.ph.ic.ac.uk\",\"ceprod07.grid.hep.ph.ic.ac.uk\",\"ceprod05.grid.hep.ph.ic.ac.uk\"]")); + assertTrue(j04.get("group_services").toString().equals("[\"ARC-CE\",\"ARC-CE\",\"Site-BDII\",\"CREAM-CE\",\"CREAM-CE\",\"CREAM-CE\",\"CREAM-CE\"]")); + assertTrue(j04.get("group_statuses").toString().equals("[\"CRITICAL\",\"CRITICAL\",\"OK\",\"CRITICAL\",\"CRITICAL\",\"CRITICAL\",\"OK\"]")); assertTrue(j01.get("type").getAsString().equals("metric")); assertTrue(j02.get("type").getAsString().equals("endpoint")); assertTrue(j03.get("type").getAsString().equals("service")); diff --git a/flink_jobs/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java b/flink_jobs/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java index 708dc6d9..42ba8778 100644 --- a/flink_jobs/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java +++ b/flink_jobs/stream_status/src/test/java/sync/EndpointGroupManagerV2Test.java @@ -17,6 +17,7 @@ import argo.avro.GroupEndpoint; + public class EndpointGroupManagerV2Test { @BeforeClass @@ -24,6 +25,12 @@ public static void setUpBeforeClass() throws Exception { // Assert that files are present assertNotNull("Test file missing", EndpointGroupManagerV2Test.class.getResource("/avro/group_endpoints_v2.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day01.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day02.avro")); + assertNotNull("Test file missing", + EndpointGroupManagerV2Test.class.getResource("/avro/gp_day03.avro")); } @Test @@ -36,10 +43,35 @@ public void test() throws URISyntaxException, IOException { // Test loading file 2 ge.loadAvro(avroFile); - + // Prepare Resource File + resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/gp_day01.avro"); + avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 geDay01 = new EndpointGroupManagerV2(); + // Test loading file 2 + geDay01.loadAvro(avroFile); + // Prepare Resource File + resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/gp_day02.avro"); + avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 geDay02 = new EndpointGroupManagerV2(); + // Test loading file 2 + geDay02.loadAvro(avroFile); + // Prepare Resource File + resAvroFile = EndpointGroupManagerV2Test.class.getResource("/avro/gp_day03.avro"); + avroFile = new File(resAvroFile.toURI()); + // Instantiate class + EndpointGroupManagerV2 geDay03 = new EndpointGroupManagerV2(); + // Test loading file 2 + geDay03.loadAvro(avroFile); assertNotNull("File Loaded", ge); + assertNotNull("File Loaded", geDay01); + assertNotNull("File Loaded", geDay02); + assertNotNull("File Loaded", geDay03); + + // Test Check if service endpoint exists in topology assertTrue(ge.checkEndpoint("storage1.grid.upjs.sk", "ARC-CE")); @@ -76,8 +108,9 @@ public void test() throws URISyntaxException, IOException { assertTrue(egpMgr.getList().size()==5); + ArrayList toBeRemoved 
= geDay01.compareToBeRemoved(geDay03); + assertTrue(toBeRemoved.toString().equals("[SITES,RRC-KI-T1,CREAM-CE,calc1.t1.grid.kiae.ru, SITES,UKI-NORTHGRID-MAN-HEP,CREAM-CE,ce03.tier2.hep.manchester.ac.uk, SITES,INFN-ROMA1,gLExec,atlas-ce-02.roma1.infn.it, SITES,INFN-ROMA1,gLExec,atlas-creamce-02.roma1.infn.it, SITES,UKI-NORTHGRID-LANCS-HEP,com.ceph.object-storage,storage.datacentred.io, SITES,UKI-NORTHGRID-MAN-HEP,gLExec,ce03.tier2.hep.manchester.ac.uk, SITES,INFN-ROMA1,gLExec,atlas-creamce-01.roma1.infn.it, SITES,PSNC,ARC-CE,cream01.egee.man.poznan.pl, SITES,UKI-SCOTGRID-ECDF,gLite-APEL,mon2.glite.ecdf.ed.ac.uk, SITES,UKI-NORTHGRID-MAN-HEP,APEL,ce03.tier2.hep.manchester.ac.uk, SITES,INFN-ROMA1,CREAM-CE,atlas-creamce-01.roma1.infn.it]")); - } }
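The expected list asserted above comes from the new compareToBeRemoved(), which boils down to a set difference over the "type,group,service,hostname" form introduced by EndpointItem.toString(): endpoints present in today's topology but missing from the newer one are flagged for removal. A minimal, self-contained sketch of that pattern with stand-in data (plain collections, not the manager class itself):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Endpoints are compared by their "type,group,service,hostname" string form;
// whatever exists today but not in the newer topology is due for removal.
public class TopologyDiffSketch {
    public static void main(String[] args) {
        Set<String> today = new HashSet<>(Arrays.asList(
                "SITES,SITE-A,CREAM-CE,ce01.example.org",
                "SITES,SITE-A,ARC-CE,arc01.example.org"));
        Set<String> tomorrow = new HashSet<>(Arrays.asList(
                "SITES,SITE-A,ARC-CE,arc01.example.org"));

        Set<String> lost = new HashSet<>(today); // copy so 'today' stays intact
        lost.removeAll(tomorrow);                // set difference, as in compareToBeRemoved

        ArrayList<String> toBeRemoved = new ArrayList<>(lost);
        System.out.println(toBeRemoved);         // [SITES,SITE-A,CREAM-CE,ce01.example.org]
    }
}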