diff --git a/Dockerfile b/Dockerfile index d765459fa..827347bd2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,25 +10,62 @@ RUN yum install sudo -y && groupadd --gid $USER_GID $USERNAME \ && useradd --uid $USER_UID --gid $USER_GID -d /$USERNAME -m $USERNAME \ && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ && chmod 0440 /etc/sudoers.d/$USERNAME \ -&& mkdir /cd3user/tenancies/ && sudo chown -R $USERNAME:$USERNAME /cd3user/tenancies/ +&& mkdir -p /cd3user/tenancies && sudo chown -R $USERNAME:$USERNAME /cd3user/tenancies/ \ +&& yum install -y vim && echo 'alias vi="vim"' >> /etc/bashrc USER $USERNAME WORKDIR /cd3user/oci_tools/ -COPY . . +COPY cd3_automation_toolkit cd3_automation_toolkit/ + +WORKDIR /cd3user/ RUN sudo yum install -y oracle-softwarecollection-release-el7 \ && sudo chown -R $USERNAME:$USERNAME /cd3user/ - -WORKDIR /cd3user/ - RUN sudo sed -i -e 's/\r$//' /cd3user/oci_tools/cd3_automation_toolkit/shell_script.sh \ && bash /cd3user/oci_tools/cd3_automation_toolkit/shell_script.sh \ -&& sudo chown -R cd3user:cd3user /cd3user/ && sudo yum clean all && sudo rm -rf /var/cache/yum /root/ocswork \ -&& sudo rm -rf /cd3user/oci_tools/Dockerfile && sudo rm -rf /cd3user/oci_tools/Readme.md \ -&& sudo rm -rf /cd3user/oci_tools/.git && sudo rm -rf /cd3user/oci_tools/.gitignore && sudo chmod -R 740 /cd3user/ +&& sudo chown -R cd3user:cd3user /cd3user/ && sudo yum clean all && sudo rm -rf /var/cache/yum \ +&& sudo chmod -R 740 /cd3user/ + + +##################################### START INSTALLING JENKINS ################################### +ARG JENKINS_VERSION=2.401.1 +ARG JENKINS_SHA=600b73eabf797852e39919541b84f7686ff601b97c77b44eb00843eb91c7dd6c +ARG JENKINS_PLUGIN_MANAGER_VERSION=2.12.13 +ARG PLUGIN_CLI_URL=https://github.com/jenkinsci/plugin-installation-manager-tool/releases/download/${JENKINS_PLUGIN_MANAGER_VERSION}/jenkins-plugin-manager-${JENKINS_PLUGIN_MANAGER_VERSION}.jar + +ARG JENKINS_HOME=/cd3user/tenancies/jenkins_home +ARG JENKINS_INSTALL=/usr/share/jenkins +ARG REF=/usr/share/jenkins/ref + +RUN sudo yum remove java-1.8.0-openjdk-1.8.0.345.b01-1.el7_9.x86_64 \ +&& sudo yum install -y java-11-openjdk \ +&& sudo yum install -y java-11-openjdk-devel \ +&& sudo yum install unzip -y \ +&& sudo yum install git -y \ +&& sudo mkdir -p ${REF}/init.groovy.d \ +&& sudo chown -R cd3user:cd3user ${JENKINS_INSTALL} \ +&& sudo curl -fsSL http://updates.jenkins-ci.org/download/war/${JENKINS_VERSION}/jenkins.war -o ${JENKINS_INSTALL}/jenkins.war \ +&& echo "${JENKINS_SHA} ${JENKINS_INSTALL}/jenkins.war" | sha256sum -c - \ +&& sudo curl -fsSL ${PLUGIN_CLI_URL} -o ${JENKINS_INSTALL}/jenkins-plugin-manager.jar +ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-11.0.17.0.8-2.el8_6.x86_64 +ENV JENKINS_HOME ${JENKINS_HOME} +ENV JENKINS_INSTALL ${JENKINS_INSTALL} +ENV REF ${REF} +ENV JENKINS_UC https://updates.jenkins.io +ENV JENKINS_UC_EXPERIMENTAL=https://updates.jenkins.io/experimental +ENV JENKINS_INCREMENTALS_REPO_MIRROR=https://repo.jenkins-ci.org/incrementals +ENV JAVA_OPTS="-Djenkins.install.runSetupWizard=false" +ENV COPY_REFERENCE_FILE_LOG ${JENKINS_HOME}/copy_reference_file.log +ENV CASC_JENKINS_CONFIG ${JENKINS_HOME}/jcasc.yaml +COPY --chown=cd3user:cd3user jenkins_install ${JENKINS_INSTALL}/ +COPY --chown=cd3user:cd3user jenkins_install/init/*.groovy ${REF}/init.groovy.d/ +COPY --chown=cd3user:cd3user jenkins_install/plugins.txt ${REF}/plugins.txt +RUN sudo java -jar ${JENKINS_INSTALL}/jenkins-plugin-manager.jar --war ${JENKINS_INSTALL}/jenkins.war --verbose -f 
${REF}/plugins.txt \ +&& sudo chown -R cd3user:cd3user ${JENKINS_INSTALL} \ +&& sudo chmod +x ${JENKINS_INSTALL}/jenkins.sh diff --git a/README.md b/README.md index fba80bf00..f44554af0 100755 --- a/README.md +++ b/README.md @@ -1,78 +1,79 @@ # CD3 Automation Toolkit [![License: UPL](https://img.shields.io/badge/license-UPL-green)](https://img.shields.io/badge/license-UPL-green) [![Quality gate](https://sonarcloud.io/api/project_badges/quality_gate?project=oracle-devrel_cd3-automation-toolkit)](https://sonarcloud.io/dashboard?id=oracle-devrel_cd3-automation-toolkit) - - - -### New Users +### New Users -
  • Using the Automation Toolkit - + +#### Using the Automation Toolkit via Jenkins + ### Existing Users - - - -### **Step 4 - Edit tenancyconfig.properties**: -Enter the details to **tenancyconfig.properties** file. Please make sure to review 'outdir_structure_file' parameter as per requirements. It is recommended to use seperate outdir structure in case the tenancy has large number of objects. -``` -[Default] -# Mandatory Fields -# Friendly name for the Customer Tenancy eg: demotenancy; -# The generated .auto.tfvars will be prefixed with this customer name -customer_name= -tenancy_ocid= -fingerprint= -user_ocid= - -# Path of API Private Key (PEM Key) File; If the PEM keys were generated by running createAPI.py, leave this field empty. -# Defaults to /cd3user/tenancies/keys/oci_api_private.pem when left empty. -key_path= - -# Region ; defaults to us-ashburn-1 when left empty. -region= -# The outdir_structure_file defines the grouping of the terraform auto.tf.vars for the various generated resources. -# To have all the files generated in the corresponding region, leave this variable blank. -# To group resources into different directories within each region - specify the absolute path to the file. -# The default file is specified below. You can make changes to the grouping in the below file to suit your deployment" -outdir_structure_file= -#or -#outdir_structure_file=/cd3user/oci_tools/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties - -# Optional Fields -# SSH Key to launched instances -ssh_public_key= - -``` -### **Step 5 - Initialise the environment**: -Initialise your environment to use the Automation Toolkit. +### **Step 2 - Choose Authentication Mechanism for OCI SDK** +* Please click [here](/cd3_automation_toolkit/documentation/user_guide/Auth_Mechanisms_in_OCI.md) to configure any one of the available authentication mechanisms. + +### **Step 3 - Edit tenancyconfig.properties**: +* Run ```cd /cd3user/oci_tools/cd3_automation_toolkit/user-scripts/``` +* Fill the input parameters in **tenancyconfig.properties** file. +* Ensure to: + - Have the details ready for the Authentication mechanism you are planning to use. + - Use the same customer_name for a tenancy even if the script needs to be executed multiple times. + - Review **'outdir_structure_file'** parameter as per requirements. It is recommended to use seperate outdir structure to manage + a large number of resources.
    + - Review Advanced Parameters Section for CI/CD setup and be ready with user details that will be used to connect to DevOps Repo in OCI. Specifying these parameters as **'yes'** in properties file will create Object Storage Bucket and Devops Git Repo/Project/Topic in OCI + and enable toolkit usage via Jenkins. + > The toolkit supports users in primary IDCS stripes or default domains only for DevOps GIT operations. + + +### **Step 4 - Initialise the environment**: +* Initialise your environment to use the Automation Toolkit.
    ```python createTenancyConfig.py tenancyconfig.properties``` -**Note** - If the API Keys were generated and added to the OCI console using previous steps, it might take a couple of seconds to reflect. Thus, running the above command immediately might result in Authentication Errors.
In such cases, please retry after a minute. +> Note +> * If you are running the docker container on a Linux VM host, please refer to [point no. 7](/cd3_automation_toolkit/documentation/user_guide/FAQ.md) under FAQ to avoid any permission issues. +> * Running the above command immediately after adding the API key to the user profile in OCI might result in Authentication Errors. In such cases, please retry after a minute.<br>
    -→ Example execution of the script: -
    ![image](https://user-images.githubusercontent.com/103508105/221942089-5c52b221-96f1-4a73-9a10-46159ae4a75c.png) +→ Example execution of the script with Advanced Parameters for CI/CD: + +Screenshot 2024-01-10 at 5 54 02 PM ## Appendix -→ Files created on successful execution of above steps - Description of the Generated files: + +
    Expand this to view the details of the files created on successful execution of above steps | Files Generated | At File Path | Comment/Purpose | | --------------- | ------------ | --------------- | -| Config File | ```/cd3user/tenancies//_config``` | Customer specific Config file is required for OCI API calls. | -| setUpOCI.properties | ```/cd3user/tenancies//_setUpOCI.properties``` | Customer Specific properties files will be created. | -| outdir_structure_file | ```/cd3user/tenancies//_outdir_structure_file``` | Customer Specific properties file for outdir structure.
    This file will not be generated if 'outdir_structure_file' parameter was set to empty(single outdir) in tenancyconfig.properties while running createTenancy.py | +| setUpOCI.properties | ```/cd3user/tenancies//_setUpOCI.properties``` | Customer Specific properties | +| outdir_structure_file.properties | ```/cd3user/tenancies//_outdir_structure_file``` | Customer Specific properties file for outdir structure.
    This file will not be generated if 'outdir_structure_file' parameter was set to empty(single outdir) in tenancyconfig.properties while running createTenancyConfig.py | | Region based directories | ```/cd3user/tenancies//terraform_files``` | Tenancy's subscribed regions based directories for the generation of terraform files.
    Each region directory will contain individual directory for each service based on the parameter 'outdir_structure_file' | -| Variables File,Provider File, Root and Sub modules | ```/cd3user/tenancies//terraform_files/``` | Required for terraform to work. | -| Public and Private Key Pair | Copied from ```/cd3user/tenancies/keys/```
    to
    ```/cd3user/tenancies//``` | API Keys that were previously generated are moved to customer specific out directory locations for easy access. | -| A log file with the commands to execute | ```/cd3user/tenancies//cmds.log``` | This file contains a copy of the Commands to execute section of the console output. | +| Variables File,Provider File, Root and Sub terraform modules | ```/cd3user/tenancies//terraform_files/``` | Required for terraform to work. Variables file and Provider file will be genrated based on authentication mechanism chosen.| +| out file | ```/cd3user/tenancies//createTenancyConfig.out``` | This file contains a copy of information displayed as the console output. | +| OCI Config File | ```/cd3user/tenancies//.config_files/_oci_config``` | Customer specific Config file for OCI API calls. This will have data based on authentication mechanism chosen. | +| Public and Private Key Pair | Copied from ```/cd3user/tenancies/keys/```
    to
    ```/cd3user/tenancies//.config_files``` | API Key for authentication mechanism as API_Key are copied to customer specific out directory locations for easy access. | +| GIT Config File | ```/cd3user/tenancies//.config_files/_git_config``` | Customer specific GIT Config file for OCI Dev Ops GIT operations. This is generated only if use_oci_devops_git is set to yes | +| S3 Credentials File | ```/cd3user/tenancies//.config_files/_s3_credentials``` | This file contains access key and secret for S3 compatible OS bucket to manage remote terraform state. This is generated only if use_remote_state is set to yes | +| Jenkins Home | ```/cd3user/tenancies/jenkins_home``` | This folder contains jenkins specific data. ```Single Jenkins instance can be setup for a single container.```| +| tenancyconfig.properties | ```/cd3user/tenancies//.config_files/_tenancyconfig.properties``` | The input properties file used to execute the script is copied to custome folder to retain for future reference. This can be used when the script needs to be re-run with same parameters at later stage.| +
    +The next pages will guide you to use the toolkit either via CLI or via Jenkins. Please proceed further. +



    -| :arrow_backward: Prev | Next :arrow_forward: | -| :---- | -------: | +| :arrow_backward: Prev | Automation Toolkit via CLI :arrow_forward: | Automation Toolkit via Jenkins :arrow_forward: | +| :---- | -------: |-------: |
    diff --git a/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md b/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md new file mode 100644 index 000000000..1d733001b --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md @@ -0,0 +1,33 @@ +# **Excel Sheet Templates** +CD3 Excel sheet is the main input for Automation Toolkit. + +Below are the CD3 templates for the latest release having standardised IAM Components (compartments, groups and policies), Network Components and Events & Notifications Rules as per CIS Foundations Benchmark for Oracle Cloud. + +Details on how to fill data into the Excel sheet can be found in the Blue section of each sheet inside the Excel file. Make appropriate changes to the templates eg region and use for deployment. + +
    + +**CD3 Excel templates for OCI core services:** + +|Excel Sheet | Purpose | +|-----------|----------------------------------------------------------------------------------------------------------------------------| +| [CD3-Blank-template.xlsx](/cd3_automation_toolkit/example) | Choose this template while exporting the existing resources from OCI into the CD3 and Terraform.| +| [CD3-CIS-template.xlsx](/cd3_automation_toolkit/example) | This template has auto-filled in data of CIS Landing Zone for DRGv2. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases) | +|[CD3-HubSpoke-template](/cd3_automation_toolkit/example) | This template has auto-filled in data for a Hub and Spoke model of networking. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases)| +|[CD3-SingleVCN-template](/cd3_automation_toolkit/example) | This template has auto-filled in data for a Single VCN model of networking. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases)| + + +
    + +**CD3 Excel template for OCI Management services:** + + +|Excel Sheet| Purpose | +|-----------|----------------------------------------------------------------------------------------------------------------------------| +|[CD3-CIS-ManagementServices-template.xlsx](/cd3_automation_toolkit/example) | This template has auto-filled in data of CIS Landing Zone. Choose this template while creating the components of Events, Alarms, Notifications and Service Connectors| + +
    + +> The Excel Templates can also be found at _/cd3user/oci_tools/cd3_automation_toolkit/example_ inside the container. +> After deploying the infra using any of the templates, please run [CIS compliance checker script](/cd3_automation_toolkit/documentation/user_guide/learn_more/CISFeatures.md#1-run-cis-compliance-checker-script)) + diff --git a/cd3_automation_toolkit/documentation/user_guide/FAQ.md b/cd3_automation_toolkit/documentation/user_guide/FAQ.md index 7b7828a6f..22d30ac26 100644 --- a/cd3_automation_toolkit/documentation/user_guide/FAQ.md +++ b/cd3_automation_toolkit/documentation/user_guide/FAQ.md @@ -12,14 +12,7 @@ **3. If I am already using the toolkit and my OCI tenancy has been subscribed to a new region, how do i use the new region with toolkit?**
    -Follow below steps to start using the newly subscribed region with the toolkit: -
    - Take backup of the existing out directory. -
    - Create a new directory for the region say 'london' along with other region directories. -
    - Copy all the terraform modules and .tf files, except the .auto.tfvars and .tfstate files from existing region directory to the new one -
    - Modify the name of variables file (eg variables_london.tf) -
    - Modify the region parameter in this variables_london.tf -
- Modify Image OCIDs in this variables file according to new region. - +Re-run createTenancyConfig.py with the same details in the tenancyConfig.properties file. It will keep the existing region directories as is and create a new directory for the newly subscribed region. **4. How do I upgrade an existing version of the toolkit to the new one without disrupting my existing tenancy files/directories?**<br>
    @@ -46,13 +39,7 @@ Terraform destroy on compartments or removing the compartments details from _ - Add _enable\_delete = true_ parameter to each of the compartment that needs to be deleted in _\_compartments.auto.tfvars_ -**7. I am getting Timeout Error during export of DRG Route Rules while exporting Network Components.** -
    - -Toolkit exports all Dynamic as well as Static DRG route Rules and timesout if there is a large number of dynamic rules. As a workaround, edit line no 220 in file _/cd3user/oci\_tools/cd3\_automation\_toolkit\Network\BaseNetwork\exportRoutetable.py_.
    -Change _vcn = VirtualNetworkClient(config, timeout=(30,120))_ to _vcn = VirtualNetworkClient(config, timeout=(90,300))_ - -**8. I am getting 'Permission Denied' error while executing any commands inside the container.** +**7. I am getting 'Permission Denied' error while executing any commands inside the container.**
    When you are running the docker container from a Linux OS, if the outdir is on the root, you may get a permission denied error while executing steps like createAPIKey.py. In such scenarios, please follow the steps given below - diff --git a/cd3_automation_toolkit/documentation/user_guide/GF-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/GF-Jenkins.md new file mode 100644 index 000000000..b09cda24f --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/GF-Jenkins.md @@ -0,0 +1,21 @@ +# Provisioning of Instances/OKE/SDDC/Database on OCI via Jenkins + +To provision OCI resources which require input ssh keys and source image details, update **variables_\.tf** file using CLI. + +**Step 1**: +
    Update required data in `/cd3user/tenancies//terraform_files///variables_.tf`. + +**Step 2**: +
Execute GIT commands to sync these local changes with the DevOps GIT Repo; a typical command sequence is sketched after Step 3 below. + +**Step 3**: +<br>
    Execute setUpOCI pipeline from Jenkins dashboard with workflow type as **Create Resources in OCI(Greenfield Workflow)** and choose the respective options to create required services. + + +
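The Git sync referenced in Step 2 is essentially a commit and push of the edited variables file. A minimal sketch, assuming the region directory under terraform_files is already a working copy of the OCI DevOps repo and that the branch is `main` (both assumptions; the path placeholders are illustrative only):

```bash
# Sketch only: sync the locally edited variables file to the OCI DevOps repo
cd /cd3user/tenancies/<prefix>/terraform_files/<region>
git add variables_<region>.tf
git commit -m "Update ssh keys and source image details for provisioning"
git push origin main    # branch name is an assumption; adjust to your repo
```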

    +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/GreenField-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/GreenField-Jenkins.md new file mode 100644 index 000000000..c9287f8d6 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/GreenField-Jenkins.md @@ -0,0 +1,88 @@ +# Create resources in OCI via Jenkins(Greenfield Workflow) + +## Execute setUpOCI Pipeline + +**Step 1**: +
    Choose the appropriate CD3 Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md). +Fill the CD3 Excel with appropriate values. + + +**Step 2**: +
    Login to Jenkins URL with the user created after initialization and click on **setUpOCI pipeline** from Dashboard. Click on **'Build with Parameters'** from left side menu. + +Screenshot 2024-01-16 at 10 56 42 AM + +>Note - Only one user at a time using the Jenkins setup is supported in the current release of the toolkit. + + +**Step 3**: +
    Upload the above filled Excel sheet in **Excel_Template** section. + +Screenshot 2024-01-16 at 11 04 47 AM
+ +>This will copy the Excel file to `/cd3user/tenancies/` inside the container. It will also take a backup of the existing Excel file on the container, by appending the current datetime, if the same filename is uploaded in multiple executions. + + +**Step 4:** +<br>
    Select the workflow as **Create Resources in OCI(Greenfield Workflow)**. Choose single or multiple MainOptions as required and then corresponding SubOptions. +
Please [read this](/cd3_automation_toolkit/documentation/user_guide/multiple_options_GF-Jenkins.md) while selecting multiple options simultaneously. +<br>
    Below screenshot shows creation of Compartments (under Identity) and Tags. + +Screenshot 2024-01-16 at 2 44 38 PM + +Click on **Build** at the bottom. + + +**Step 5:** +
The setUpOCI pipeline is triggered and its stages are executed as shown below.<br>
+This will run the python script to generate the terraform auto.tfvars. Once created, it will commit to the OCI Devops GIT Repo and then it will also launch terraform-apply pipelines for the services chosen (Stage:phoenix/identity and Stage:phoenix/tagging in the below screenshot). + Screenshot 2024-01-17 at 11 57 14 AM + ## Execute terraform Pipelines Terraform pipelines are auto triggered in parallel from the setUpOCI pipeline based on the services selected (the last two stages in the above screenshot show the trigger of terraform pipelines).<br>
    + +**Step 1**: + +Click on 'Logs' for Stage: phoenix/identity and click on the pipeline link. +Screenshot 2024-01-17 at 11 58 15 AM
    +> ***Note - Navigating to Dashboard displays pipelines that are in running state at the bottom left corner.***
    +> ***Or you can also navigate from Dashboard using the region based view (Dashboard -> phoenix View -> service specific pipeline)***
    +> ***in this example it would be:***
    +>   ***terraform_files » phoenix » tagging » terraform-apply***
    +>   ***terraform_files » phoenix » identity » terraform-apply***
    + +**Step 2**: +
    Stages of the terraform pipeline for apply are shown below: + +Screenshot 2024-01-17 at 12 01 42 PM
    + +**Step 3**: +
    Review Logs for Terraform Plan and OPA stages by clicking on the stage and then 'Logs'. + +Screenshot 2024-01-17 at 12 13 57 PM
    + + +**Step 4**: +
The 'Get Approval' stage has a timeout of 24 hours; if no action is taken, the pipeline will be aborted after 24 hours. Click on this stage and click 'Proceed' to proceed with terraform apply or 'Abort' to cancel the terraform apply. + Screenshot 2024-01-17 at 12 04 15 PM<br>
    + + +**Step 5**: +
The below screenshot shows the Stage View after clicking on 'Proceed'. Log in to the OCI console and verify that the resources were created as required. + Screenshot 2024-01-17 at 12 13 15 PM<br>
    + +**Step 6**: +
Similarly, click on 'Logs' for Stage: phoenix/tagging, click on the pipeline link, and 'Proceed' or 'Abort' the terraform apply.<br>

    + + +

    + +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/GreenField.md b/cd3_automation_toolkit/documentation/user_guide/GreenField.md index dc41ea08a..a3360a468 100644 --- a/cd3_automation_toolkit/documentation/user_guide/GreenField.md +++ b/cd3_automation_toolkit/documentation/user_guide/GreenField.md @@ -1,17 +1,36 @@ -# Green Field Tenancies - -## Detailed Steps -Below are the steps that will help to configure the Automation Tool Kit to support the Green Field Tenancies: +# Create resources in OCI (Greenfield Workflow) **Step 1**: -
    Choose the appropriate CD3 Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md#excel-sheet-templates) +
    Choose the appropriate Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md) **Step 2**: -
    Fill the CD3 Excel with appropriate values specific to the client and put at the appropriate location. -
    Modify/Review [setUpOCI.properties](/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md#setupociproperties) with **non_gf_tenancy** set to **false** as shown below: +
Fill the Excel with appropriate values and place it at the appropriate location. +<br>
    Modify/Review _/cd3user/tenancies//\_setUpOCI.properties_ with **workflow_type** set to **create_resources** as shown below: +```ini +#Input variables required to run setUpOCI script + +#path to output directory where terraform file will be generated. eg /cd3user/tenancies//terraform_files +outdir=/cd3user/tenancies/demotenancy/terraform_files/ + +#prefix for output terraform files eg like demotenancy +prefix=demotenancy + +# auth mechanism for OCI APIs - api_key,instance_principal,session_token +auth_mechanism=api_key + +#input config file for Python API communication with OCI eg /cd3user/tenancies//.config_files/_config; +config_file=/cd3user/tenancies/demotenancy/.config_files/demotenancy_oci_config + +# Leave it blank if you want single outdir or specify outdir_structure_file.properties containing directory structure for OCI services. +outdir_structure_file=/cd3user/tenancies/demotenancy/demotenancy_outdir_structure_file.properties -![image](https://user-images.githubusercontent.com/103508105/221797142-c780dbd6-883f-450f-9929-dce81d32079e.png) +#path to cd3 excel eg /cd3user/tenancies//CD3-Customer.xlsx +cd3file=/cd3user/tenancies/demotenancy/CD3-Blank-template.xlsx +#specify create_resources to create new resources in OCI(greenfield workflow) +#specify export_resources to export resources from OCI(non-greenfield workflow) +workflow_type=create_resources +``` **Step 3**:
    Execute the SetUpOCI.py script to start creating the terraform configuration files. @@ -57,7 +76,7 @@ Follow the below steps to quickly provision a compartment on OCI. 2. Edit the _setUpOCI.properties_ at location:_/cd3user/tenancies //\_setUpOCI.properties_ with appropriate values. - Update the _cd3file_ parameter to specify the CD3 excel sheet path. - - Set the _non_gf_tenancy_ parameter value to _false_. (for Greenfield Workflow.) + - Set the _workflow_type_ parameter value to _create_resources_. (for Greenfield Workflow.) 3. Change Directory to 'cd3_automation_toolkit' : ```cd /cd3user/oci_tools/cd3_automation_toolkit/``` @@ -66,7 +85,7 @@ Follow the below steps to quickly provision a compartment on OCI. ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` -4. Choose option to create compartments under 'Identity' from the displayed menu. Once the execution is successful, _\_compartments.auto.tfvars_ file will be generated under the folder _/cd3user/tenancies//terraform_files/_ +4. Choose option to create compartments under 'Identity' from the displayed menu. Once the execution is successful, _\_compartments.auto.tfvars_ file will be generated under the folder _/cd3user/tenancies//terraform_files//_ Navigate to the above path and execute the terraform commands:

    _terraform init_ @@ -80,7 +99,7 @@ Follow the below steps to quickly provision a compartment on OCI.
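For reference, the full command sequence behind the italicised terraform steps for the compartment quick-start above is sketched here; it is a sketch only, run from the generated region directory, and the path placeholders are illustrative:

```bash
cd /cd3user/tenancies/<prefix>/terraform_files/<region>/<service_dir>
terraform init     # initialise providers and modules
terraform plan     # review the compartment changes to be made
terraform apply    # provision the compartment in OCI
```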

    diff --git a/cd3_automation_toolkit/documentation/user_guide/Intro-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/Intro-Jenkins.md new file mode 100644 index 000000000..63490648d --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/Intro-Jenkins.md @@ -0,0 +1,80 @@ + +## **Introduction to Jenkins with the toolkit** + +### Jenkins Dashbord + +1. setUpOCI Pipeline +2. terraform_files Folder +3. Region based Views (including Global directory) + +Screenshot 2024-01-16 at 10 52 07 AM + + +### 1. setUpOCI Pipeline + +This is equivalent to running *setUpOCI.py* from CLI. This will generate the terraform **.auto.tfvars** files based on the CD3 Excel sheet input for the services chosen and commit them to OCI Devops GIT repo. This will also trigger **terraform-apply** pipelines for the corresponding services chosen in setUpOCI pipeline. + +Below table shows the stages executed in this pipeline along with their description: + +
    + +
    Expand this to view setUpOCI Pipeline Stages + +|Stage Name | Description | Possible Outcomes | +| --------------- | ------------ | ----------------- | +| Validate Input Parameters | Validates input file name/size, selected parameters | Displays Unstable if any of the validation fails. Pipeline stops further execution in that case. | +| Update setUpOCI.properties | Updates _setUpOCI.properties with input filename and workflow_type | Displays Failed if any issue during execution | +| Execute setUpOCI | Executes python code to generate required tfvars files. The console output for this stage is similar to setUpOCI.py execution via CLI.
    Multiple options selected will be processed sequentially in this stage. | Displays Failed if any issue occurs during its execution. Further stages are skipped in that case. | +| Run Import Commands | Based on the workflow_type as 'Export Resources from OCI', this stage invokes execution of tf_import_commands_\_nonGF.sh shell scripts which will import the exported objects into tfstate. tf_import_commands for multiple options selected will be processed sequentially in this stage.
    This stage is skipped for 'Create Resources in OCI' workflow | Displays Failed if any issue occurs during its execution. Further stages are skipped in that case. | +| Git Commit | Commits the terraform_files folder to OCI DevOps GIT Repo. This will trigger respective terraform_pipelines| Pipeline stops further execution if there is nothing to commit. In some cases when tfvars was generated in previous execution, you can navigate to terrafom-apply pipeline and trigger that manually | +| Trigger Terraform Pipelines | Corresponding terraform apply pipelines are auto triggered based on the service chosen | | +
    + +
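For comparison, the CLI run that this pipeline replaces is the setUpOCI script invocation described in the CLI guide; a sketch, where the placeholder path is illustrative:

```bash
# CLI equivalent of the setUpOCI pipeline (sketch; placeholder path is illustrative)
cd /cd3user/oci_tools/cd3_automation_toolkit/
python setUpOCI.py /cd3user/tenancies/<prefix>/<prefix>_setUpOCI.properties
```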
    + +### 2. terraform_files Folder + +This is equivalent to **/cd3user/tenancies//terraform_files** folder on your local system. +The region directories along with all service directories, are present under this terraform_files folder. +Inside each service directory, pipelines for **terraform-apply** and **terraform-destroy** are present. + +The terraform pipelines are either triggered automatically from setUpOCI pipeline or they can be triggered manually by navigating to any service directory path. + +
    + +
Expand this to view terraform-apply Pipeline Stages + +|Stage Name | Description | Possible Outcomes | +| --------------- | ------------ | ----------------- | +| Checkout SCM | Checks out the latest terraform_files folder from DevOps GIT repo | | +| Terraform Plan | Runs terraform plan against the checked out code and saves it in tfplan.out | Pipeline stops further execution if terraform plan shows no changes. Displays Failed if any issue while executing terraform plan | +| OPA | Runs the above generated terraform plan against Open Policies and displays the violations if any | Displays Unstable if any OPA rule is violated | +| Get Approval | Approval Stage for reviewing the terraform plan. There is 24 hours timeout for this stage. | Proceed - goes ahead with Terraform Apply stage.<br>
Abort - pipeline is aborted and stops further execution | +|Terraform Apply | Applies the terraform configurations | Displays Failed if any issue while executing terraform apply | +<br>
    + +
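Roughly, the Terraform Plan, OPA and Terraform Apply stages above correspond to the following CLI sequence. This is a sketch, not the pipeline's actual Groovy; the policy directory path and the `data.terraform.deny` query are assumptions:

```bash
terraform plan -out=tfplan.out                   # Terraform Plan stage
terraform show -json tfplan.out > tfplan.json    # convert the saved plan for policy evaluation
opa eval --format pretty -d <policy_dir> -i tfplan.json "data.terraform.deny"   # OPA stage
terraform apply tfplan.out                       # Terraform Apply stage, after approval
```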
    + + +
    Expand this to view terraform-destroy Pipeline Stages + +|Stage Name | Description | Possible Outcomes | +| --------------- | ------------ | ----------------- | +| Checkout SCM | Checks out the latest terraform_files folder from DevOps GIT repo | | +| Terraform Destroy Plan | Runs `terraform plan -destroy` against the checked out code | Displays Failed if any issue in plan output | +| Get Approval | Approval Stage for reviewing the terraform plan. There is 24 hours timeout for this stage. | Proceed - goes ahead with Terraform Destroy stage.
Abort - pipeline is aborted and stops further execution | +|Terraform Destroy | Destroys the terraform configurations | Displays Failed if any issue while executing terraform destroy | +<br>
    +
    + +### 3. Region Based Views +When you click on any of the views, it displays all terraform-apply and terraform-destroy pipelines in single screen. This can also be used to trigger the terraform pipelines. This also includes Global view for global services like RPC. + +

    +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | + +
    + diff --git a/cd3_automation_toolkit/documentation/user_guide/Jobs_Migration.md b/cd3_automation_toolkit/documentation/user_guide/Jobs_Migration.md new file mode 100755 index 000000000..1932ac501 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/Jobs_Migration.md @@ -0,0 +1,82 @@ +# Migrate Jobs from Automation Toolkit Jenkins to Customer Jenkins Environment + + +1. Copy Jobs Folder + - Copy the folders from the Automation Toolkit Jenkins home path `/cd3user/tenancies/jenkins_home/jobs/` to the corresponding home directory in the Customer Jenkins instance (typically `/var/jenkins_home`). + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/5a1f54f1-3e50-4ec7-8634-494eec65ce56) + +2. Set up OCI Devops repository SSH Authentication + - Ensure SSH authentication is configured and operational on the Customer Jenkins instance. For detailed instructions, refer to the [OCI Code Repository documentation](https://docs.oracle.com/en-us/iaas/Content/devops/using/ssh_auth.htm).

    + + > Note - Steps to change the GIT repo are explained in next section. + +3. Ensure Availability of Ansi Color Plugin + - Confirm the presence of the Ansi color plugin in the Customer Jenkins instance. This plugin is utilized in Automation Toolkit pipeline Groovy code and is necessary if not already installed. Plugin link: [Ansicolor Plugin](https://plugins.jenkins.io/ansicolor/) + +4. Install Terraform Binary + - Make sure the Terraform binary is installed and accessible for the Jenkins user within the Jenkins instance. Installation guide: [Terraform Installation](https://developer.hashicorp.com/terraform/install) + +5. Update Optional Attribute Field inside Terraform Provider Block at `/cd3user/tenancies//terraform_files//provider.tf` + - Include an attribute as highlighted below within the Terraform provider block. This is optional but necessary in case Terraform plan encounters an error. + + experiments = [module_variable_optional_attrs] + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/2e1593ee-e4cc-4439-8ffa-97d39dda16a6) + +6. Update the correct value for private_key_path variable in `/cd3user/tenancies//terraform_files//variables_.tf` + +7. Configure S3 Backend Credentials in Customer Jenkins Instance + - Update the correct path within the `backend.tf` file for Terraform. + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/bfd6d2a2-7384-4bb0-a30b-5b7fd63c0e9b) + +8. Push the above changes to Devops GIT repository so that pipline can get the latest commits/changes and execute it. + +9. Stop/Start the Customer Jenkins Instance for the changes to take effect. This is applicable for any configuration changes in Jenkins. + +10. Job and Pipeline Configuration + - Verify that the specified jobs and pipelines, initialized by the Automation Toolkit, are visible in the Customer Jenkins instance. + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/3fca2b65-78b0-4528-a821-c43b5950cc90) + +11. Pipeline Job Output + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/4bb57802-1594-4361-9c54-46022abf190a) + + +# Update the Git URL for all pipeline jobs in the Customer Jenkins(if required). + +1. Remove terraform_files folder under /jobs folder +2. Create `jenkins.properties` File + - Copy the `jenkins.properties` file from Automation Toolkit Jenkins home folder `/cd3users/tenancies/jenkins_home/` to the customer jenkins home (typically `/var/jenkins_home/`) directory in customer Jenkins Instance (Below is sample content): + + git_url= "ssh://devops.scmservice.us-phoenix-1.oci.oraclecloud.com/namespaces//projects/toolkitdemo-automation-toolkit-project/repositories/toolkitdemo-automation-toolkit-repo" + regions=['london', 'phoenix'] + services=['identity', 'tagging', 'network', 'loadbalancer', 'vlan', 'nsg', 'compute', 'database', 'fss', 'oke', 'ocvs', 'security', 'managementservices', 'budget', 'cis', 'oss', 'dns'] + outdir_structure=["Multiple_Outdir"] + + +3. Update the `git_url` in the `jenkins.properties` File + - Open the `jenkins.properties` file located in the `/var/jenkins_home/` directory. + - Update the `git_url` in the file with the new Git server URL. + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/2056b8a3-c27e-481a-893a-a2ffba628c03) + + +4. 
Copy `01_jenkins-config.groovy` File + - Copy the `01_jenkins-config.groovy` file from the Automation Toolkit Jenkins path (`/cd3user/tenancies/jenkins_home/init.groovy.d`) to the init path of the Customer Jenkins instance. + - Update the path to the groovy file accordingly. + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/348db348-7eee-47ed-88f4-32f1ecd25e0b) + + +5. Restart Customer Jenkins Instance + - Stop and start the Customer Jenkins instance to apply the changes. + - After that, all Git URLs will be updated and point to new Git Url inside pipeline jobs. + + ![image](https://github.com/unamachi/cd3-automation-toolkit/assets/103548537/83dc5e7a-4ceb-44a1-871f-4d9e314a3ce1) + +6. Ensure SSH Authentication + - Confirm that SSH authentication is enabled for the new GIT repository from the Jenkins instance. + - Alternatively, use the respective authentication method if relying on other methods. diff --git a/cd3_automation_toolkit/documentation/user_guide/KnownBehaviour.md b/cd3_automation_toolkit/documentation/user_guide/KnownBehaviour.md index 69ff35261..db8ddef29 100644 --- a/cd3_automation_toolkit/documentation/user_guide/KnownBehaviour.md +++ b/cd3_automation_toolkit/documentation/user_guide/KnownBehaviour.md @@ -1,29 +1,35 @@ # Expected Behaviour Of Automation Toolkit -### NOTE: -1. Automation Tool Kit DOES NOT support the creation/export of duplicate resources. -2. DO NOT modify/remove any commented rows or column names. You may re-arrange the columns if needed. -3. A double colon (::) or Semi-Colon (;) has a special meaning in the Tool Kit. Do not use them in the OCI data / values. -4. Do not include any unwanted space in cells you fill in; do not place any empty rows in between. -5. Any entry made/moved post \ in any of the tabs of CD3 will not be processed. Any resources created by the automation & then moved after the \ will cause the resources to be removed. -6. The components that get created as part of VCNs Tab (Example: IGW, SGW, LPG, NGW, DRG) will have the same set of Tags attached to them. -7. Automation Tool Kit does not support sharing of Block Volumes. -8. Some points to consider while modifying networking components are: - - Converting the exported VCN to Hub/Spoke/Peer VCN is not allowed. Route Table rules based on the peering for new LPGs to existing VCNs will not be auto populated. Users are requested to add an entry to the RouteRulesInOCI sheet to support the peering rules. - - Adding a new VCN as Hub and other new VCNs as Spoke/Peer is allowed. Gateways will be created as specified in VCNs sheet. - - Adding new VCNs as None is allowed. Gateways will be created as specified in VCNs sheet. - - The addition of new Subnets to exported VCNs and new VCNs is allowed. -9. When you have exported Identity and Network services together in single outdirectory for the first time and executing identity import script. You might see import failure with below error message. Execute Major network import script first then run Identity import script. - -``` -!!!!!!!!!!!!!!!!!!!!!!!!!!! TERRAFORM CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! -4 problems: -- Failed to serialize resource instance in state: Instance data.oci_core_drg_route_distributions.drg_route_distributions["DRG-ASH_Autogenerated-Import-Route-Distribution-for-ALL-routes"] has status ObjectPlanned, which cannot be saved in state. 
-- Failed to serialize resource instance in state: Instance data.oci_core_drg_route_distributions.drg_route_distributions["DRG-ASH_Autogenerated-Import-Route-Distribution-for-VCN-Routes"] has status ObjectPlanned, which cannot be saved in state. -``` +> [!NOTE] +> 1. Automation Tool Kit *DOES NOT* support the creation/export of duplicate resources.
    +> 2. Automation Tool Kit *DOES NOT* support sharing of Block Volumes. + +> [!WARNING] +> 1. DO NOT modify/remove any commented rows or column names. You may re-arrange the columns if needed. +> 2. A double colon (::) or Semi-Colon (;) has a special meaning in the Tool Kit. Do not use them in the OCI data / values. +> 3. Do not include any unwanted space in cells you fill in; do not place any empty rows in between. +> 4. Any entry made/moved post \ in any of the tabs of CD3 will not be processed. Any resources created by the automation & then moved after the \ will cause the resources to be removed. + +> [!IMPORTANT] +> The components that get created as part of VCNs Tab (Example: IGW, SGW, LPG, NGW, DRG) will have the same set of Tags attached to them.
    +> Some points to consider while modifying networking components are: +> 1. Converting the exported VCN to Hub/Spoke/Peer VCN is not allowed. Route Table rules based on the peering for new LPGs to existing VCNs will not be auto populated. Users are requested to add an entry to the RouteRulesInOCI sheet to support the peering rules. +> 2. Adding a new VCN as Hub and other new VCNs as Spoke/Peer is allowed. Gateways will be created as specified in VCNs sheet. +> 3. Adding new VCNs as None is allowed. Gateways will be created as specified in VCNs sheet. +> 4. The addition of new Subnets to exported VCNs and new VCNs is allowed. +> 5. You might come across below error during export of NSGs(while runnig terraform import commands for NSGs). It occurs when NSG and the VCN are in different compartments. In such cases, please modify \_nsgs.auto.tfvars, specify the compartment name of the VCN in network_compartment_id field of the problematic NSG. + image +> 6. When you have exported Identity and Network services together in single outdirectory for the first time and executing identity import script. You might see import failure with below error message. Execute Major network import script first then run Identity import script.
    + ``` + !!!!!!!!!!!!!!!!!!!!!!!!!!! TERRAFORM CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! + 4 problems: + - Failed to serialize resource instance in state: Instance data.oci_core_drg_route_distributions.drg_route_distributions["DRG-ASH_Autogenerated-Import-Route-Distribution-for-ALL-routes"] has status ObjectPlanned, which cannot be saved in state. + - Failed to serialize resource instance in state: Instance data.oci_core_drg_route_distributions.drg_route_distributions["DRG-ASH_Autogenerated-Import-Route-Distribution-for-VCN-Routes"] has status ObjectPlanned, which cannot be saved in state. + ``` ## Terraform Behavior -- Create a Load Balancer with Reserved IP: When you create a LBaaS with reserved ip as "Y" and do a terraform apply, everything will go smooth and be in sync for the first time. If you do a terraform plan immediately (post apply), you will find that the plan changes the private ip of load balancer to null. +#### 1. +Create a Load Balancer with Reserved IP: When you create a LBaaS with reserved ip as "Y" and do a terraform apply, everything will go smooth and be in sync for the first time. If you do a terraform plan immediately (post apply), you will find that the plan changes the private ip of load balancer to null. ![image](https://user-images.githubusercontent.com/122371432/214501615-c84d26bb-1227-42b7-bc86-a6f82020aab0.png) @@ -35,29 +41,32 @@ Once you do the above change, and then execute a terraform plan/apply, you will get the below error and it can be ignored. ![image](https://user-images.githubusercontent.com/122371432/214502222-09eb5bb2-4a21-43fa-89b9-6540324c7f75.png) - - -- While exporting and synching the tfstate file for LBaaS Objects, the user may be notified that a few components will be modified on apply. In such scenarios, add the attributes that the Terraform notifies to be changed to the appropriate CD3 Tab of Load Balancer and uncomment the parameter from Jinja2 Templates and Terraform (.tf) files. Re-run the export. + +#### 2. +While exporting and synching the tfstate file for LBaaS Objects, the user may be notified that a few components will be modified on apply. In such scenarios, add the attributes that the Terraform notifies to be changed to the appropriate CD3 Tab of Load Balancer and uncomment the parameter from Jinja2 Templates and Terraform (.tf) files. Re-run the export. -- Add a new column - "Freeform Tags" to the CD3 Excel Sheets as per necessity, to export the tags associated with the resource as well. If executed as-is, Terraform may prompt you to modify resources based on Tags. +#### 3. +Add a new column - "Freeform Tags" to the CD3 Excel Sheets as per necessity, to export the tags associated with the resource as well. If executed as-is, Terraform may prompt you to modify resources based on Tags. **Example:** -- Toolkit will create TF for only those DRGs which are part of CD3 and skip Route Tables for the DRGs created outside of CD3. This will also synch DRG rules in your tenancy with the terraform state. +#### 4. +Toolkit will create TF for only those DRGs which are part of CD3 and skip Route Tables for the DRGs created outside of CD3. This will also synch DRG rules in your tenancy with the terraform state. > **Note** > When there are changes made in the OCI console manually, the above options of export and modify can be helpful to sync up the contents/objects in OCI to TF. -- Match All criteria specified for Route Distribution Statement In DRGs sheet will show below output each time you do terraform plan: +#### 5. 
+Match All criteria specified for Route Distribution Statement In DRGs sheet will show below output each time you do terraform plan: ![image](https://user-images.githubusercontent.com/122371432/214504858-2c5ba6af-b030-4f72-b6d9-8bc37b5902cf.png) The service api is designed in such a way that it expects an empty list for match all. And it sends back an empty list in the response every time. Hence this behaviour from terraform side. This can be safely ignored. - -- Export process for non greenfield tenancies v6.0 or higher will try to revert SGW for a VCN to point to all services if it was existing for just object storage. You will get output similiar to below when terraform plan is run (Option 3 with non-gf_tenancy set to true). +#### 6. +Export process for non greenfield tenancies v6.0 or higher will try to revert SGW for a VCN to point to all services if it was existing for just object storage. You will get output similiar to below when terraform plan is run (Option 3 with workflow_type set to export_resources). ``` # oci_core_service_gateway.VCN_sgw will be updated in-place @@ -101,7 +110,8 @@ } ``` -- If the description field is having any newlines in the tenancy then the export of the component and tf synch will show output similair to below: +#### 7. +If the description field is having any newlines in the tenancy then the export of the component and tf synch will show output similair to below: ``` # module.iam-policies[“ConnectorPolicy_notifications_2023-03-06T21-54-41-655Z”].oci_identity_policy.policy will be updated in-place @@ -123,12 +133,11 @@ This is how terraform handles newlines in the fields. Pleage ignore this and proceed with terraform apply. -- You might come across below error during export of NSGs(while runnig terraform import commands for NSGs) - ![image](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103508105/5a50cdb5-b6cf-49fa-b488-1419d32c6b13) - This occurs when NSG and the VCN are in different compartments. In such cases, please modify _nsgs.auto.tfvars, specify the compartment name of the VCN in network_compartment_id field of the problematic NSG. - -- Terraform ordering changes observed during plan phase for OCI compute plugin's. +#### 8. +Terraform ordering changes observed during plan phase for OCI compute plugins. ![image](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103548537/f6a2d481-5e79-484b-a24e-a8329e8b6626) - It changes the order of plugin's in terraform state file and doesn't change anything in OCI for compute resource. + It changes the order of plugin's in terraform state file and doesn't change anything in OCI console for compute resource. + + diff --git a/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md b/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md index f2e63d6f0..9bedaae0b 100644 --- a/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md +++ b/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md @@ -1,34 +1,39 @@ -# Launch Docker Container +# Launch the Container To ease the execution of toolkit, we have provided the steps to build an image which encloses the code base and its package dependencies. Follow the steps provided below to clone the repo, build the image and finally launch the container.
    ## Clone the repo -* Open your terminal and change the directory to the one where you want to download the git repo. +* Open your terminal and navigate to the directory where you plan to download the Git repo. * Run the git clone command as shown below:
            ```git clone https://github.com/oracle-devrel/cd3-automation-toolkit``` -* Once the cloning command completes successfully, the repo will replicate to the local directory. +* Once the cloning command is executed successfully, the repo will replicate to the local directory. ## Build an image * Change directory to 'cd3-automation-toolkit'(i.e. the cloned repo in your local). * Run ```docker build --platform linux/amd64 -t cd3toolkit:${image_tag} -f Dockerfile --pull --no-cache .```
    -
    Note : ${image_tag} should be replaced with suitable tag as per your requirements/standards. +
Note : ${image_tag} should be replaced with a suitable tag as per your requirements/standards, e.g. v2024.1.0<br>
               The period (.) at the end of the docker build command is required. ## Save the image (Optional) * Run ```docker save cd3toolkit:${image_tag} | gzip > cd3toolkit_${image_tag}.tar.gz``` -## Run CD3 container alongwith VPN (Applicable for VPN users only) +## Run the container alongwith VPN (Applicable for VPN users only) * Connect to the VPN. * Make sure you are using version **1.9** for **Rancher deskop**, if not please install the latest. * Make sure to Enable **Networking Tunnel** under Rancher settings as shown in the screenshot below, image -* Login to the CD3 docker container using next section and set the proxies which helps to connect internet(if any) from container. +* Login to the CD3 docker container using next section and set the proxies(if any) which helps to connect internet from the container. + +## Run the container +* Run ```docker run --platform linux/amd64 -it -p :8443 -d -v :/cd3user/tenancies :``` + + Eg for Mac: ```docker run --platform linux/amd64 -it -p 8443:8443 -d -v /Users//mount_path:/cd3user/tenancies cd3toolkit:v2024.1.0``` + + Eg for Windows: ```docker run --platform linux/amd64 -it -p 8443:8443 -d -v D:/mount_path:/cd3user/tenancies cd3toolkit:v2024.1.0``` -## Run the CD3 container -* Run ```docker run --platform linux/amd64 -it -d -v :/cd3user/tenancies :``` * Run ```docker ps```
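Once the container is up, logging in (covered in the next section) typically amounts to opening an interactive shell with docker exec; a minimal sketch, where the container ID or name comes from the `docker ps` output above:

```bash
docker ps                                    # note the container ID or name
docker exec -it <container_id_or_name> bash  # open a shell inside the toolkit container
```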

    diff --git a/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF-Jenkins.md new file mode 100644 index 000000000..165d6be48 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF-Jenkins.md @@ -0,0 +1,188 @@ +# Executing Networking Scenarios using toolkit via Jenkins + +## Managing Network for Greenfield Workflow +- [Create Network](#create-network) +- [Modify Network](#modify-network) +- [Modify Security Rules, Route Rules and DRG Route Rules](#modify-security-rules-route-rules-and-drg-route-rules) +- [Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform](#sync-manual-changes-done-in-oci-of-security-rules-route-rules-and-drg-route-rules-with-cd3-excel-sheet-and-terraform) +- [Add/Modify/Delete NSGs](#addmodifydelete-nsgs) +- [Add/Modify/Delete VLANs](#addmodifydelete-vlans) +- [Remote Peering Connections](#rpcs) + + +**NOTE-** + +### Create Network +Creation of Networking components using Automation Toolkit involves four simple steps. + - Add the networking resource details to appropriate Excel Sheets. + - Running the setUpOCI pipeline in the toolkit to generate auto.tfvars. + - Executing terraform pipeline to provision the resources in OCI. + - Exporting the automatically generated Security Rules and Route Rules by the toolkit to CD3 Excel Sheet. + +Below are the steps to create Network that includes VCNs, Subnets, DHCP, DRG, Security List, Route Tables, DRG Route Tables, NSGs, etc. + +1. Choose appropriate excel sheet from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md) and fill the required Network details in the Networking Tabs - VCNs, DRGs, VCN Info, DHCP, Subnets, NSGs tabs. + +2. Execute the _setupOCI_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_ + +3. Choose option _'Validate CD3'_ and then _'Validate Networks'_ to check for syntax errors in Excel sheet. Examine the log file generated at _/cd3user/tenancies//\_cd3validator.log_. If there are errors, please rectify them accordingly and proceed to the next step. + +4. Choose _'Create Network'_ under _'Network'_ from the displayed options. Click on Build. +301705033-c16f8d7b-dd8d-484d-b873-f5ad36facfa9 + +5. It will show different stages of execution of _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'network'. +6. Click on Proceed for 'Get Approval' stage of the terraform pipeline. + + This completes the creation of Networking components in OCI. Verify the components in console. However the details of the default security lists and default route tables are not available in the CD3 Excel sheet yet. Inorder to export that data please follow the below steps: + +7. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_ +8. Choose _'Network'_ from the displayed options. Choose below sub-options: (Make sure to choose all the three optionsfor the first time) + - Security Rules + - Export Security Rules (From OCI into SecRulesinOCI sheet) + - Route Rules + - Export Route Rules (From OCI into RouteRulesinOCI sheet) + - DRG Route Rules + - Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet) + Click on Build. + + Screenshot 2024-02-02 at 7 45 16 AM + + +This completes the steps for Creating the Network in OCI and exporting the default rules to the CD3 Excel Sheet using the Automation Toolkit. + +
    [Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins) +### Modify Network +Modifying the Networking components using Automation Toolkit involves three simple steps. + - Add/modify the details of networking components like the VCNs, Subnets, DHCP and DRG in Excel Sheet. + - Running the the setUpOCI pipeline in the toolkit to generate auto.tfvars. + - Executing Terraform pipeline to provision/modify the resources in OCI. + + ***Note***: Follow [these steps](#modify-security-rules-route-rules-and-drg-route-rules) to modify Security Rules, Route Rules and DRG Route Rules + +_Steps in detail_: +1. Modify your excel sheet to update required data in the Tabs - VCNs, DRGs, VCN Info, DHCP and Subnets. +2. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_ +3. To Validate the CD3 excel Tabs - Choose option _'Validate CD3'_ and _'Validate Networks'_ from sub-menu to check for syntax errors in Excel sheet. Examine the log file generated at _/cd3user/tenancies//\_cd3validator.logs_. If there are errors, please rectify them accordingly and proceed to the next step. +4. Choose option to _'Modify Network'_ under _'Network'_ from the displayed options. Once the execution is successful, multiple .tfvars related to networking like _\_major-objects.auto.tfvars_ and more will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. + + **Note-**: Make sure to export Sec Rules, Route Rules, DRG Route Rules to CD3 Excel Sheet before executing this option. + +6. It will show different stages of execution of _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'network'. +7. Click on Proceed for 'Get Approval' stage of the terraform pipeline. + +This completes the modification of Networking components in OCI. Verify the components in console. + +
    [Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins) +### Modify Security Rules, Route Rules and DRG Route Rules + +Follow the below steps to add, update or delete the following components: +- Security Lists and Security Rules +- Route Table and Route Rules +- DRG Route Table and DRG Route Rules + +1. Modify your excel sheet to update required data in the Tabs - RouteRulesInOCI, SecRulesInOCI, DRGRouteRulesInOCI tabs. + +2. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_ + +3. Choose _'Network'_ from the displayed options. Choose below sub-options: + - Security Rules + - Add/Modify/Delete Security Rules (Reads SecRulesinOCI sheet) + - Route Rules + - Add/Modify/Delete Route Rules (Reads RouteRulesinOCI sheet) + - DRG Route Rules + - Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet) + + Once the execution is successful, _\_seclists.auto.tfvars_, _\_routetables.auto.tfvars_ and _\_drg-routetables.auto.tfvars_ file will be generated under the folder _/cd3user/tenancies//terraform_files/_. Existing files will move into respective backup folders. + + **NOTE**: This will create TF for only those Security Lists and Route Tables in VCNs which are part of cd3 and skip any VCNs that have been created outside of cd3 execution. + +4. It will show different stages of execution of _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'network'. +5. Click on Proceed for 'Get Approval' stage of the terraform pipeline. + + This completes the modification of Security Rules, Route Rules and DRG Route Rules in OCI. Verify the components in console.
    + +
[Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins)
+### Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform
+Follow the below process to export the rules to the same CD3 Excel Sheet as the one used to Create Network, and to sync the Terraform files with OCI whenever a user adds, modifies or deletes rules in the OCI Console manually.
+
+**NOTE**: Make sure to close your Excel sheet during the export process.
+
+1. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_
+
+2. Choose _'Network'_ from the displayed menu. Choose the below sub-options:
+   - Security Rules
+     - Export Security Rules (From OCI into SecRulesinOCI sheet)
+     - Add/Modify/Delete Security Rules (Reads SecRulesinOCI sheet)
+   - Route Rules
+     - Export Route Rules (From OCI into RouteRulesinOCI sheet)
+     - Add/Modify/Delete Route Rules (Reads RouteRulesinOCI sheet)
+   - DRG Route Rules
+     - Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet)
+     - Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet)
+
+   Once the execution is successful, the 'RouteRulesInOCI', 'SecRulesInOCI' and 'DRGRouteRulesInOCI' tabs of the excel sheet will be updated with the rules exported from OCI, and _\_seclists.auto.tfvars_, _\routetables.auto.tfvars_ and _\drg-routetables.auto.tfvars_ files will be generated under the folder _/cd3user/tenancies//terraform_files//_
+
+3. It will show different stages of execution of the _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'network'.
+4. Click on Proceed for the 'Get Approval' stage of the terraform pipeline.
+
+This completes the export of Security Rules, Route Rules and DRG Route Rules from OCI. Terraform plan/apply should be in sync with OCI.
+
+
    [Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins) +### Add/Modify/Delete NSGs +Follow the below steps to update NSGs. + +1. Modify your excel sheet to update required data in the Tabs - NSGs. + +2. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_ + +3. Choose _'Network'_ from the displayed menu. Choose below sub-option: + - Network Security Groups + - Add/Modify/Delete NSGs (Reads NSGs sheet) + + Once the execution is successful, _\_nsgs.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. + +4. It will show different stages of execution of _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'nsg'. +5. Click on Proceed for 'Get Approval' stage of the terraform pipeline. + +This completes the modification of NSGs in OCI. Verify the components in console. + +
[Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins)
+
+### Add/Modify/Delete VLANs
+Follow the below steps to update VLANs.
+
+1. Modify your excel sheet to update required data in the Tabs - SubnetsVLANs.
+2. Make sure that the RouteRulesinOCI sheet and the corresponding terraform are in sync with the route rules in the OCI console. If not, please follow the procedure specified in [Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform](#sync-manual-changes-done-in-oci-of-security-rules-route-rules-and-drg-route-rules-with-cd3-excel-sheet-and-terraform)
+
+3. Execute the _setupOCI.py_ pipeline with _Workflow_ as _Create Resources in OCI(Greenfield Workflow)_
+4. Choose _'Network'_ from the displayed menu. Choose the below sub-option:
+   - Add/Modify/Delete VLANs (Reads SubnetsVLANs sheet)
+
+   Once the execution is successful, _\_vlans.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. The _\routetables.auto.tfvars_ file will also be updated with the route table information specified for each VLAN.
+
+5. It will show different stages of execution of the _setUpOCI_ pipeline and also launch the _terraform-apply_ pipeline for 'vlan' and 'network'.
+6. Click on Proceed for the 'Get Approval' stage of the terraform pipeline.
+
+7. Make sure to export the Route Rules in OCI into the Excel sheet and terraform again. Please follow the procedure specified in [Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform](#sync-manual-changes-done-in-oci-of-security-rules-route-rules-and-drg-route-rules-with-cd3-excel-sheet-and-terraform)
+
+This completes the modification of VLANs in OCI. Verify the components in the console.
+
+### RPCs
+Remote VCN peering is the process of connecting two VCNs in different regions (but the same tenancy). The peering allows the VCNs' resources to communicate using private IP addresses without routing the traffic over the internet or through your on-premises network.
+
+ - Modify your excel sheet to update required data in the Tabs - DRGs.
+ - The source and target RPC details are to be entered in the DRG sheet for establishing a connection. Please check the example in the excel file for reference.
+ - Make sure that the DRGRouteRulesinOCI sheet and the corresponding terraform are in sync with the DRG route rules in the OCI console. If not, please follow the procedure specified in [Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform](#sync-manual-changes-done-in-oci-of-security-rules-route-rules-and-drg-route-rules-with-cd3-excel-sheet-and-terraform)
+ - The 'global' directory inside the customer outdir will have all RPC related files and scripts.
+ - The RPC resources (modules, provider configurations, etc.) are generated dynamically for the tenancy and work only with the CD3 Automation Toolkit.
+ - Choose the option 'Network' and then 'Customer Connectivity' for creating RPCs in the Greenfield workflow.
+ - Output files are created under the _/cd3user/tenancies//terraform_files/global/rpc_ directory.
+
    [Go back to Networking Scenarios](#executing-networking-scenarios-using-toolkit-via-jenkins) +

    +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md index c91c48478..9abe016bb 100644 --- a/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md +++ b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md @@ -1,6 +1,6 @@ # Networking Scenarios -## Greenfield Tenancies (Managing Network for Green Field Tenancies) +## Managing Network for Greenfield Workflow - [Create Network](#create-network) - [Use an existing DRG in OCI while creating the network](#use-an-existing-drg-in-oci-while-creating-the-network) - [Modify Network](#modify-network) @@ -23,15 +23,15 @@ Creation of Networking components using Automation Toolkit involves four simple Below are the steps in detail to create Network that includes VCNs, Subnets, DHCP, DRG, Security List, Route Tables, DRG Route Tables, NSGs, etc. -1. Choose appropriate excel sheet from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md#excel-sheet-templates) and fill the required Network details in the Networking Tabs - VCNs, DRGs, VCN Info, DHCP, Subnets, NSGs tabs. +1. Choose appropriate excel sheet from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md) and fill the required Network details in the Networking Tabs - VCNs, DRGs, VCN Info, DHCP, Subnets, NSGs tabs. -2. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +2. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` 3. Choose option _'Validate CD3'_ and then _'Validate Network(VCNs, Subnets, DHCP, DRGs)'_ to check for syntax errors in Excel sheet. Examine the log file generated at _/cd3user/tenancies//\_cd3validator.log_. If there are errors, please rectify them accordingly and proceed to the next step. -4. Choose option to _'Create Network'_ under _'Network'_ from the displayed menu. Once the execution is successful, multiple .tfvars related to networking like _\_major-objects.auto.tfvars_ and more will be generated under the folder _/cd3user/tenancies//terraform_files/_ +4. Choose option to _'Create Network'_ under _'Network'_ from the displayed menu. Once the execution is successful, multiple .tfvars related to networking like _\_major-objects.auto.tfvars_ and more will be generated under the folder _/cd3user/tenancies//terraform_files//_ 5. Navigate to the above path and execute the terraform commands:

    _terraform init_ @@ -40,7 +40,7 @@ Below are the steps in detail to create Network that includes VCNs, Subnets, DHC This completes the creation of Networking components in OCI. Verify the components in console. However the details of the default security lists and default route tables may not be available in the CD3 Excel sheet yet. Inorder to export that data please follow the below steps: -6. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +6. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` @@ -66,7 +66,7 @@ In some scenarios, a DRG has already been created in the tenancy and rest of the
      → Terraform Plan will indicate to add all the other components except DRG.
    _terraform apply_ - Continue executing the remaining steps (from Step 6) of [Create Network](#1-create-network). + Continue executing the remaining steps (from Step 6) of [Create Network](#create-network).
    [Go back to Networking Scenarios](#networking-scenarios) ### Modify Network @@ -75,18 +75,18 @@ Modifying the Networking components using Automation Toolkit involves three simp - Running the toolkit to generate auto.tfvars. - Executing Terraform commands to provision/modify the resources in OCI. - ***Note***: Follow [these steps](#3-modify-security-rules-route-rules-and-drg-route-rules) to modify Security Rules, Route Rules and DRG Route Rules + ***Note***: Follow [these steps](#modify-security-rules-route-rules-and-drg-route-rules) to modify Security Rules, Route Rules and DRG Route Rules _Steps in detail_: 1. Modify your excel sheet to update required data in the Tabs - VCNs, DRGs, VCN Info, DHCP and Subnets. -2. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +2. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` 3. To Validate the CD3 excel Tabs - Choose option _'Validate CD3'_ and _'Validate Network(VCNs, Subnets, DHCP, DRGs)'_ from sub-menu to check for syntax errors in Excel sheet. Examine the log file generated at _/cd3user/tenancies//\_cd3validator.logs_. If there are errors, please rectify them accordingly and proceed to the next step. -4. Choose option to _'Modify Network'_ under _'Network'_ from the displayed menu. Once the execution is successful, multiple .tfvars related to networking like _\_major-objects.auto.tfvars_ and more will be generated under the folder _/cd3user/tenancies//terraform_files/_. Existing files will move into respective backup folders. +4. Choose option to _'Modify Network'_ under _'Network'_ from the displayed menu. Once the execution is successful, multiple .tfvars related to networking like _\_major-objects.auto.tfvars_ and more will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. **Note-**: Make sure to export Sec Rules, Route Rules, DRG Route Rules to CD3 Excel Sheet before executing this option. @@ -107,7 +107,7 @@ Follow the below steps to add, update or delete the following components: 1. Modify your excel sheet to update required data in the Tabs - RouteRulesInOCI, SecRulesInOCI, DRGRouteRulesInOCI tabs. -2. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +2. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` @@ -136,7 +136,7 @@ Follow the below process to export the rules to the same CD3 Excel Sheet as the **NOTE**: Make sure to close your Excel sheet during the export process. -1. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +1. 
Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` @@ -158,7 +158,7 @@ Follow the below process to export the rules to the same CD3 Excel Sheet as the - DRG Route Rules - Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet) - Once the execution is successful, _\_seclists.auto.tfvars_, _\routetables.auto.tfvars_ and _\drg-routetables.auto.tfvars_ file will be generated under the folder _/cd3user/tenancies//terraform_files/_ + Once the execution is successful, _\_seclists.auto.tfvars_, _\routetables.auto.tfvars_ and _\drg-routetables.auto.tfvars_ file will be generated under the folder _/cd3user/tenancies//terraform_files//_ Navigate to the above path and execute the terraform commands:

    _terraform init_ @@ -173,7 +173,7 @@ Follow the below steps to update NSGs. 1. Modify your excel sheet to update required data in the Tabs - NSGs. -2. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +2. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` @@ -181,7 +181,7 @@ Follow the below steps to update NSGs. - Network Security Groups - Add/Modify/Delete NSGs (Reads NSGs sheet) - Once the execution is successful, _\_nsgs.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files/_. Existing files will move into respective backup folders. + Once the execution is successful, _\_nsgs.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. 4. Navigate to the above path and execute the terraform commands:

    _terraform init_ @@ -198,14 +198,14 @@ Follow the below steps to update VLANs. 1. Modify your excel sheet to update required data in the Tabs - SubnetsVLANs. 2. Make sure that the RouteRulesinOCI sheet and corresponing terraform is in synch with route rules in OCI console. If not, please follow procedure specified in [Sync manual changes done in OCI of Security Rules, Route Rules and DRG Route Rules with CD3 Excel Sheet and Terraform](#sync-manual-changes-done-in-oci-of-security-rules-route-rules-and-drg-route-rules-with-cd3-excel-sheet-and-terraform) -3. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _false_: +3. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _create_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` 4. Choose _'Network'_ from the displayed menu. Choose below sub-option: - Add/Modify/Delete VLANs (Reads SubnetsVLANs sheet) - Once the execution is successful, _\_vlans.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files/_. Existing files will move into respective backup folders. _\routetables.auto.tfvars_ file will also be updated with the route table information specified for each VLAN. + Once the execution is successful, _\_vlans.auto.tfvars_ will be generated under the folder _/cd3user/tenancies//terraform_files//_. Existing files will move into respective backup folders. _\routetables.auto.tfvars_ file will also be updated with the route table information specified for each VLAN. 5. Navigate to the above path and execute the terraform commands:

    _terraform init_ @@ -231,7 +231,7 @@ Remote VCN peering is the process of connecting two VCNs in different regions (b

    diff --git a/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosNGF.md b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosNGF.md index 6a57bad91..f0387f7a2 100644 --- a/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosNGF.md +++ b/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosNGF.md @@ -1,6 +1,6 @@ # Networking Scenarios -## Non-Greenfield Tenancies (Managing Network for Non Green Field Tenancies) +## Managing Network for Non-Greenfield Workflow - [Export Network](#non-greenfield-tenancies) - [Add a new or modify the existing networking components](#add-a-new-or-modify-the-existing-networking-components) @@ -14,7 +14,7 @@ Follow the below steps to export the Networking components that includes VCNs, S 1. Use the [CD3-Blank-Template.xlsx](/cd3_automation_toolkit/example) to export the networking resources into the Tabs - VCNs, DRGs, VCN Info, DHCP, Subnets, NSGs, RouteRulesInOCI, SecRulesInOCI,DRGRouteRulesInOCI tabs. -2. Execute the _setupOCI.py_ file with _non_gf_tenancy_ parameter value to _true_: +2. Execute the _setupOCI.py_ file with _workflow_type_ parameter value to _export_resources_: ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` @@ -27,7 +27,7 @@ Follow the below steps to export the Networking components that includes VCNs, S - Export Network components for SubnetsVLANs Tab - Export Network components for NSGs Tab - Once the execution is successful, networking related .tfvars files and .sh files containing import statements will be generated under the folder _/cd3user/tenancies//terraform_files/_ + Once the execution is successful, networking related .tfvars files and .sh files containing import statements will be generated under the folder _/cd3user/tenancies//terraform_files//_ Also,The RPC related .tfvars and .sh files containing import statements will be generated in global directory which is inside the /cd3user/tenancies//terraform_files/ folder. @@ -55,8 +55,8 @@ Subnets tab:
    [Go back to Networking Scenarios](#networking-scenarios) ### Add a new or modify the existing networking components -1. Export the Networking components by following the steps [above](#1-export-network). (Note that here _non\_gf\_tenancy_ flag is set to true) -2. Follow the [process](/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md#modify-network) to add new components such as VCN/DHCP/DRG/IGW/NGW/SGW/LPG/Subnet etc. (Note that here _non\_gf\_tenancy_ flag is set to false) +1. Export the Networking components by following the steps [above](#export-network). (Note that here _workflow_type_ flag is set to export_resources) +2. Follow the [process](/cd3_automation_toolkit/documentation/user_guide/NetworkingScenariosGF.md#modify-network) to add new components such as VCN/DHCP/DRG/IGW/NGW/SGW/LPG/Subnet etc. (Note that here _workflow_type_ flag is set to create_resources)
    [Go back to Networking Scenarios](#networking-scenarios) @@ -64,7 +64,7 @@ Subnets tab:

    diff --git a/cd3_automation_toolkit/documentation/user_guide/NonGreenField-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/NonGreenField-Jenkins.md new file mode 100644 index 000000000..818a17a96 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/NonGreenField-Jenkins.md @@ -0,0 +1,75 @@ +# Export Resources from OCI via Jenkins(Non-Greenfield Workflow) + + +**Step 1**: +
    Choose the appropriate CD3 Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md) +Choose **CD3-Blank-template.xlsx** for an empty sheet. + +**Step 2**: +
Login to the Jenkins URL with the user created after initialization and click on the setUpOCI pipeline from the Dashboard. Click on **Build with Parameters** from the left-side menu. + +Screenshot 2024-01-16 at 10 56 42 AM
    + +>Note - Only one user at a time using the Jenkins setup is supported in the current release of the toolkit. + +**Step 3**: +
    Upload the above chosen Excel sheet in **Excel_Template** section. + +Screenshot 2024-01-16 at 11 04 47 AM
    +>This will copy the Excel file at `/cd3user/tenancies/` inside the container. It will also take backup of existing Excel on the container by appending the current datetime if same filename is uploaded in multiple executions. + + +**Step 4:** +
Select the workflow as **Export Resources from OCI** (Non-Greenfield Workflow). Choose single or multiple MainOptions as required and then the corresponding SubOptions. +
    Below screenshot shows export of Network and Compute. + +Screenshot 2024-01-17 at 7 11 42 PM
    + + +**Step 5:** +
Specify the region and compartment from which you want to export the data. +
It also asks for service-specific filters like display name patterns for compute. Leave them empty if no filter is needed. + +Screenshot 2024-01-17 at 7 10 56 PM
    +
    Click on **Build** at the bottom.
    + + +**Step 6:** +
    setUpOCI pipeline is triggered and stages are executed as shown below: + +Screenshot 2024-01-17 at 9 37 22 PM
    +
    + +**Expected Output of 'Execute setUpOCI' stage:**
    +
      +
1. Overwrites the specific tabs of Excel sheet with the exported resource details from OCI.
2. Generates Terraform Configuration files - *.auto.tfvars.
3. Generates shell scripts with import commands - tf_import_commands_<resource>_nonGF.sh
    + +**Expected Output of 'Run Import Commands' stage:**
    +
      +
1. Executes shell scripts with import commands (tf_import_commands_<resource>_nonGF.sh) generated in the previous stage.
    + +**Expected Output of Terraform Pipelines:**
    +
      +
1. Respective pipelines get triggered automatically from the setUpOCI pipeline based on the services chosen for export. You can also trigger them manually when required.
2. If the 'Run Import Commands' stage was successful (i.e. tf_import_commands_<resource>_nonGF.sh ran successfully for all services chosen for export), the triggered terraform pipelines should show the 'Terraform Plan' stage as 'No Changes'.
    + +
    + +> **Note:**
+> Once you have exported the required resources and imported them into the tfstate, you can use the toolkit to modify them or create new resources on top of them using the 'Create Resources in OCI' workflow. + +

    +
    + +| :arrow_backward: Prev | :arrow_forward: Next | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/NonGreenField.md b/cd3_automation_toolkit/documentation/user_guide/NonGreenField.md index af9bf8c08..150207001 100644 --- a/cd3_automation_toolkit/documentation/user_guide/NonGreenField.md +++ b/cd3_automation_toolkit/documentation/user_guide/NonGreenField.md @@ -1,4 +1,4 @@ -# Non-Green Field Tenancies +# Export Resources from OCI (Non-Greenfield Workflow) > **Note** @@ -7,17 +7,38 @@ > * Tool Kit then generates the TF configuration files/auto.tfvars files for these exported resources. > * It also generates a shell script - tf_import_commands_``_nonGF.sh that has the import commands, to import the state of the resources to tfstate file.(This helps to manage the resources via Terraform in future). -## Detailed Steps -Below are the steps that will help to configure the Automation Tool Kit to support the Non - Green Field Tenancies: **Step 1:** -
    Chose the appropriate CD3 Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md#excel-sheet-templates) +
Choose the appropriate CD3 Excel sheet template from [Excel Templates](/cd3_automation_toolkit/documentation/user_guide/ExcelTemplates.md) **Step 2:**
    Put CD3 Excel at the appropriate location. -
    Modify/Review [setUpOCI.properties](/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md#setupociproperties) with **non_gf_tenancy** set to **true** as shown below: -![image](https://user-images.githubusercontent.com/103508105/221798771-9bca7a1a-5ef3-4587-8138-97f65c4d7cf1.png) +
    Modify/Review _/cd3user/tenancies//\_setUpOCI.properties_ with **workflow_type** set to **export_resources** as shown below: +```ini +#Input variables required to run setUpOCI script +#path to output directory where terraform file will be generated. eg /cd3user/tenancies//terraform_files +outdir=/cd3user/tenancies/demotenancy/terraform_files/ + +#prefix for output terraform files eg like demotenancy +prefix=demotenancy + +# auth mechanism for OCI APIs - api_key,instance_principal,session_token +auth_mechanism=api_key + +#input config file for Python API communication with OCI eg /cd3user/tenancies//.config_files/_config; +config_file=/cd3user/tenancies/demotenancy/.config_files/demotenancy_oci_config + +# Leave it blank if you want single outdir or specify outdir_structure_file.properties containing directory structure for OCI services. +outdir_structure_file=/cd3user/tenancies/demotenancy/demotenancy_outdir_structure_file.properties + +#path to cd3 excel eg /cd3user/tenancies//CD3-Customer.xlsx +cd3file=/cd3user/tenancies/demotenancy/CD3-Blank-template.xlsx + +#specify create_resources to create new resources in OCI(greenfield workflow) +#specify export_resources to export resources from OCI(non-greenfield workflow) +workflow_type=export_resources +``` **Step 3:**
    Execute the SetUpOCI.py script to start exporting the resources to CD3 and creating the terraform configuration files. @@ -52,7 +73,7 @@ c. Shell Script with import commands - tf_import_commands_``_nonGF.sh > **Note**
    -> Once the export (including the execution of **tf_import_commands_``_nonGF.sh**) is complete, switch the value of **non_gf_tenancy** back to **false**. +> Once the export (including the execution of **tf_import_commands_``_nonGF.sh**) is complete, switch the value of **workflow_type** back to **create_resources**. > This allows the Tool Kit to support the tenancy as Green Field from this point onwards. ## Example - Export Identity @@ -62,7 +83,7 @@ Follow the below steps to quickly export Identity components from OCI. 2. Edit the _setUpOCI.properties_ at location:_/cd3user/tenancies //\_setUpOCI.properties_ with appropriate values. - Update the _cd3file_ parameter to specify the CD3 excel sheet path. - - Set the _non_gf_tenancy_ parameter value to _true_. (for Non Greenfield Workflow.) + - Set the _workflow_type_ parameter value to _export_resources_. (for Non Greenfield Workflow.) 3. Change Directory to 'cd3_automation_toolkit' : ```cd /cd3user/oci_tools/cd3_automation_toolkit/``` diff --git a/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md b/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md deleted file mode 100644 index 6780a6db8..000000000 --- a/cd3_automation_toolkit/documentation/user_guide/RunningAutomationToolkit.md +++ /dev/null @@ -1,97 +0,0 @@ -# **Getting Started with Automation Toolkit** -There are 2 main inputs to the Automation Toolkit. -- CD3 Excel Sheet -- setUpOCI.properties - -### **Excel Sheet Templates** - -Below are the CD3 templates for the latest release having standardised IAM Components (compartments, groups and policies), Network Components and Events & Notifications Rules as per CIS Foundations Benchmark for Oracle Cloud. - -Details on how to fill data into the excel sheet can be found in the Blue section of each sheet inside the excel file. Make appropriate changes to the templates eg region and use for deployment. - -|Excel Sheet| Purpose | -|-----------|----------------------------------------------------------------------------------------------------------------------------| -| [CD3-Blank-template.xlsx](/cd3_automation_toolkit/example) | Choose this template while exporting the existing resources from OCI into the CD3 and Terraform.| -| [CD3-CIS-template.xlsx](/cd3_automation_toolkit/example) | This template has auto-filled in data of CIS Landing Zone for DRGv2. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases) | -|[CD3-HubSpoke-template](/cd3_automation_toolkit/example) | This template has auto-filled in data for a Hub and Spoke model of networking. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases)| -|[CD3-SingleVCN-template](/cd3_automation_toolkit/example)| This template has auto-filled in data for a Single VCN model of networking. Choose this template to create Core OCI Objects (IAM, Tags, Networking, Instances, LBR, Storage, Databases)| -|[CD3-CIS-ManagementServices-template.xlsx](/cd3_automation_toolkit/example) | This template has auto-filled in data of CIS Landing Zone. Choose this template while creating the components of Events, Alarms, Notifications and Service Connectors| - - -> The Excel Templates can also be found at _/cd3user/oci_tools/cd3_automation_toolkit/example_ inside the container. 
-> After deploying the infra using any of the templates, please run [CIS compliance checker script](/cd3_automation_toolkit/documentation/user_guide/learn_more/CISFeatures.md#1-run-cis-compliance-checker-script)) - - -### **setUpOCI.properties** - -**Current Version: setUpOCI.properties v10.1** - -Make sure to use/modify the properties file at _/cd3user/tenancies //\_setUpOCI.properties_ during executions. - -``` -[Default] - -#Input variables required to run setUpOCI script - -#path to output directory where terraform file will be generated. eg /cd3user/tenancies//terraform_files -outdir= - -#prefix for output terraform files eg like demotenancy -prefix= - -#input config file for Python API communication with OCI eg /cd3user/tenancies//_config; -config_file= - -#path to cd3 excel eg /cd3user/tenancies//CD3-Customer.xlsx -cd3file= - -#Is it Non GreenField tenancy -non_gf_tenancy=false - -# Leave it blank if you want single outdir or specify outdir_structure_file.properties containing directory structure for OCI services. -outdir_structure_file= -``` - -| Variable | Description | Example | -|---|---|---| -|outdir|Path to output directory where terraform files will be generated| /cd3user/tenancies//terraform\_files| -|prefix|Prefix for output terraform files|\| -|config\_file|Python config file|/cd3user/tenancies//config| -| cd3file |Path to the CD3 input file |/cd3user/tenancies//testCD3. xlsx | -|non\_gf\_tenancy |Specify if its a Non Green field tenancy or not (**True** or **False**)| False| -|outdir\_structure\_file |Parameter specifying single outdir or different for different services|Blank or _gc2_outdir_structure_file| - -
    For more information on usage of non_gf_tenancy flag, refer to Automation Toolkit Workflows
    - -### **Execution Steps Overview:** -Choose the appropriate CD3 Excel Sheet and update the setUpOCI.properties file at _/cd3user/tenancies//\_setUpOCI.properties_ and run the commands below: - -**Step 1**: -
    Change Directory to 'cd3_automation_toolkit' -
    ```cd /cd3user/oci_tools/cd3_automation_toolkit/``` - -**Step 2**: -
    Place Excel sheet at appropriate location in your container and provide the corresponding path in _cd3file_ parmeter of: _/cd3user/tenancies //\_setUpOCI.properties_ file - -**Step 3** -
    -Execute the setUpOCI Script:
    ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` -
    → Example execution of the script: - -``` -[cd3user@25260a87b137 cd3_automation_toolkit]$ python setUpOCI.py /cd3user/tenancies/demotenancy/demotenancy_setUpOCI.properties -Updated OCI_Regions file !!! -Script to fetch the compartment OCIDs into variables file has not been executed. -Do you want to run it now? (y|n): -``` -→ This prompt appears for the very first time when you run the toolkit or when any new compartments are created using the toolkit. Enter 'y' to fetch the details of compartment OCIDs into variables file. -
    → After fetching the compartment details, the toolkit will display the menu options. - -

    -
    - -| :arrow_backward: Prev | Next :arrow_forward: | -| :---- | -------: | - -
- diff --git a/cd3_automation_toolkit/documentation/user_guide/Upgrade_Toolkit.md b/cd3_automation_toolkit/documentation/user_guide/Upgrade_Toolkit.md index b163bc824..cde6eb19a 100644 --- a/cd3_automation_toolkit/documentation/user_guide/Upgrade_Toolkit.md +++ b/cd3_automation_toolkit/documentation/user_guide/Upgrade_Toolkit.md @@ -1,5 +1,11 @@ # Steps to Upgrade Your Toolkit (For Existing Customers using older versions):
+## Upgrade to Release v2024.1.0
+This is a major release with introduction of CI/CD using Jenkins.
+1. Follow the steps in [Launch Docker Container](/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md) to build new image with latest code and launch the container by specifying new path for to create a fresh outdir.
+2. Use Non Greenfield workflow to export the required OCI services into new excel sheet and the tfvars. Run terraform import commands also.
+3. Once terraform is in sync, switch to the Greenfield workflow and use it for any future modifications to the infra.
+
## Upgrade to Release v12.1 from v12
1. Follow the steps in [Launch Docker Container](/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md) to build new image with latest code and launch the container by specifying same path for to keep using same outdir.
2. Copy sddc.tf from _/cd3user/oci_tools/cd3\_automation\_toolkit/user-scripts/terraform_files/_ to _/cd3user/tenancies//terraform\_files//_.
@@ -7,13 +13,13 @@
4. Copy the sddcs variable block from _/cd3user/oci_tools/cd3\_automation\_toolkit/user-scripts/terraform_files/variables_example.tf_ and replace it in your variables_\.tf file
## Upgrade to Release v12
-1. Follow the steps in Launch Docker Container to build new image with latest code and launch the container by specifying new path for to create a fresh outdir.
-2. Use Non Greenfield workflow to export the required OCI services into new excel sheet and the tfvars. Run terraform import commands also.
-3. Once terraform is in synch, Switch to Greenfield workflow and use for any future modifications to the infra.
+
## Upgrade to Release v11.1 from v11
1. Follow the steps in [Launch Docker Container](/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md) to build new image with latest code and launch the container by specifying same path for to keep using same outdir.
## Upgrade to Release v11
1. Follow the steps in [Launch Docker Container](/cd3_automation_toolkit/documentation/user_guide/Launch_Docker_container.md) to build new image with latest code and launch the container by specifying new path for to create a fresh outdir. 
diff --git a/cd3_automation_toolkit/documentation/user_guide/Workflows-jenkins.md b/cd3_automation_toolkit/documentation/user_guide/Workflows-jenkins.md new file mode 100644 index 000000000..68b0ae801 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/Workflows-jenkins.md @@ -0,0 +1,34 @@ +# Using the Automation Toolkit via Jenkins + +Jenkins integration with the toolkit is provided to jump-start your journey with CI/CD for IaC in OCI. A beginner level of understanding of Jenkins is required. + +## **Pre-reqs for Jenkins Configuration** +* The configurations are done when you execute createTenancyConfig.py in [Connect container to OCI Tenancy](/cd3_automation_toolkit/documentation/user_guide/Connect_container_to_OCI_Tenancy.md). Please validate them (a quick check is sketched after this list): + - jenkins.properties file is created under _/cd3user/tenancies/jenkins\_home_ as per input parameters in tenancyConfig.properties
    + - An Object Storage bucket is created in OCI in the specified compartment to manage tfstate remotely.
+ - A Customer Secret Key is configured for the user to serve as S3 credentials for the bucket.
    + - A DevOps Project, Repo and Topic are created in OCI in the specified compartment to store terraform_files. GIT is configured on the container with config file at ```/cd3user/.ssh/config```
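For a quick sanity check of these pre-reqs, something like the following can be run inside the container (a minimal sketch; the exact files generated by createTenancyConfig.py may differ slightly in your setup):

```
# Jenkins properties generated during tenancy configuration
cat /cd3user/tenancies/jenkins_home/jenkins.properties

# Git configuration used for the OCI DevOps repository
cat /cd3user/.ssh/config
```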
    + + +## **Bootstrapping of Jenkins in the toolkit** + +* Execute below cmd to start Jenkins -
    +```/usr/share/jenkins/jenkins.sh &``` + +* Access Jenkins URL using - + - https://\:\/
+ > Notes: + > - \ is the port mapped with the local system during docker container creation, e.g. 8443. + > - Network connectivity should be allowed on this host and port. + - It will prompt you to create the first user to access the Jenkins URL. This will be the admin user. + - The Automation Toolkit only supports a single-user Jenkins setup in this release. + - Once you log in, the Jenkins Dashboard will be displayed. A quick reachability check is sketched below. + + +
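As an optional quick check that Jenkins is reachable after starting it, a request can be sent to the login page (a sketch assuming the container port was published as 8443; substitute your own hostname and port):

```
# -k skips certificate verification in case the certificate is self-signed; expect the Jenkins login page HTML back
curl -k https://localhost:8443/login
```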

    + diff --git a/cd3_automation_toolkit/documentation/user_guide/Workflows.md b/cd3_automation_toolkit/documentation/user_guide/Workflows.md index 185323cba..138a22ff6 100644 --- a/cd3_automation_toolkit/documentation/user_guide/Workflows.md +++ b/cd3_automation_toolkit/documentation/user_guide/Workflows.md @@ -1,15 +1,86 @@ -# Using the Automation Toolkit +# Using the Automation Toolkit via CLI + +### **Prepare setUpOCI.properties** +**Current Version: setUpOCI.properties v2024.1.0** + +Make sure to use/modify the properties file at _/cd3user/tenancies //\_setUpOCI.properties_ during executions. + +```ini +[Default] + +#Input variables required to run setUpOCI script + +#path to output directory where terraform file will be generated. eg /cd3user/tenancies//terraform_files +outdir= + +#prefix for output terraform files eg like demotenancy +prefix= + +# auth mechanism for OCI APIs - api_key,instance_principal,session_token +auth_mechanism= + +#input config file for Python API communication with OCI eg /cd3user/tenancies//.config_files/_config; +config_file= + +# Leave it blank if you want single outdir or specify outdir_structure_file.properties containing directory structure for OCI services. +outdir_structure_file= + +#path to cd3 excel eg /cd3user/tenancies/\CD3-Customer.xlsx +cd3file= + +#specify create_resources to create new resources in OCI(greenfield workflow) +#specify export_resources to export resources from OCI(non-greenfield workflow) +workflow_type=create_resources +``` + +| Variable | Description | Example | +|---|---|---| +|outdir|Path to output directory where terraform files will be generated| /cd3user/tenancies//terraform\_files| +|prefix|Prefix for output terraform files|\| +|auth_mechanism|Authentication Mechanism for OCI APIs|api_key| +|config\_file|Python config file|/cd3user/tenancies//.config_files/_config| +|outdir\_structure\_file |Parameter specifying single outdir or different for different services|Blank or _outdir_structure_file.properties| +| cd3file |Path to the Excel input file |/cd3user/tenancies//testCD3. xlsx | +|workflow\_type |greenfield workflow or non-greenfield workflow| See Automation Toolkit Workflows for more information| + + +### **Automation Toolkit Workflows:** CD3 Automation Tool Kit supports 2 main workflows: -1. Green Field Tenancies - Empty OCI tenancy (or) do not need to modify / use any existing resources. -2. Non Green Field Tenancies - Need to use / manage existing resources. Export existing resources into CD3 & TF State, then use the Greenfield workflow. +1. Create Resources in OCI (Greenfield Workflow) - Empty OCI tenancy (or) do not need to modify / use any existing resources. +2. Export Resources from OCI (Non-Greenfield Workflow) - Need to use / manage existing resources. Export existing resources into CD3 & TF State, then use the Greenfield workflow. + + + +### **Execution Steps Overview:** +Choose the appropriate CD3 Excel Sheet and update the setUpOCI.properties file at _/cd3user/tenancies//\_setUpOCI.properties_ and run the commands below: + +**Step 1**: +
    Change Directory to 'cd3_automation_toolkit' +
    ```cd /cd3user/oci_tools/cd3_automation_toolkit/``` + +**Step 2**: +
Place the Excel sheet at an appropriate location in your container and provide the corresponding path in the _cd3file_ parameter of the _/cd3user/tenancies //\_setUpOCI.properties_ file + +**Step 3** +
    +Execute the setUpOCI Script:
    ```python setUpOCI.py /cd3user/tenancies//_setUpOCI.properties``` +
    → Example execution of the script: + +``` +[cd3user@25260a87b137 cd3_automation_toolkit]$ python setUpOCI.py /cd3user/tenancies/demotenancy/demotenancy_setUpOCI.properties +Updated OCI_Regions file !!! +Script to fetch the compartment OCIDs into variables file has not been executed. +Do you want to run it now? (y|n): +``` +→ This prompt appears for the very first time when you run the toolkit or when any new compartments are created using the toolkit. Enter 'y' to fetch the details of compartment OCIDs into variables file. +
    → After fetching the compartment details, the toolkit will display the menu options.

- diff --git a/cd3_automation_toolkit/documentation/user_guide/cli_jenkins.md b/cd3_automation_toolkit/documentation/user_guide/cli_jenkins.md new file mode 100644 index 000000000..7b6a1ad01 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/cli_jenkins.md @@ -0,0 +1,35 @@ +# Switch between using the toolkit via CLI and Jenkins UI + +> **Note -** + >***It is recommended to stick to using the toolkit either via CLI or via Jenkins.*** + +There can be scenarios when you need to update the **terraform_files** folder manually via CLI. Below are some examples: + +- You executed the setUpOCI script to generate tfvars for some resources via CLI. +- You updated the **variables_\.tf** file to update the image OCID or SSH Key for Compute or Database etc. + +Please follow the below steps to sync the local terraform_files folder to the OCI DevOps GIT Repo: + +- ```cd /cd3user/tenancies//terraform_files``` +- ```git status``` +
    Below screenshot shows changes in variables_phoenix.tf file under phoenix/compute folder. + + Screenshot 2024-01-17 at 9 12 39 PM + +- ```git add -A .``` + +- ```git commit -m "msg"``` + + Screenshot 2024-01-17 at 9 13 35 PM + +- ```git push``` + + Screenshot 2024-01-17 at 9 14 24 PM + +

    +
    + +| :arrow_backward: Prev | :arrow_forward: Next | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/learn_more/CD3ExcelTabs.md b/cd3_automation_toolkit/documentation/user_guide/learn_more/CD3ExcelTabs.md index f97ebb4ae..f9cfe270b 100644 --- a/cd3_automation_toolkit/documentation/user_guide/learn_more/CD3ExcelTabs.md +++ b/cd3_automation_toolkit/documentation/user_guide/learn_more/CD3ExcelTabs.md @@ -108,22 +108,22 @@ Click on the links below to know about the specifics of each tab in the excel sh #### Developer Services - - [OKE](https://github.com/oracle-devrel/cd3-automation-toolkit/blob/develop/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#oke-tab) + - [OKE](/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#oke-tab) Click here to view sample auto.tfvars for OKE components- Clusters, Nodepools #### Logging Services - - [VCN Flow Logs](https://github.com/oracle-devrel/cd3-automation-toolkit/blob/develop/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#vcn-flow-logs) - - [LBaaS Logs](https://github.com/oracle-devrel/cd3-automation-toolkit/blob/develop/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#lbaas-logs) -- [OSS Logs](https://github.com/oracle-devrel/cd3-automation-toolkit/blob/develop/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#oss-logs) + - [VCN Flow Logs](/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#vcn-flow-logs) + - [LBaaS Logs](/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#lbaas-logs) +- [OSS Logs](/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#oss-logs) Click here to view sample auto.tfvars for Logging components #### SDDCs Tab - - [OCVS](https://github.com/oracle-devrel/cd3-automation-toolkit/blob/develop/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#sddcs-tab) - + - [OCVS](/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md#sddcs-tab) + Click here to view sample auto.tfvars for OCVS diff --git a/cd3_automation_toolkit/documentation/user_guide/learn_more/OPAForCompliance.md b/cd3_automation_toolkit/documentation/user_guide/learn_more/OPAForCompliance.md index 87a43db80..722e16196 100755 --- a/cd3_automation_toolkit/documentation/user_guide/learn_more/OPAForCompliance.md +++ b/cd3_automation_toolkit/documentation/user_guide/learn_more/OPAForCompliance.md @@ -33,3 +33,11 @@ Alternatively, run the following command to evaluate just a sinle OPA rule say " This command will analyze the "tfplan.json" input file against the policy and display the evaluation results with a user-friendly format. + +
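As an illustration only (the toolkit's actual policy file and rule names are not shown here, so `terraform.rego` and `data.terraform.deny` below are placeholder names), a single rule can be evaluated against the generated plan JSON with the OPA CLI as follows:

```
# Evaluate one rule from a policy file against the Terraform plan JSON and print a readable result
opa eval --format pretty -d terraform.rego -i tfplan.json "data.terraform.deny"
```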

    +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | + +
    diff --git a/cd3_automation_toolkit/documentation/user_guide/learn_more/ResourceManagerUpload.md b/cd3_automation_toolkit/documentation/user_guide/learn_more/ResourceManagerUpload.md index 55b87ebca..4ce6e8408 100644 --- a/cd3_automation_toolkit/documentation/user_guide/learn_more/ResourceManagerUpload.md +++ b/cd3_automation_toolkit/documentation/user_guide/learn_more/ResourceManagerUpload.md @@ -3,8 +3,6 @@ This option will upload the created Terraform files & the tfstate (if present) to the OCI Resource Manager. -On choosing **"Developer Services"** in the SetUpOCI menu, choose **"Upload current terraform files/state to Resource Manager"** sub-option to upload the terraform outdir into OCI Resource Manager. - When prompted, specify the Region to create/upload the terraform files to Resource Manager Stack. Multiple regions can be specified as comma separated values. Specify 'global' to upload RPC related components which reside in 'global' directory. On the next prompt, enter the Compartment where the Stack should be created if it is for the first time. The toolkit will create a Stack for the region specified previously under the specified compartment. For global resources, stack will be created in the home region. @@ -27,4 +25,15 @@ Sample Execution: image - +

    + + + +> [!IMPORTANT] +> If you are using remote state and upload the stack to OCI Resource Manager using Upload current terraform files/state to Resource Manager under Developer Services, then running terraform plan/apply from OCI Resource Manager will not work and show below error: +> +Screenshot 2024-01-17 at 11 38 54 PM + +> You will have to remove backend.tf from the directory, bring the remote state into local and then re-upload the stack. +On choosing **"Developer Services"** in the SetUpOCI menu, choose **"Upload current terraform files/state to Resource Manager"** sub-option to upload the terraform outdir into OCI Resource Manager. + diff --git a/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md b/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md index 6d82b1d9d..21cbfb481 100644 --- a/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md +++ b/cd3_automation_toolkit/documentation/user_guide/learn_more/Tabs.md @@ -573,7 +573,10 @@ Note - ![image](https://user-images.githubusercontent.com/115973871/216242750-d84a79bf-5799-4e51-ba40-ca82a00d04aa.png) - Also, When the target kind is **'notifications'** the value for formatted messages parameter is set to **'true'** as default. Its set to **'false'** only when the source is 'streaming'. + +- After executing tf_import_commands during export of service connectors, the terraform plan will show log-sources ordering as changes and it rearranges the order for log-sources for that service connector if source/target kind is logging. This can be ignored and you can proceed with terraform apply. + ![image](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103548537/1005724e-ac03-4b45-8e3d-480c8826d065) ## OKE Tab @@ -678,6 +681,7 @@ Below TF file is created: Use this tab to create OCVS in your tenancy. >Note: +>As of now the toolkit supports single cluster SDDC. The column "SSH Key Var Name" accepts SSH key value directly or the name of variable declared in *variables.tf* under the **sddc_ssh_keys** variable containing the key value. Make sure to have an entry in variables_\.tf file with the name you enter in SSH Key Var Name field of the Excel sheet and put the value as SSH key value. >For Eg: If you enter the SSH Key Var Name as **ssh_public_key**, make an entry in variables_\.tf file as shown below: diff --git a/cd3_automation_toolkit/documentation/user_guide/multiple_options_GF-Jenkins.md b/cd3_automation_toolkit/documentation/user_guide/multiple_options_GF-Jenkins.md new file mode 100644 index 000000000..09a788918 --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/multiple_options_GF-Jenkins.md @@ -0,0 +1,28 @@ +# Provisioning of multiple services together + +>***Note - For any service that needs Network details eg compute, database, loadbalancers ets, 'network' pipeline needs to be executed prior to launching that service pipeline.*** + +Multiple options can be selected simultaneously while creating resources in OCI using setUpOCI pipeline . But if one of the services is dependent upon the availability of another service eg 'Network' (Create Network) and 'Compute' (Add Instances); In such scenarios, terraform-apply pipeline for compute will fail as shown in below screenshot (last stage in the pipeline) - +![tuxpi com 1706871371](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103508105/959dea07-b569-4908-967c-d4d1efbafe04) +
+ +* Clicking on 'Logs' for Stage: sanjose/compute and clicking on the pipeline will display the below - + +![tuxpi com 1706871675](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103508105/65536e92-6612-4c6e-9d79-4a347a5cee9a) +
+ +* Clicking on 'Logs' for the Terraform Plan stage displays - + +![tuxpi com 1706871787](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103508105/711e1687-690f-4cbd-8abc-3fd4da108f9f) + +- This is expected because the pipeline for 'compute' expects the network to already exist in OCI to launch a new instance. +- To resolve this, proceed with the terraform-apply pipeline for 'network' and, once it is successfully completed, trigger the terraform-apply pipeline for 'compute' manually by clicking on 'Build Now' from the left menu. + +![tuxpi com 1706871906](https://github.com/oracle-devrel/cd3-automation-toolkit/assets/103508105/c3b7adb9-183b-4b79-bf9e-d492b3a5f7aa) + + +

    +
    + +| :arrow_backward: Prev | Next :arrow_forward: | +| :---- | -------: | diff --git a/cd3_automation_toolkit/documentation/user_guide/remote_state.md b/cd3_automation_toolkit/documentation/user_guide/remote_state.md new file mode 100644 index 000000000..f8862640d --- /dev/null +++ b/cd3_automation_toolkit/documentation/user_guide/remote_state.md @@ -0,0 +1,44 @@ +# Store Terraform State into Object Storage Bucket + +> [!Caution] +> When utilizing remote state and deploying the stack to OCI Resource Manager through the **Upload current terraform files/state to Resource Manager** option under **Developer Services**, attempting to execute terraform plan/apply directly from OCI Resource Manager may result in below error. +> +Screenshot 2024-01-17 at 11 38 54 PM + +> This option is disabled while using the toolkit via Jenkins. While using it via CLI, you will have to remove backend.tf from the directory, bring the remote state into local and then upload the stack. + +

    +* Toolkit provides the option to store terraform state file(tfstate) into Object Storage bucket. +* This can be achieved by setting ```use_remote_state=yes``` under Advanced Parameters in ```tenancyconfig.properties``` file while executing ```createTenancyConfig.py```. +* Upon setting above parameter the script will - + - create a versioning enabled bucket in OCI tenancy in the specified region(if you don't specify anything in ```remote_state_bucket_name``` parameter to use an existing bucket) + - create a new customer secret key for the user, and configure it as S3 credentials to access the bucket. Before executing the createTenancyConfig.py script, ensure that the specified user in the DevOps User Details or identified by the user OCID does not already have the maximum limit of two customer secret keys assigned. + +* backend.tf file that gets generated - + + ``` + terraform { + backend "s3" { + key = "//terraform.tfstate" + bucket = "-automation-toolkit-bucket" + region = "" + endpoint = "https://.compat.objectstorage..oraclecloud.com" + shared_credentials_file = "/cd3user/tenancies//.config_files/_s3_credentials" + skip_region_validation = true + skip_credentials_validation = true + skip_metadata_api_check = true + force_path_style = true + } + } + ``` + +* For single outdir, tfstate for all subscribed regions will be stored as ```\terraform.tfstate``` eg ```london\terraform.tfstate``` for london ```phoenix\terraform.tfstate``` for phoenix. +* For multi outdir, tfstate for all services in all subscribed regions will be stored as ```\\terraform.tfstate``` eg ```london\tagging\terraform.tfstate``` for tagging dir in london region + +
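If the remote state ever needs to be brought back to local (for example, before re-uploading a stack to Resource Manager as cautioned above), one possible sequence is sketched below; this is standard Terraform behaviour rather than a toolkit-specific procedure:

```
# Run inside the outdir/service directory that currently uses the S3 backend
mv backend.tf backend.tf.bak        # take the remote backend definition out of the configuration
terraform init -migrate-state       # Terraform copies the remote state into a local terraform.tfstate
```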

    +
    + +| :arrow_backward: Prev | +| :---- | + +
    diff --git a/cd3_automation_toolkit/example/CD3-Blank-template.xlsx b/cd3_automation_toolkit/example/CD3-Blank-template.xlsx index 10ef50712..750f61012 100644 Binary files a/cd3_automation_toolkit/example/CD3-Blank-template.xlsx and b/cd3_automation_toolkit/example/CD3-Blank-template.xlsx differ diff --git a/cd3_automation_toolkit/example/CD3-CIS-ManagementServices-template.xlsx b/cd3_automation_toolkit/example/CD3-CIS-ManagementServices-template.xlsx index 02aef30da..742139022 100644 Binary files a/cd3_automation_toolkit/example/CD3-CIS-ManagementServices-template.xlsx and b/cd3_automation_toolkit/example/CD3-CIS-ManagementServices-template.xlsx differ diff --git a/cd3_automation_toolkit/example/CD3-CIS-template.xlsx b/cd3_automation_toolkit/example/CD3-CIS-template.xlsx index 3eb4aa442..4f94a294a 100644 Binary files a/cd3_automation_toolkit/example/CD3-CIS-template.xlsx and b/cd3_automation_toolkit/example/CD3-CIS-template.xlsx differ diff --git a/cd3_automation_toolkit/example/CD3-HubSpoke-template.xlsx b/cd3_automation_toolkit/example/CD3-HubSpoke-template.xlsx index 22d69f99f..891c5c726 100644 Binary files a/cd3_automation_toolkit/example/CD3-HubSpoke-template.xlsx and b/cd3_automation_toolkit/example/CD3-HubSpoke-template.xlsx differ diff --git a/cd3_automation_toolkit/example/CD3-SingleVCN-template.xlsx b/cd3_automation_toolkit/example/CD3-SingleVCN-template.xlsx index 60363d18a..6cf4e4339 100644 Binary files a/cd3_automation_toolkit/example/CD3-SingleVCN-template.xlsx and b/cd3_automation_toolkit/example/CD3-SingleVCN-template.xlsx differ diff --git a/cd3_automation_toolkit/setUpOCI.properties b/cd3_automation_toolkit/setUpOCI.properties index 4801f9fbb..237966fbf 100644 --- a/cd3_automation_toolkit/setUpOCI.properties +++ b/cd3_automation_toolkit/setUpOCI.properties @@ -8,14 +8,18 @@ outdir= #prefix for output terraform files eg like demotenancy prefix= -#input config file for Python API communication with OCI eg /cd3user/tenancies//_config; -config_file= - -#path to cd3 excel eg /cd3user/tenancies/\CD3-Customer.xlsx -cd3file= +# auth mechanism for OCI APIs - api_key,instance_principal,session_token +auth_mechanism= -#Is it Non GreenField tenancy -non_gf_tenancy=false +#input config file for Python API communication with OCI eg /cd3user/tenancies//.config_files/_config; +config_file= # Leave it blank if you want single outdir or specify outdir_structure_file.properties containing directory structure for OCI services. 
outdir_structure_file= + +#path to cd3 excel eg /cd3user/tenancies//CD3-Customer.xlsx +cd3file= + +#specify create_resources to create new resources in OCI(greenfield workflow) +#specify export_resources to export resources from OCI(non-greenfield workflow) +workflow_type=create_resources \ No newline at end of file diff --git a/cd3_automation_toolkit/setUpOCI.py b/cd3_automation_toolkit/setUpOCI.py index c2ea25355..2969efe47 100644 --- a/cd3_automation_toolkit/setUpOCI.py +++ b/cd3_automation_toolkit/setUpOCI.py @@ -59,68 +59,6 @@ def execute_options(options, *args, **kwargs): with section(option.text): option.callback(*args, **kwargs) -''' -def verify_outdir_is_empty(): - - print("\nChecking if the specified outdir contains tf files related to the OCI components being exported...") - tf_list = {} - for reg in ct.all_regions: - terraform_files = glob(f'{outdir}/{reg}/*.auto.tfvars') - tf_list[reg] = [file for file in terraform_files] - - - has_files = False - for reg in ct.all_regions: - if len(tf_list[reg]) > 0: - print(f'{outdir}/{reg} directory under outdir is not empty; contains below tf files.') - for files in tf_list[reg]: - print(files) - has_files = True - - if has_files: - print("\nMake sure you have clean tfstate file and outdir(other than provider.tf and variables_.tf) for fresh export.") - print("Existing tf files should not be conflicting with new tf files that are going to be generated with this process.") - proceed = input("Proceed y/n: ") - if proceed.lower() != 'y': - exit_menu("Exiting...") - else: - print("None Found. Proceeding to Export...") -''' - -''' -def get_compartment_list(ntk_compartment_ids,resource_name): - compartment_list_str = "Enter name of Compartment as it appears in OCI (comma separated without spaces if multiple)for which you want to export {};\nPress 'Enter' to export from all the Compartments: " - compartments = input(compartment_list_str.format(resource_name)) - input_compartment_names = list(map(lambda x: x.strip(), compartments.split(','))) if compartments else None - - remove_comps = [] - comp_list_fetch = [] - - print("\n") - # Process Compartment Filter - if input_compartment_names is not None: - for x in range(0, len(input_compartment_names)): - if (input_compartment_names[x] not in ntk_compartment_ids.keys()): - print("Input compartment: " + input_compartment_names[x] + " doesn't exist in OCI") - remove_comps.append(input_compartment_names[x]) - - input_compartment_names = [x for x in input_compartment_names if x not in remove_comps] - if (len(input_compartment_names) == 0): - print("None of the input compartments specified exist in OCI..Exiting!!!") - exit(1) - else: - print("Fetching for Compartments... 
" + str(input_compartment_names)) - comp_list_fetch = input_compartment_names - else: - print("Fetching for all Compartments...") - comp_ocids = [] - for key, val in ntk_compartment_ids.items(): - if val not in comp_ocids: - comp_ocids.append(val) - comp_list_fetch.append(key) - return comp_list_fetch -''' - def get_region_list(rm): if rm == False: resource_name = 'OCI resources' @@ -158,22 +96,18 @@ def get_region_list(rm): return region_list_fetch -def fetch_compartments(outdir, outdir_struct, config=DEFAULT_LOCATION): - configFileName = config - config = oci.config.from_file(config) - +def fetch_compartments(outdir, outdir_struct, ct): var_files={} var_data = {} - ct = commonTools() - ct.get_subscribedregions(configFileName) + home_region = ct.home_region print("outdir specified should contain region directories and then variables_.tf file inside the region directories eg /cd3user/tenancies//terraform_files") print("Verifying out directory and Taking backup of existing variables files...Please wait...") print("\nFetching Compartment Info...Please wait...") - ct.get_network_compartment_ids(config['tenancy'], "root", configFileName) + ct.get_network_compartment_ids(config['tenancy'], "root", config, signer) ct.all_regions.append('global') print("\nWriting to variables files...") @@ -266,7 +200,7 @@ def validate_cd3(execute_all=False): ] if not execute_all: options = show_options(options, quit=True, menu=False, index=1) - cd3Validator.validate_cd3(inputfile, var_file, prefix, outdir, options, config) + cd3Validator.validate_cd3(options, inputfile, var_file, prefix, outdir, ct) # config, signer, ct) print("Exiting CD3 Validation...") @@ -281,24 +215,24 @@ def export_identityOptions(): Option("Export Network Sources", export_networkSources, 'Exporting Network Sources') ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, config, ct) + execute_options(options, inputfile, outdir, service_dir, config, signer, ct) -def export_compartmentPoliciesGroups(inputfile, outdir, service_dir, config,ct): +def export_compartmentPoliciesGroups(inputfile, outdir, service_dir, config, signer, ct): compartments = ct.get_compartment_map(var_file, 'Identity Objects') - Identity.export_identity(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, ct=ct) + Identity.export_identity(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments) create_identity(execute_all=True) print("\n\nExecute tf_import_commands_identity_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") -def export_users(inputfile, outdir, service_dir, config,ct): - Identity.Users.export_users(inputfile, outdir, service_dir, _config=config, ct=ct) +def export_users(inputfile, outdir, service_dir, config,signer, ct): + Identity.Users.export_users(inputfile, outdir, service_dir, config, signer, ct) create_users(execute_all=True) print("\n\nExecute tf_import_commands_users_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") -def export_networkSources(inputfile, outdir, service_dir, config,ct): +def export_networkSources(inputfile, outdir, service_dir, config, signer, ct): compartments = ct.get_compartment_map(var_file, 'Identity Objects') - Identity.NetworkSources.export_networkSources(inputfile, outdir, service_dir, _config=config, ct=ct) + Identity.NetworkSources.export_networkSources(inputfile, outdir, service_dir, config, signer, ct) 
create_networkSources(execute_all=True) print("\n\nExecute tf_import_commands_networkSources_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") @@ -309,7 +243,7 @@ def export_tags(): service_dir = "" compartments = ct.get_compartment_map(var_file, 'Tagging Objects') - Governance.export_tags_nongreenfield(inputfile, outdir, service_dir, _config=config, export_compartments=compartments,ct=ct) + Governance.export_tags_nongreenfield(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments) create_tags() print("\n\nExecute tf_import_commands_tags_nonGF.sh script created under home region directory to synch TF with OCI Tags\n") @@ -337,16 +271,16 @@ def export_network(): ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir,prefix, config, export_regions,ct) + execute_options(options, inputfile, outdir, service_dir, config, signer, ct, export_regions) print("=====================================================================================================================") print("NOTE: Make sure to execute tf_import_commands_network_major-objects_nonGF.sh before executing the other scripts.") print("=====================================================================================================================") -def export_networking(inputfile, outdir, service_dir, prefix,config,export_regions,ct): +def export_networking(inputfile, outdir, service_dir,config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file,'Network Objects') - Network.export_networking(inputfile, outdir, service_dir,_config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) + Network.export_networking(inputfile, outdir, service_dir,config, signer, ct, export_compartments=compartments, export_regions=export_regions) if len(service_dir) != 0: @@ -355,30 +289,37 @@ def export_networking(inputfile, outdir, service_dir, prefix,config,export_regio service_dir_network = "" options = [ Option(None, Network.create_major_objects, 'Processing VCNs and DRGs Tab'), + ] + execute_options(options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + + options = [ Option(None, Network.create_rpc_resource, 'Processing RPCs in DRGs Tab'), + ] + execute_options(options, inputfile, outdir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy) + + options = [ Option(None, Network.create_terraform_dhcp_options, 'Processing DHCP Tab'), Option(None, Network.modify_terraform_secrules, 'Processing SecRulesinOCI Tab'), Option(None, Network.modify_terraform_routerules, 'Processing RouteRulesinOCI Tab'), - #Option(None, Network.create_terraform_drg_route,'Processing DRGs tab for DRG Route Tables and Route Distribution creation'), Option(None, Network.modify_terraform_drg_routerules, 'Processing DRGRouteRulesinOCI Tab'), ] - execute_options(options, inputfile, outdir, service_dir_network, prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) options = [ Option(None, Network.create_terraform_drg_route,'Processing DRGs tab for DRG Route Tables and Route Distribution creation'), ] - execute_options(options, inputfile, outdir, service_dir_network, prefix, non_gf_tenancy, config=config, + execute_options(options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy, network_connectivity_in_setupoci='', modify_network=False) 
options = [ Option(None, Network.create_terraform_subnet_vlan, 'Processing SubnetsVLANs Tab for Subnets'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, non_gf_tenancy, config=config,network_vlan_in_setupoci='network') + execute_options(options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='network') options = [ Option(None, Network.create_terraform_subnet_vlan, 'Processing SubnetsVLANs Tab for VLANs'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, non_gf_tenancy, config=config,network_vlan_in_setupoci='vlan') + execute_options(options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='vlan') if len(service_dir) != 0: @@ -388,102 +329,105 @@ def export_networking(inputfile, outdir, service_dir, prefix,config,export_regio options = [ Option(None, Network.create_terraform_nsg, 'Processing NSGs Tab'), ] - execute_options(options, inputfile, outdir, service_dir_nsg, prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir, service_dir_nsg, prefix, ct) print("\n\nExecute tf_import_commands_network_*_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") -def export_major_objects(inputfile, outdir, service_dir,prefix,config,export_regions,ct): +def export_major_objects(inputfile, outdir, service_dir, config, signer, ct, export_regions): if len(service_dir) != 0: service_dir_network = service_dir['network'] else: service_dir_network = "" compartments = ct.get_compartment_map(var_file,'VCN Major Objects') - Network.export_major_objects(inputfile, outdir, service_dir_network, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - Network.export_drg_routetable(inputfile, export_compartments=compartments, export_regions=export_regions, service_dir=service_dir_network,_config=config, _tf_import_cmd=True, outdir=outdir,ct=ct) + Network.export_major_objects(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.export_drg_routetable(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) options = [ Option(None, Network.create_major_objects, 'Processing VCNs and DRGs Tab'), - Option(None, Network.create_rpc_resource, 'Processing RPCs in DRGs Tab') ] - execute_options(options, inputfile, outdir,service_dir_network, prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir,service_dir_network, prefix, ct, non_gf_tenancy) + + options = [ + Option(None, Network.create_rpc_resource, 'Processing RPCs in DRGs Tab'), + ] + execute_options(options, inputfile, outdir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy) options = [ Option(None, Network.create_terraform_drg_route,'Processing DRGs tab for DRG Route Tables and Route Distribution creation'), ] - execute_options(options, inputfile, outdir, service_dir_network, prefix, non_gf_tenancy, config=config, - network_connectivity_in_setupoci='', modify_network=False) + execute_options(options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy,network_connectivity_in_setupoci='', modify_network=False) print("\n\nExecute tf_import_commands_network_major-objects_nonGF.sh and tf_import_commands_network_drg_routerules_nonGF.sh scripts created under each region directory to synch TF with OCI Network objects\n") 
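All of the export/create hunks above and below route their work through the same Option/execute_options dispatch shown in the hunk context near the top of this file's diff. Below is a minimal sketch of that pattern, with assumed class shapes (the real Option class and section() helper are defined elsewhere in setUpOCI.py and are not part of this diff); it illustrates why every call site's trailing arguments must be updated in lock-step with the new callback signatures (config, signer, ct, export_regions).

# Sketch only: assumed shapes, not the toolkit's actual classes.
from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class Option:
    name: Optional[str]      # menu text, or None for always-run steps
    callback: Callable       # e.g. Network.export_dhcp
    text: str                # progress banner, e.g. 'Processing DHCP Tab'

def execute_options(options, *args, **kwargs):
    # Every positional/keyword argument after the options list is forwarded
    # verbatim to each selected option's callback.
    for option in options:
        print(f"--- {option.text} ---")
        option.callback(*args, **kwargs)

# Hypothetical usage mirroring the refactored export call sites above:
def demo_callback(inputfile, outdir, service_dir, config, signer, ct, export_regions):
    print(inputfile, outdir, service_dir, export_regions)

execute_options(
    [Option(None, demo_callback, 'Processing a tab')],
    "CD3-Customer.xlsx", "/cd3user/tenancies/demo", "", None, None, None, ["ashburn"],
)

Because the arguments are forwarded positionally, any mismatch between a call site and its callback's parameter order only surfaces at runtime, which is why each signature change in this diff is mirrored at every execute_options call that can reach it.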
-def export_dhcp(inputfile, outdir, service_dir,prefix,config,export_regions,ct): +def export_dhcp(inputfile, outdir, service_dir,config,signer,ct,export_regions): if len(service_dir) != 0: service_dir_network = service_dir['network'] else: service_dir_network = "" compartments = ct.get_compartment_map(var_file,'DHCP') - Network.export_dhcp(inputfile, outdir, service_dir_network,_config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) + Network.export_dhcp(inputfile, outdir, service_dir_network,config, signer, ct, export_compartments=compartments, export_regions=export_regions) options = [ Option(None, Network.create_terraform_dhcp_options, 'Processing DHCP Tab'), ] - execute_options(options, inputfile, outdir, service_dir_network,prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir, service_dir_network,prefix, ct, non_gf_tenancy, ct) print("\n\nExecute tf_import_commands_network_dhcp_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") -def export_secrules(inputfile, outdir, service_dir,prefix,config,export_regions,ct): +def export_secrules(inputfile, outdir, service_dir,config,signer,ct,export_regions): if len(service_dir) != 0: service_dir_network = service_dir['network'] else: service_dir_network = "" compartments = ct.get_compartment_map(var_file,'SecRulesInOCI') - Network.export_seclist(inputfile, export_compartments=compartments, export_regions=export_regions, service_dir=service_dir_network,_config=config, _tf_import_cmd=True, outdir=outdir,ct=ct) + Network.export_seclist(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) options = [ Option(None, Network.modify_terraform_secrules, 'Processing SecRulesinOCI Tab'), ] - execute_options(options, inputfile, outdir,service_dir_network, prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir,service_dir_network, prefix, ct, non_gf_tenancy) print("\n\nExecute tf_import_commands_network_secrules_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") -def export_routerules(inputfile, outdir, service_dir,prefix,config,export_regions,ct): +def export_routerules(inputfile, outdir, service_dir,config,signer,ct,export_regions): if len(service_dir) != 0: service_dir_network = service_dir['network'] else: service_dir_network = "" compartments = ct.get_compartment_map(var_file,'RouteRulesInOCI') - Network.export_routetable(inputfile, export_compartments=compartments, export_regions=export_regions, service_dir=service_dir_network,_config=config, _tf_import_cmd=True, outdir=outdir,ct=ct) + Network.export_routetable(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) options = [ Option(None, Network.modify_terraform_routerules, 'Processing RouteRulesinOCI Tab'), ] - execute_options(options, inputfile, outdir, service_dir_network,prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir, service_dir_network,prefix, ct, non_gf_tenancy) print("\n\nExecute tf_import_commands_network_routerules_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") -def export_subnets_vlans(inputfile, outdir, service_dir,prefix,config,export_regions, ct): +def export_subnets_vlans(inputfile, outdir, service_dir,config,signer,ct,export_regions): compartments = 
ct.get_compartment_map(var_file,'Subnets') - Network.export_subnets_vlans(inputfile, outdir, service_dir,_config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) + Network.export_subnets_vlans(inputfile, outdir, service_dir,config, signer, ct, export_compartments=compartments, export_regions=export_regions) options = [ Option(None, Network.create_terraform_subnet_vlan, 'Processing SubnetsVLANs Tab for Subnets'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, non_gf_tenancy, config=config, + execute_options(options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='network') options = [ Option(None, Network.create_terraform_subnet_vlan, 'Processing SubnetsVLANs Tab for VLANs'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, non_gf_tenancy, config=config, + execute_options(options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='vlan') print("\n\nExecute tf_import_commands_network_subnets_nonGF.sh script created under each region directory to synch TF with OCI Network objects") print("\nExecute tf_import_commands_network_vlans_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") -def export_nsg(inputfile, outdir, service_dir, prefix,config,export_regions,ct): +def export_nsg(inputfile, outdir, service_dir,config,signer,ct,export_regions): if len(service_dir) != 0: service_dir_nsg = service_dir['nsg'] else: service_dir_nsg = "" compartments = ct.get_compartment_map(var_file,'NSGs') - Network.export_nsg(inputfile, export_compartments=compartments, export_regions=export_regions, service_dir=service_dir_nsg,_config=config, _tf_import_cmd=True, outdir=outdir,ct=ct) + Network.export_nsg(inputfile, outdir,service_dir_nsg, config,signer,ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) options = [ Option(None, Network.create_terraform_nsg, 'Processing NSGs Tab'), ] - execute_options(options, inputfile, outdir, service_dir_nsg,prefix, non_gf_tenancy, config=config) + execute_options(options, inputfile, outdir, service_dir_nsg,prefix, ct) print("\n\nExecute tf_import_commands_network_nsg_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") def export_compute(): @@ -491,19 +435,19 @@ def export_compute(): Option("Export Instances (excludes instances launched by OKE)", export_instances, 'Exporting Instances')] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config, export_regions, ct) + execute_options(options, inputfile, outdir, config, signer, ct, export_regions) -def export_dedicatedvmhosts(inputfile, outdir, prefix,config, export_regions, ct): +def export_dedicatedvmhosts(inputfile, outdir, config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['dedicated-vm-host'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'Dedicated VM Hosts') - Compute.export_dedicatedvmhosts(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - create_dedicatedvmhosts(inputfile, outdir, service_dir, prefix, config) + Compute.export_dedicatedvmhosts(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + create_dedicatedvmhosts(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute 
tf_import_commands_dedicatedvmhosts_nonGF.sh script created under each region directory to synch TF with OCI Dedicated VM Hosts\n") -def export_instances(inputfile, outdir, prefix,config,export_regions,ct): +def export_instances(inputfile, outdir,config,signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['instance'] else: @@ -517,8 +461,8 @@ def export_instances(inputfile, outdir, prefix,config,export_regions,ct): display_names = list(map(lambda x: x.strip(), display_name_str.split(','))) if display_name_str else None ad_names = list(map(lambda x: x.strip(), ad_name_str.split(','))) if ad_name_str else None - Compute.export_instances(inputfile, outdir, service_dir,config=config, export_compartments=compartments, export_regions=export_regions, ct=ct, display_names = display_names, ad_names = ad_names) - create_instances(inputfile, outdir, service_dir,prefix, config) + Compute.export_instances(inputfile, outdir, service_dir,config,signer,ct, export_compartments=compartments, export_regions=export_regions, display_names = display_names, ad_names = ad_names) + create_instances(inputfile, outdir, service_dir,prefix, ct) print("\n\nExecute tf_import_commands_instances_nonGF.sh script created under each region directory to synch TF with OCI Instances\n") @@ -528,9 +472,9 @@ def export_storage(): Option("Export Object Storage Buckets", export_buckets, 'Exporting Object Storage')] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config, export_regions, ct) + execute_options(options, inputfile, outdir, config, signer, ct, export_regions) -def export_block_volumes(inputfile, outdir, prefix,config,export_regions,ct): +def export_block_volumes(inputfile, outdir,config,signer,ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['block-volume'] else: @@ -545,30 +489,30 @@ def export_block_volumes(inputfile, outdir, prefix,config,export_regions,ct): display_names = list(map(lambda x: x.strip(), display_name_str.split(','))) if display_name_str else None ad_names = list(map(lambda x: x.strip(), ad_name_str.split(','))) if ad_name_str else None - Storage.export_blockvolumes(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions, display_names = display_names, ad_names = ad_names,ct=ct) - create_block_volumes(inputfile, outdir, prefix, config=config) + Storage.export_blockvolumes(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions, display_names = display_names, ad_names = ad_names) + create_block_volumes(inputfile, outdir, prefix, ct) print("\n\nExecute tf_import_commands_blockvolumes_nonGF.sh script created under each region directory to synch TF with OCI Block Volume Objects\n") -def export_fss(inputfile, outdir, prefix,config,export_regions,ct): +def export_fss(inputfile, outdir,config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['fss'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'FSS objects') - Storage.export_fss(inputfile, outdir, service_dir, config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - create_fss(inputfile, outdir, prefix, config=config) + Storage.export_fss(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + create_fss(inputfile, outdir, prefix, ct) print("\n\nExecute 
tf_import_commands_fss_nonGF.sh script created under each region directory to synch TF with OCI FSS objects\n") -def export_buckets(inputfile, outdir, prefix, config, export_regions, ct): +def export_buckets(inputfile, outdir, config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['object-storage'] else: service_dir = "" compartments = ct.get_compartment_map(var_file, 'Buckets') - Storage.export_buckets(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - Storage.create_terraform_oss(inputfile, outdir, service_dir, prefix, config=config) + Storage.export_buckets(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + Storage.create_terraform_oss(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_buckets_nonGF.sh script created under each region directory to synch TF with OCI Object Storage Buckets\n") def export_loadbalancer(): @@ -576,26 +520,26 @@ def export_loadbalancer(): Option("Export Network Load Balancers", export_nlb,'Exporting NLB Objects')] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config, export_regions,ct) + execute_options(options, inputfile, outdir, config, signer, ct, export_regions) -def export_lbr(inputfile, outdir, prefix,config,export_regions,ct): +def export_lbr(inputfile, outdir,config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['loadbalancer'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'LBR objects') - Network.export_lbr(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - create_lb(inputfile, outdir, prefix, config=config) + Network.export_lbr(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + create_lb(inputfile, outdir, prefix, ct) print("\n\nExecute tf_import_commands_lbr_nonGF.sh script created under each region directory to synch TF with OCI LBR objects\n") -def export_nlb(inputfile, outdir, prefix,config,export_regions,ct): +def export_nlb(inputfile, outdir,config,signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['networkloadbalancer'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'NLB objects') - Network.export_nlb(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - create_nlb(inputfile, outdir, prefix, config=config) + Network.export_nlb(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + create_nlb(inputfile, outdir, prefix, ct) print("\n\nExecute tf_import_commands_nlb_nonGF.sh script created under each region directory to synch TF with OCI NLB objects\n") def export_databases(): @@ -604,38 +548,38 @@ def export_databases(): Option('Export ADBs', export_adbs, 'Exporting Autonomous Databases')] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config, export_regions, ct) + execute_options(options, inputfile, outdir, config, signer, ct, export_regions) -def export_dbsystems_vm_bm(inputfile, outdir, prefix,config,export_regions,ct): +def export_dbsystems_vm_bm(inputfile, outdir,config,signer, ct,export_regions): if len(outdir_struct) != 
0: service_dir = outdir_struct['dbsystem-vm-bm'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'VM and BM DB Systems') - Database.export_dbsystems_vm_bm(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions= export_regions, ct=ct) - Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir, prefix, config=config) + Database.export_dbsystems_vm_bm(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_dbsystems-vm-bm_nonGF.sh script created under each region directory to synch TF with DBSystems\n") -def export_exa_infra_vmclusters(inputfile, outdir, prefix,config,export_regions,ct): +def export_exa_infra_vmclusters(inputfile, outdir,config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['database-exacs'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'EXA Infra and EXA VMClusters') - Database.export_exa_infra(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions= export_regions, ct=ct) - Database.export_exa_vmclusters(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions= export_regions, ct=ct) - create_exa_infra_vmclusters(inputfile, outdir, service_dir, prefix,config=config) + Database.export_exa_infra(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.export_exa_vmclusters(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + create_exa_infra_vmclusters(inputfile, outdir, service_dir, prefix,ct) print("\n\nExecute tf_import_commands_exa-infra_nonGF.sh and tf_import_commands_exa-vmclusters_nonGF.sh scripts created under each region directory to synch TF with Exa-Infra and Exa-VMClusters\n") -def export_adbs(inputfile, outdir, prefix,config,export_regions,ct): +def export_adbs(inputfile, outdir,config, signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['adb'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'ADBs') - Database.export_adbs(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions= export_regions, ct=ct) - Database.create_terraform_adb(inputfile, outdir, service_dir, prefix, config) + Database.export_adbs(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.create_terraform_adb(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_adb_nonGF.sh script created under each region directory to synch TF with OCI ADBs\n") def export_management_services(): @@ -649,46 +593,46 @@ def export_management_services(): Option("Export Service Connectors", export_service_connectors, 'Exporting Service Connectors')] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config, export_regions,ct) + execute_options(options, inputfile, outdir, service_dir, config, signer, ct, export_regions) -def export_notifications(inputfile, outdir, service_dir, prefix,config, export_regions,ct): +def export_notifications(inputfile, outdir, service_dir, config, signer, ct, export_regions): 
compartments = ct.get_compartment_map(var_file,'Notifications') - ManagementServices.export_notifications(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - ManagementServices.create_terraform_notifications(inputfile, outdir, service_dir, prefix, config=config) + ManagementServices.export_notifications(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_notifications(inputfile, outdir, service_dir, ct) print("\n\nExecute tf_import_commands_notifications_nonGF.sh script created under each region directory to synch TF with OCI Notifications\n") -def export_events(inputfile, outdir, service_dir, prefix,config, export_regions,ct): +def export_events(inputfile, outdir, service_dir, config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file,'Events') - ManagementServices.export_events(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - ManagementServices.create_terraform_events(inputfile, outdir, service_dir, prefix, config=config) + ManagementServices.export_events(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_events(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_events_nonGF.sh script created under each region directory to synch TF with OCI Events\n") -def export_alarms(inputfile, outdir, service_dir, prefix,config, export_regions, ct): +def export_alarms(inputfile, outdir, service_dir, config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file,'Alarms') - ManagementServices.export_alarms(inputfile, outdir, service_dir, _config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - ManagementServices.create_terraform_alarms(inputfile, outdir,service_dir, prefix, config=config) + ManagementServices.export_alarms(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_alarms(inputfile, outdir,service_dir, prefix, ct) print("\n\nExecute tf_import_commands_alarms_nonGF.sh script created under each region directory to synch TF with OCI Alarms\n") -def export_service_connectors(inputfile, outdir, service_dir, prefix, config, export_regions,ct): +def export_service_connectors(inputfile, outdir, service_dir, config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file,'Service Connectors') - ManagementServices.export_service_connectors(inputfile, outdir, service_dir, _config=config,export_compartments=compartments, export_regions=export_regions,ct=ct) - ManagementServices.create_service_connectors(inputfile, outdir, service_dir, prefix, config=config) + ManagementServices.export_service_connectors(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_service_connectors(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_serviceconnectors_nonGF.sh script created under each region directory to synch TF with OCI Service Connectors\n") -def export_development_services(): +def export_developer_services(): options = [Option("Export OKE cluster and Nodepools", export_oke, 'Exporting OKE'), ] options = show_options(options, 
quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config, export_regions,ct) + execute_options(options, inputfile, outdir, config, signer, ct, export_regions) -def export_oke(inputfile, outdir, prefix,config,export_regions,ct): +def export_oke(inputfile, outdir, config,signer, ct, export_regions): if len(outdir_struct) != 0: service_dir = outdir_struct['oke'] else: service_dir = "" compartments = ct.get_compartment_map(var_file,'OKE') - DeveloperServices.export_oke(inputfile, outdir, service_dir,_config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir,prefix, config=config) + DeveloperServices.export_oke(inputfile, outdir, service_dir,config,signer,ct, export_compartments=compartments, export_regions=export_regions) + DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir,prefix, ct) print("\n\nExecute tf_import_commands_oke_nonGF.sh script created under each region directory to synch TF with OKE\n") def export_sddc(): @@ -697,8 +641,8 @@ def export_sddc(): else: service_dir = "" compartments = ct.get_compartment_map(var_file,'SDDCs') - SDDC.export_sddc(inputfile, outdir, service_dir,config=config, export_compartments=compartments, export_regions=export_regions,ct=ct) - SDDC.create_terraform_sddc(inputfile, outdir, service_dir, prefix, config=config) + SDDC.export_sddc(inputfile, outdir, service_dir,config,signer,ct, export_compartments=compartments, export_regions=export_regions) + SDDC.create_terraform_sddc(inputfile, outdir, service_dir, prefix, ct) print("\n\nExecute tf_import_commands_sddcs_nonGF.sh script created under each region directory to synch TF with SDDC\n") def export_dns(): @@ -712,19 +656,19 @@ def export_dns(): ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config, export_regions, ct) + execute_options(options, inputfile, outdir, service_dir, config, signer, ct, export_regions) -def export_dns_views_zones_rrsets(inputfile, outdir, service_dir, prefix, config, export_regions, ct): +def export_dns_views_zones_rrsets(inputfile, outdir, service_dir, config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file, 'DNS Views ,attached zones and rrsets') filter_str1 = "Do you want to export default views/zones/records (y|n), Default is n: " dns_filter = "n" if input(filter_str1).lower() != 'y' else "y" - Network.export_dns_views_zones_rrsets(inputfile, _outdir=outdir, service_dir=service_dir, _config=config, ct=ct, dns_filter=dns_filter, export_compartments=compartments, export_regions=export_regions) - create_terraform_dns(inputfile, outdir, service_dir, prefix, config) + Network.export_dns_views_zones_rrsets(inputfile, outdir, service_dir, config, signer, ct, dns_filter=dns_filter, export_compartments=compartments, export_regions=export_regions) + create_terraform_dns(inputfile, outdir, service_dir, prefix, ct) -def export_dns_resolvers(inputfile, outdir, service_dir, prefix, config, export_regions, ct): +def export_dns_resolvers(inputfile, outdir, service_dir, config, signer, ct, export_regions): compartments = ct.get_compartment_map(var_file, 'DNS Resolvers') - Network.export_dns_resolvers(inputfile, _outdir=outdir, service_dir=service_dir, _config=config, ct=ct, export_compartments=compartments, export_regions=export_regions) - Network.create_terraform_dns_resolvers(inputfile, outdir, service_dir, prefix, config) + 
Network.export_dns_resolvers(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.create_terraform_dns_resolvers(inputfile, outdir, service_dir, prefix, ct) def cd3_services(): @@ -734,9 +678,9 @@ def cd3_services(): Option('Fetch Protocols to OCI_Protocols', fetch_protocols, 'Fetch Protocols'), ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, outdir, outdir_struct, config=config) + execute_options(options, outdir, outdir_struct, ct) -def fetch_protocols(outdir, outdir_struct, config): +def fetch_protocols(outdir, outdir_struct, ct): cd3service.fetch_protocols() ################## Create Functions ########################## @@ -755,7 +699,7 @@ def create_identity(execute_all=False): if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir,service_dir, prefix, config=config) + execute_options(options, inputfile, outdir,service_dir, prefix, ct) def create_networkSources(execute_all=False): if len(outdir_struct) != 0: @@ -768,7 +712,7 @@ def create_networkSources(execute_all=False): if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_users(execute_all=False): @@ -782,7 +726,16 @@ def create_users(execute_all=False): if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) + +def create_tags(): + if len(outdir_struct) != 0: + service_dir = outdir_struct['tagging'] + else: + service_dir = "" + options = [Option(None, Governance.create_terraform_tags, 'Processing Tags Tab')] + execute_options(options, inputfile, outdir, service_dir, prefix, ct) + def create_network(execute_all=False): if len(outdir_struct) != 0: @@ -793,7 +746,6 @@ def create_network(execute_all=False): options = [ Option('Create Network - overwrites all TF files; reverts all SecLists and RouteTables to original rules', Network.create_all_tf_objects, 'Create All Objects'), Option('Modify Network - It will read VCNs, DRGs, SubnetsVLANs and DHCP sheets and update the TF', modify_terraform_network, 'Modifying Network'), - #Option('Enable VCN Flow Logs', create_cis_vcnflow_logs, 'VCN Flow Logs'), Option('Security Rules', export_modify_security_rules, 'Security Rules'), Option('Route Rules', export_modify_route_rules, 'Route Rules'), Option('DRG Route Rules', export_modify_drg_route_rules, 'DRG Route Rules'), @@ -803,12 +755,12 @@ def create_network(execute_all=False): ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) + execute_options(options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy=non_gf_tenancy) -def modify_terraform_network(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config): - Network.create_all_tf_objects(inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy, modify_network=True) +def modify_terraform_network(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + Network.create_all_tf_objects(inputfile, outdir, service_dir, prefix, ct, 
non_gf_tenancy=non_gf_tenancy, modify_network=True, ) -def export_modify_security_rules(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config): +def export_modify_security_rules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): execute_all = False if len(service_dir) != 0: service_dir = service_dir['network'] @@ -821,13 +773,21 @@ def export_modify_security_rules(inputfile, outdir, service_dir, prefix, non_gf_ ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) -def export_security_rules(inputfile, outdir, prefix, service_dir, config, non_gf_tenancy): + for option in options: + options1 = [] + options1.append(option) + if (option.name == 'Export Security Rules (From OCI into SecRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + elif (option.name == 'Add/Modify/Delete Security Rules (Reads SecRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + + +def export_security_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): compartments = ct.get_compartment_map(var_file, 'OCI Security Rules') - Network.export_seclist(inputfile, export_compartments=compartments, export_regions= export_regions, service_dir=service_dir, _config=config, _tf_import_cmd=False, outdir=None,ct=ct) + Network.export_seclist(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) -def export_modify_route_rules(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config): +def export_modify_route_rules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): execute_all = False if len(service_dir) != 0: service_dir = service_dir['network'] @@ -840,13 +800,21 @@ def export_modify_route_rules(inputfile, outdir, service_dir, prefix, non_gf_ten ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) -def export_route_rules(inputfile, outdir, service_dir, prefix, config, non_gf_tenancy): + for option in options: + options1 = [] + options1.append(option) + if (option.name == 'Export Route Rules (From OCI into RouteRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + elif (option.name == 'Add/Modify/Delete Route Rules (Reads RouteRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + + +def export_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): compartments = ct.get_compartment_map(var_file, 'OCI Route Rules') - Network.export_routetable(inputfile, export_compartments=compartments, export_regions= export_regions,service_dir=service_dir, _config=config, _tf_import_cmd=False, outdir=None,ct=ct) + Network.export_routetable(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) -def export_modify_drg_route_rules(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config): +def export_modify_drg_route_rules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): execute_all = False if len(service_dir) != 0: service_dir = 
service_dir['network'] @@ -859,14 +827,22 @@ def export_modify_drg_route_rules(inputfile, outdir, service_dir, prefix, non_gf ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) -def export_drg_route_rules(inputfile, outdir, service_dir, prefix, config, non_gf_tenancy): + for option in options: + options1 = [] + options1.append(option) + if (option.name == 'Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + elif (option.name == 'Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet)'): + execute_options(options1, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + + +def export_drg_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): compartments = ct.get_compartment_map(var_file,'OCI DRG Route Rules') - Network.export_drg_routetable(inputfile, export_compartments=compartments, export_regions= export_regions,service_dir=service_dir, _config=config, _tf_import_cmd=False, outdir=None,ct=ct) + Network.export_drg_routetable(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) -def export_modify_nsgs(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config): +def export_modify_nsgs(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): if len(service_dir) != 0: service_dir = service_dir['nsg'] else: @@ -878,22 +854,22 @@ def export_modify_nsgs(inputfile, outdir, service_dir, prefix, non_gf_tenancy, c ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def export_nsgs(inputfile, outdir, service_dir, prefix, config, non_gf_tenancy): +def export_nsgs(inputfile, outdir, service_dir, prefix, ct): compartments = ct.get_compartment_map(var_file,'OCI NSGs') - Network.export_nsg(inputfile, export_compartments=compartments, export_regions= export_regions,service_dir=service_dir, _config=config, _tf_import_cmd=False, outdir=None,ct=ct) + Network.export_nsg(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) -def create_vlans(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config,network_vlan_in_setupoci='vlan'): +def create_vlans(inputfile, outdir, service_dir, prefix,ct, non_gf_tenancy, network_vlan_in_setupoci='vlan'): if len(service_dir) != 0: service_dir_network = service_dir['network'] else: service_dir_network = "" - Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy, network_vlan_in_setupoci='vlan',modify_network=True) - Network.create_terraform_route(inputfile, outdir, service_dir_network, prefix, config=config, non_gf_tenancy=non_gf_tenancy, network_vlan_in_setupoci='vlan',modify_network=True) + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy=non_gf_tenancy, network_vlan_in_setupoci='vlan',modify_network=True) + Network.create_terraform_route(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy, 
network_vlan_in_setupoci='vlan',modify_network=True) -def create_drg_connectivity(inputfile, outdir, service_dir, prefix, non_gf_tenancy, config,network_vlan_in_setupoci='vlan'): +def create_drg_connectivity(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy,network_vlan_in_setupoci='vlan'): execute_all = False if len(service_dir) != 0: service_dir_network = service_dir['network'] @@ -906,21 +882,11 @@ def create_drg_connectivity(inputfile, outdir, service_dir, prefix, non_gf_tena ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, service_dir_network, prefix, config=config, non_gf_tenancy=non_gf_tenancy) - -def create_rpc(inputfile, outdir, service_dir, service_dir_network, prefix, non_gf_tenancy, config): - Network.create_rpc_resource(inputfile, outdir, service_dir, prefix, config=config, non_gf_tenancy=non_gf_tenancy) - Network.create_terraform_drg_route(inputfile, outdir, service_dir_network, prefix, config=config, - non_gf_tenancy=non_gf_tenancy, network_connectivity_in_setupoci='connectivity', modify_network=True) - -def create_tags(): - if len(outdir_struct) != 0: - service_dir = outdir_struct['tagging'] - else: - service_dir = "" - options = [Option(None, Governance.create_terraform_tags, 'Processing Tags Tab')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy=non_gf_tenancy) +def create_rpc(inputfile, outdir, service_dir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy): + Network.create_rpc_resource(inputfile, outdir, service_dir, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy=non_gf_tenancy) + Network.create_terraform_drg_route(inputfile, outdir, service_dir_network, prefix, non_gf_tenancy=non_gf_tenancy, ct=ct, network_connectivity_in_setupoci='connectivity', modify_network=True) def create_compute(): if len(outdir_struct) != 0: @@ -929,13 +895,13 @@ def create_compute(): service_dir = "" options = [ - Option('Add/Modify/Delete Dedicated VM Hosts', Compute.create_terraform_dedicatedhosts, 'Processing Dedicated VM Hosts Tab'), + Option('Add/Modify/Delete Dedicated VM Hosts', create_dedicatedvmhosts, ''), Option('Add/Modify/Delete Instances/Boot Backup Policy', create_instances,''), ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir,prefix,config) + execute_options(options, inputfile, outdir, service_dir,prefix, ct) -def create_instances(inputfile, outdir, service_dir_nt,prefix,config): +def create_instances(inputfile, outdir, service_dir_nt,prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['instance'] else: @@ -944,11 +910,11 @@ def create_instances(inputfile, outdir, service_dir_nt,prefix,config): options = [ Option(None, Compute.create_terraform_instances, 'Processing Instances Tab') ] - execute_options(options, inputfile, outdir, service_dir, prefix,config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_dedicatedvmhosts(inputfile, outdir, service_dir, prefix,config): +def create_dedicatedvmhosts(inputfile, outdir, service_dir, prefix,ct): options = [Option(None, Compute.create_terraform_dedicatedhosts, 'Processing Dedicated VM Hosts Tab')] - execute_options(options, inputfile, outdir, service_dir,prefix,config=config) + 
execute_options(options, inputfile, outdir, service_dir,prefix, ct) def create_storage(execute_all=False): @@ -960,9 +926,9 @@ def create_storage(execute_all=False): ] options = show_options(options, quit=True, menu=True, index=1) if not execute_all: - execute_options(options, inputfile, outdir,prefix, config) + execute_options(options, inputfile, outdir,prefix, ct) -def create_block_volumes(inputfile, outdir, prefix,config): +def create_block_volumes(inputfile, outdir, prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['block-volume'] else: @@ -970,23 +936,23 @@ def create_block_volumes(inputfile, outdir, prefix,config): options = [ Option(None, Storage.create_terraform_block_volumes, 'Processing BlockVolumes Tab') ] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_fss(inputfile, outdir, prefix,config): +def create_fss(inputfile, outdir, prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['fss'] else: service_dir = "" options = [Option(None, Storage.create_terraform_fss, 'Processing FSS Tab')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_buckets(inputfile, outdir, prefix,config): +def create_buckets(inputfile, outdir, prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['object-storage'] else: service_dir = "" options = [Option(None, Storage.create_terraform_oss, 'Processing Buckets Tab')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_loadbalancer(execute_all=False): @@ -997,9 +963,9 @@ def create_loadbalancer(execute_all=False): ] options = show_options(options, quit=True, menu=True, index=1) if not execute_all: - execute_options(options, inputfile, outdir, prefix, config) + execute_options(options, inputfile, outdir, prefix, ct) -def create_lb(inputfile, outdir, prefix, config): +def create_lb(inputfile, outdir, prefix, ct): if len(outdir_struct) != 0: service_dir = outdir_struct['loadbalancer'] else: @@ -1011,9 +977,9 @@ def create_lb(inputfile, outdir, prefix, config): Option(None, Network.create_path_route_set, 'Creating Path Route Sets'), Option(None, Network.create_ruleset, 'Creating Rule Sets'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_nlb(inputfile, outdir, prefix, config): +def create_nlb(inputfile, outdir, prefix, ct): if len(outdir_struct) != 0: service_dir = outdir_struct['networkloadbalancer'] else: @@ -1022,7 +988,7 @@ def create_nlb(inputfile, outdir, prefix, config): Option(None, Network.create_terraform_nlb_listener, 'Creating NLB and Listeners'), Option(None, Network.create_nlb_backendset_backendservers, 'Creating NLB Backend Sets and Backend Servers'), ] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_databases(execute_all=False): if len(outdir_struct) != 0: @@ -1036,23 +1002,23 @@ def create_databases(execute_all=False): ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, 
service_dir, prefix, ct) -def create_terraform_dbsystems_vm_bm(inputfile, outdir,service_dir_nt, prefix,config): +def create_terraform_dbsystems_vm_bm(inputfile, outdir,service_dir_nt, prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['dbsystem-vm-bm'] else: service_dir = "" - Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir, prefix, config=config) + Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir, prefix, ct) -def create_exa_infra_vmclusters(inputfile, outdir,service_dir_nt, prefix,config): +def create_exa_infra_vmclusters(inputfile, outdir,service_dir_nt, prefix,ct): if len(outdir_struct) != 0: service_dir = outdir_struct['database-exacs'] else: service_dir = "" options = [Option(None, Database.create_terraform_exa_infra, 'Processing Exa-Infra Tab'), Option(None, Database.create_terraform_exa_vmclusters, 'Processing Exa-VM-Clusters Tab')] - execute_options(options, inputfile, outdir, service_dir, prefix, config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_management_services(execute_all=False): if len(outdir_struct) != 0: @@ -1068,7 +1034,7 @@ def create_management_services(execute_all=False): ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_developer_services(execute_all=False): @@ -1078,25 +1044,25 @@ def create_developer_services(execute_all=False): ] if not execute_all: options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, prefix, config=config) + execute_options(options, inputfile, outdir, prefix, auth_mechanism, config_file_path,ct) -def create_rm_stack(inputfile, outdir, prefix, config): +def create_rm_stack(inputfile, outdir, prefix, auth_mechanism, config_file, ct): regions = get_region_list(rm = True) - DeveloperServices.create_resource_manager(outdir,var_file, outdir_struct, prefix, regions, config) + DeveloperServices.create_resource_manager(outdir,var_file, outdir_struct, prefix, auth_mechanism, config_file, ct, regions) -def create_oke(inputfile, outdir, prefix, config): +def create_oke(inputfile, outdir, prefix, auth_mechanism, config_file, ct): if len(outdir_struct) != 0: service_dir = outdir_struct['oke'] else: service_dir = "" - DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir, prefix, config) + DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir, prefix, ct) def create_sddc(): if len(outdir_struct) != 0: service_dir = outdir_struct['sddc'] else: service_dir = "" - SDDC.create_terraform_sddc(inputfile, outdir, service_dir, prefix, config=config) + SDDC.create_terraform_sddc(inputfile, outdir, service_dir, prefix, ct) def create_dns(): if len(outdir_struct) != 0: @@ -1110,12 +1076,12 @@ def create_dns(): 'Processing DNS-Resolvers Tab') ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_terraform_dns(inputfile, outdir, service_dir, prefix, config): - Network.create_terraform_dns_views(inputfile, outdir, service_dir, prefix, config) - Network.create_terraform_dns_zones(inputfile, outdir, service_dir, prefix, config) - Network.create_terraform_dns_rrsets(inputfile, outdir, service_dir, prefix, config) +def 
create_terraform_dns(inputfile, outdir, service_dir, prefix, ct): + Network.create_terraform_dns_views(inputfile, outdir, service_dir, prefix, ct) + Network.create_terraform_dns_zones(inputfile, outdir, service_dir, prefix, ct) + Network.create_terraform_dns_rrsets(inputfile, outdir, service_dir, prefix, ct) def create_logging(): if len(outdir_struct) != 0: @@ -1128,34 +1094,34 @@ def create_logging(): Option('Enable Object Storage Buckets Write Logs', create_cis_oss_logs, 'OSS Write Logs') ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, inputfile, outdir, service_dir, prefix, config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_cis_vcnflow_logs(inputfile, outdir, service_dir, prefix, config): +def create_cis_vcnflow_logs(inputfile, outdir, service_dir, prefix, ct): if len(service_dir) != 0: service_dir = service_dir['network'] else: service_dir = "" options = [Option(None, ManagementServices.enable_cis_vcnflow_logging, 'Enabling VCN Flow Logs')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def enable_lb_logs(inputfile, outdir, service_dir, prefix, config): +def enable_lb_logs(inputfile, outdir, service_dir, prefix, ct): if len(service_dir) != 0: service_dir = service_dir['loadbalancer'] else: service_dir = "" options = [Option(None, ManagementServices.enable_load_balancer_logging, 'Enabling LBaaS Logs')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) -def create_cis_oss_logs(inputfile, outdir, service_dir, prefix, config): +def create_cis_oss_logs(inputfile, outdir, service_dir, prefix, ct): if len(service_dir) != 0: service_dir = service_dir['object-storage'] else: service_dir = "" options = [Option(None, ManagementServices.enable_cis_oss_logging, 'Enabling OSS Write Logs')] - execute_options(options, inputfile, outdir, service_dir, prefix, config=config) + execute_options(options, inputfile, outdir, service_dir, prefix, ct) def create_cis_features(): @@ -1165,7 +1131,7 @@ def create_cis_features(): Option("Enable Cloud Guard", enable_cis_cloudguard, 'Enable Cloud Guard'),] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, outdir, prefix, config) + execute_options(options, outdir, prefix, config_file_path) def create_cis_keyvault(*args,**kwargs): if len(outdir_struct) != 0: @@ -1179,7 +1145,7 @@ def create_cis_keyvault(*args,**kwargs): comp_name = input("Enter name of compartment as it appears in OCI Console: ") options = [Option(None, Security.create_cis_keyvault, 'Creating KeyVault')] - execute_options(options, outdir, service_dir, service_dir_iam,prefix,region_name, comp_name, config=config) + execute_options(options, outdir, service_dir, service_dir_iam,prefix, ct, region_name, comp_name) def create_cis_budget(*args,**kwargs): if len(outdir_struct) != 0: @@ -1190,7 +1156,7 @@ def create_cis_budget(*args,**kwargs): amount = input("Enter Monthly Budget Amount (in US$): ") threshold = input("Enter Threshold Percentage of Budget: ") options = [Option(None, Governance.create_cis_budget, 'Creating Budget')] - execute_options(options, outdir, service_dir, prefix,amount,threshold, config=config) + execute_options(options, outdir, service_dir, prefix,ct, amount,threshold) def enable_cis_cloudguard(*args,**kwargs): if len(outdir_struct) != 0: @@ -1200,17 +1166,17 
@@ def enable_cis_cloudguard(*args,**kwargs): region = input("Enter Reporting Region for Cloud Guard eg london: ") options = [Option(None, Security.enable_cis_cloudguard, 'Enabling Cloud Guard')] - execute_options(options, outdir, service_dir, prefix,region,config=config) + execute_options(options, outdir, service_dir, prefix, ct, region) -def initiate_cis_scan(outdir, prefix, config): +def initiate_cis_scan(outdir, prefix, config_file): options = [ Option("CD3 Image already contains the latest CIS compliance checking script available at the time of cd3 image release.\n Download latest only if new version of the script is available", start_cis_download, 'Download CIS script'), Option("Execute compliance checking script", start_cis_scan, 'Execute CIS script'), ] options = show_options(options, quit=True, menu=True, index=1) - execute_options(options, outdir, prefix, config) + execute_options(options, outdir, prefix, config_file) -def start_cis_download(outdir, prefix, config): +def start_cis_download(outdir, prefix, config_file): print("Downloading the script file as 'cis_reports.py' at location "+os.getcwd()) resp = requests.get("https://raw.githubusercontent.com/oracle-quickstart/oci-cis-landingzone-quickstart/main/scripts/cis_reports.py") resp_contents = resp.text @@ -1218,7 +1184,7 @@ def start_cis_download(outdir, prefix, config): fd.write(resp_contents) print("Download complete!!") -def start_cis_scan(outdir, prefix, config): +def start_cis_scan(outdir, prefix, config_file): cmd = "python cis_reports.py" user_input = input("Enter command to execute the script. Press Enter to execute {} : ".format(cmd)) if user_input!='': @@ -1236,16 +1202,16 @@ def start_cis_scan(outdir, prefix, config): else: commonTools.backup_file(outdir, resource, out_rep) - out = ["-c", config, '--report-directory', out_rep] + out = ["-c", config_file, '--report-directory', out_rep] cmd = cmd +" "+ out[0] + " "+out[1] + " "+ out[2] + " " +out[3] split.extend(out) print("Executing: "+cmd) print("Scan started!") - execute(split) + execute(split, config_file) -def execute(command): - export_cmd_windows = "set OCI_CONFIG_HOME="+config - export_cmd_linux = "export OCI_CONFIG_HOME=" + config +def execute(command,config_file): + export_cmd_windows = "set OCI_CONFIG_HOME="+config_file + export_cmd_linux = "export OCI_CONFIG_HOME=" + config_file export_cmd = "" if "linux" in sys.platform: export_cmd = export_cmd_linux @@ -1254,7 +1220,7 @@ def execute(command): if export_cmd == "": print("Failed to get OS details. Exiting!!") - exit() + exit(1) split_export_cmd = str.split(export_cmd) #subprocess.Popen(split_export_cmd, stdout=subprocess.PIPE,bufsize=1) @@ -1270,16 +1236,23 @@ def execute(command): parser = argparse.ArgumentParser(description='Sets Up OCI via TF') parser.add_argument('propsfile', help="Full Path of properties file containing input variables. 
eg setUpOCI.properties") args = parser.parse_args() -config1 = configparser.RawConfigParser() -config1.read(args.propsfile) +setUpOCI_props = configparser.RawConfigParser() +setUpOCI_props.read(args.propsfile) #Read Config file Variables try: - non_gf_tenancy = config1.get('Default', 'non_gf_tenancy').strip().lower() == 'true' - inputfile = config1.get('Default','cd3file').strip() - outdir = config1.get('Default', 'outdir').strip() - prefix = config1.get('Default', 'prefix').strip() - config = config1.get('Default', 'config_file').strip() or DEFAULT_LOCATION + workflow_type = setUpOCI_props.get('Default', 'workflow_type').strip().lower() + + if (workflow_type == 'export_resources'): + non_gf_tenancy = True + else: + non_gf_tenancy = False + + inputfile = setUpOCI_props.get('Default','cd3file').strip() + outdir = setUpOCI_props.get('Default', 'outdir').strip() + prefix = setUpOCI_props.get('Default', 'prefix').strip() + auth_mechanism = setUpOCI_props.get('Default', 'auth_mechanism').strip().lower() + config_file_path = setUpOCI_props.get('Default', 'config_file').strip() or DEFAULT_LOCATION if not outdir: exit_menu('input outdir location cannot be left blank. Exiting... ') @@ -1293,7 +1266,7 @@ def execute(command): exit_menu(str(e) + ". Check input properties file and try again. Exiting... ") try: - outdir_structure = config1.get('Default', 'outdir_structure_file').strip() + outdir_structure = setUpOCI_props.get('Default', 'outdir_structure_file').strip() except Exception as e: outdir_structure = '' @@ -1317,19 +1290,23 @@ def execute(command): print("Invalid outdir_structure_file. Please provide correct file path. Exiting... ") exit(1) -## Fetch OCI_regions -cd3service = cd3Services() -cd3service.fetch_regions(config) - +## Authenticate Params ct=None ct = commonTools() +config,signer = ct.authenticate(auth_mechanism, config_file_path) + + +## Fetch OCI_regions +cd3service = cd3Services() +cd3service.fetch_regions(config,signer) ## Check if fetch compartments script needs to be run run_fetch_script = 0 ## Fetch Subscribed Regions -ct.get_subscribedregions(config) +ct.get_subscribedregions(config,signer) home_region = ct.home_region + if len(outdir_struct.items())==0: var_file = f'{outdir}/{home_region}/variables_{home_region}.tf' else: @@ -1369,7 +1346,7 @@ def execute(command): if user_input.lower()!='y': user_input = 'n' if(user_input.lower() == 'y'): - fetch_compartments(outdir,outdir_struct, config=config) + fetch_compartments(outdir,outdir_struct, ct) else: print("Make sure to execute the script for 'Fetch Compartments OCIDs to variables file' under 'CD3 Services' menu option atleast once before you continue!") @@ -1385,14 +1362,14 @@ def execute(command): Option('Export Databases', export_databases, 'Databases'), Option('Export Load Balancers', export_loadbalancer, 'Load Balancers'), Option('Export Management Services', export_management_services, 'Management Services'), - Option('Export Developer Services', export_development_services, 'Development Services'), + Option('Export Developer Services', export_developer_services, 'Development Services'), Option('Export Software-Defined Data Centers - OCVS', export_sddc, 'OCVS'), Option('CD3 Services', cd3_services, 'CD3 Services') ] #verify_outdir_is_empty() - print("\nnon_gf_tenancy in properties file is set to true..Export existing OCI objects and Synch with TF state") + print("\nworkflow_type set to export_resources. 
Export existing OCI objects and Synch with TF state") print("We recommend to not have any existing tfvars/tfstate files for export out directory") export_regions = get_region_list(rm=False) diff --git a/cd3_automation_toolkit/setUpOCI_jenkins.py b/cd3_automation_toolkit/setUpOCI_jenkins.py new file mode 100644 index 000000000..151fdfd6d --- /dev/null +++ b/cd3_automation_toolkit/setUpOCI_jenkins.py @@ -0,0 +1,1172 @@ +import argparse +import configparser +import Database +import Identity +import Compute +import ManagementServices +import DeveloperServices +import Security +import cd3Validator +import Storage +import Network +import SDDC +import Governance +from commonTools import * +from collections import namedtuple +import requests +import subprocess +import datetime,glob,os + + +def show_options(options, quit=False, menu=False, extra=None, index=0): + # Just add whitespace between number and option. It just makes it look better + number_offset = len(str(len(options))) + 1 + # Iterate over options. Print number and option + for i, option in enumerate(options, index): + print(f'{str(i)+".":<{number_offset}} {option.name}') + if quit: + print(f'{"q"+".":<{number_offset}} Press q to quit') + if menu: + print(f'{"m"+".":<{number_offset}} Press m to go back to Main Menu') + if extra: + print(extra) + user_input = input('Enter your choice (specify comma separated to choose multiple choices): ') + user_input = user_input.split(',') + if 'q' in user_input or 'm' in user_input: + return user_input + # Subtract one to account for zero-indexing. The options start at 1 + # #return [options[int(choice)-1] for choice in user_input] + try: + return [options[int(choice)-index] for choice in user_input] + except IndexError as ie: + print("\nInvalid Option.....Exiting!!") + exit(1) + except ValueError as ie: + print("\nInvalid Input.....Try again!!\n") + options = show_options(inputs, quit=True, index=index) + return options + + +def execute_options(options, *args, **kwargs): + global menu, quit + if 'm' in options or 'q' in options: + menu = 'm' in options + quit = 'q' in options + else: + for option in options: + with section(option.text): + option.callback(*args, **kwargs) + +def get_region_list(rm): + if rm == False: + input_region_names = ct.reg_filter + else: + input_region_names = ct.orm_reg_filter + input_region_names = list(map(lambda x: x.strip(), input_region_names.split(','))) if input_region_names else None + remove_regions = [] + region_list_fetch = [] + #validate input regions + if (input_region_names is not None): + for x in range(0, len(input_region_names)): + if (input_region_names[x].lower() not in ct.all_regions and input_region_names[x].lower()!='global'): + print("Input region: " + input_region_names[x] + " is not subscribed to OCI Tenancy") + remove_regions.append(input_region_names[x]) + + input_region_names = [x.lower() for x in input_region_names if x not in remove_regions] + if (len(input_region_names) == 0): + print("None of the input regions specified are subscribed to OCI..Exiting!!!") + exit(1) + else: + print("\nFetching for Regions... 
" + str(input_region_names)) + region_list_fetch = input_region_names + else: + print("Fetching for all Regions OCI tenancy is subscribed to...") + region_list_fetch = ct.all_regions + # include global dir for RM stack upload + if rm == True: + region_list_fetch.append('global') + return region_list_fetch + +def update_path_list(regions_path=[],service_dirs=[]): + # Update modified path list + for current_dir in service_dirs: + for reg in regions_path: + path_value = ((outdir + "/" + reg + "/" + current_dir).rstrip('/')).replace("//","/") + items = glob.glob(path_value + "/*") + files = [f for f in items if + (os.path.isfile(f) and (datetime.datetime.fromtimestamp(os.path.getmtime(f)) >= exec_start_time))] + if files: + if path_value not in updated_paths: + updated_paths.append(path_value) + for script_file in files: + if script_file.endswith(".sh") and script_file not in import_scripts: + import_scripts.append(script_file) + +def fetch_compartments(outdir, outdir_struct, ct): + var_files={} + var_data = {} + home_region = ct.home_region + print("outdir specified should contain region directories and then variables_.tf file inside the region directories eg /cd3user/tenancies//terraform_files") + print("Verifying out directory and Taking backup of existing variables files...Please wait...") + print("\nFetching Compartment Info...Please wait...") + ct.get_network_compartment_ids(config['tenancy'], "root", config, signer) + ct.all_regions.append('global') + print("\nWriting to variables files...") + home_region_services = ['identity', 'tagging', 'budget'] + for region in ct.all_regions: + # for global directory + if region == 'global': + file = f'{outdir}/{region}/rpc/variables_{region}.tf' + var_files[region] = file + try: + # Read variables file data + with open(file, 'r') as f: + var_data[region] = f.read() + except FileNotFoundError as e: + print(f'\nVariables file not found in - {region}.......') + print("Continuing") + + # Fetch variables file inside region directories - single outdir + elif len(outdir_struct) == 0: + file = f'{outdir}/{region}/variables_{region}.tf' + var_files[region]=file + try: + # Read variables file data + with open(file, 'r') as f: + var_data[region] = f.read() + except FileNotFoundError as e: + print(f'\nVariables file not found in - {region}.......') + print("Continuing") + + # Fetch variables file inside service directories - separate outdir + else: + for k, v in outdir_struct.items(): + if ((k not in home_region_services) or ((k in home_region_services) and region == home_region)) and v != '': + file = f'{outdir}/{region}/{v}/variables_{region}.tf' + var_files[region + "-" + v] = file + try: + # Read variables file data + with open(file, 'r') as f: + var_data[region + "-" + v] = f.read() + except FileNotFoundError as e: + print(f'\nVariables file not found in - {region}/{v}/.......') + print("Continuing") + + compocidsStr = '' + for k,v in ct.ntk_compartment_ids.items(): + k = commonTools.check_tf_variable(k) + v = "\"" + v + "\"" + compocidsStr = "\t" + k + " = " + v + "\n" + compocidsStr + compocidsStr = "\n" + compocidsStr + finalCompStr = "#START_compartment_ocids#" + compocidsStr + "\t#compartment_ocids_END#" + for k, v in var_data.items(): + var_data[k] = re.sub('#START_compartment_ocids#.*?#compartment_ocids_END#', finalCompStr, + var_data[k], flags=re.DOTALL) + # Write variables file data + with open(var_files[k], "w") as f: + # Backup the existing Routes tf file + file = var_files[k] + shutil.copy(file, file + "_backup") + f.write(var_data[k]) + 
print("\nCompartment info written to all variables files under outdir...\n") + # update fetchcompinfo.safe + fetch_comp_file = f'{outdir}/fetchcompinfo.safe' + with open(fetch_comp_file, 'w+') as f: + f.write('run_fetch_script=0') + f.close() + ct.all_regions = ct.all_regions[:-1] + +################## Validate Function ######################### +def validate_cd3(options=[]): + choices = [] + choice_items = [] + for opt in options: + choice_items = [] + if opt in ['Validate Compartments','Validate Groups','Validate Policies','Validate Tags','Validate Networks','Validate DNS','Validate Instances','Validate Block Volumes','Validate FSS','Validate Buckets']: + if opt == "Validate Networks": + opt = "Validate Network(VCNs, SubnetsVLANs, DHCP, DRGs)" + choice_items.append(opt) + choices.append(choice_items) + cd3Validator.validate_cd3(choices,inputfile, var_file, prefix, outdir, ct) # config, signer, ct) + print("Exiting CD3 Validation...") + +################## Export Identity ########################## +def export_identityOptions(options=[]): + service_dirs = [] + for opt in options: + if opt == "Export Compartments/Groups/Policies": + export_compartmentPoliciesGroups(inputfile, outdir, service_dir_identity, config,signer, ct) + service_dirs = [service_dir_identity] + elif opt == "Export Users": + export_users(inputfile, outdir, service_dir_identity, config,signer, ct) + service_dirs = [service_dir_identity] + elif opt == "Export Network Sources": + export_networkSources(inputfile, outdir, service_dir_identity, config,signer, ct) + service_dirs = [service_dir_identity] + # Update modified path list + update_path_list(regions_path=[ct.home_region], service_dirs=service_dirs) + + +def export_compartmentPoliciesGroups(inputfile, outdir, service_dir, config, signer, ct): + compartments = ct.get_compartment_map(var_file, 'Identity Objects') + Identity.export_identity(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments) + create_identity(options=['Add/Modify/Delete Compartments','Add/Modify/Delete Groups','Add/Modify/Delete Policies']) + print("\n\nExecute tf_import_commands_identity_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") + + +def export_users(inputfile, outdir, service_dir, config,signer, ct): + Identity.Users.export_users(inputfile, outdir, service_dir, config, signer, ct) + create_identity(options=['Add/Modify/Delete Users']) + print("\n\nExecute tf_import_commands_users_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") + + +def export_networkSources(inputfile, outdir, service_dir, config, signer, ct): + compartments = ct.get_compartment_map(var_file, 'Identity Objects') + Identity.NetworkSources.export_networkSources(inputfile, outdir, service_dir, config, signer, ct) + create_identity(options=['Add/Modify/Delete Network Sources']) + print("\n\nExecute tf_import_commands_networkSources_nonGF.sh script created under home region directory to synch TF with OCI Identity objects\n") + + +def export_tags(options=[]): + compartments = ct.get_compartment_map(var_file, 'Tagging Objects') + Governance.export_tags_nongreenfield(inputfile, outdir, service_dir_tagging, config, signer, ct, export_compartments=compartments) + create_tags() + print("\n\nExecute tf_import_commands_tags_nonGF.sh script created under home region directory to synch TF with OCI Tags\n") + # Update modified path list + update_path_list(regions_path=[ct.home_region], service_dirs=[service_dir_tagging]) + 
+ +def export_network(options=[]): + service_dirs = [] + for opt in options: + if opt == "Export all Network Components": + export_networking(inputfile, outdir, outdir_struct, config, signer, ct, export_regions) + service_dirs = [service_dir_network, service_dir_nsg, service_dir_vlan] + if opt == "Export Network components for VCNs/DRGs/DRGRouteRulesinOCI Tabs": + export_major_objects(inputfile, outdir, service_dir_network, config, signer, ct, export_regions) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == "Export Network components for DHCP Tab": + export_dhcp(inputfile, outdir, service_dir_network, config, signer, ct, export_regions) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == "Export Network components for SecRulesinOCI Tab": + export_secrules(inputfile, outdir, service_dir_network, config, signer, ct, export_regions) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == "Export Network components for RouteRulesinOCI Tab": + export_routerules(inputfile, outdir, service_dir_network, config, signer, ct, export_regions) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == "Export Network components for SubnetsVLANs Tab": + export_subnets_vlans(inputfile, outdir, outdir_struct, config, signer, ct, export_regions) + service_dirs.append(service_dir_vlan) if service_dir_vlan not in service_dirs else service_dirs + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == "Export Network components for NSGs Tab": + export_nsg(inputfile, outdir, service_dir_nsg, config, signer, ct, export_regions) + service_dirs.append(service_dir_nsg) if service_dir_nsg not in service_dirs else service_dirs + + print("=====================================================================================================================") + print("NOTE: Make sure to execute tf_import_commands_network_major-objects_nonGF.sh before executing the other scripts.") + print("=====================================================================================================================") + + # Update modified path list + regions_path = export_regions.copy() + regions_path.append("global") + service_dirs.append("rpc") + update_path_list(regions_path=regions_path, service_dirs=service_dirs) + + +def export_networking(inputfile, outdir, service_dir,config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Network Objects') + Network.export_networking(inputfile, outdir, service_dir,config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.create_major_objects(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + Network.create_rpc_resource(inputfile, outdir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy) + Network.create_terraform_dhcp_options(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + Network.modify_terraform_secrules(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + Network.modify_terraform_routerules(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + Network.modify_terraform_drg_routerules(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy) + Network.create_terraform_drg_route(inputfile, outdir, 
service_dir_network, prefix, ct, non_gf_tenancy, + network_connectivity_in_setupoci='', modify_network=False) + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='network') + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='vlan') + Network.create_terraform_nsg(inputfile, outdir, service_dir_nsg, prefix, ct) + + print("\n\nExecute tf_import_commands_network_*_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_major_objects(inputfile, outdir, service_dir_network, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'VCN Major Objects') + Network.export_major_objects(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.export_drg_routetable(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) + Network.create_major_objects(inputfile, outdir,service_dir_network, prefix, ct, non_gf_tenancy) + Network.create_rpc_resource(inputfile, outdir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy) + Network.create_terraform_drg_route(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy,network_connectivity_in_setupoci='', modify_network=False) + print("\n\nExecute tf_import_commands_network_major-objects_nonGF.sh and tf_import_commands_network_drg_routerules_nonGF.sh scripts created under each region directory to synch TF with OCI Network objects\n") + + +def export_dhcp(inputfile, outdir, service_dir_network,config,signer,ct,export_regions): + compartments = ct.get_compartment_map(var_file,'DHCP') + Network.export_dhcp(inputfile, outdir, service_dir_network,config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.create_terraform_dhcp_options(inputfile, outdir, service_dir_network,prefix, ct, non_gf_tenancy, ct) + print("\n\nExecute tf_import_commands_network_dhcp_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_secrules(inputfile, outdir, service_dir_network,config,signer,ct,export_regions): + compartments = ct.get_compartment_map(var_file,'SecRulesInOCI') + Network.export_seclist(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) + Network.modify_terraform_secrules(inputfile, outdir,service_dir_network, prefix, ct, non_gf_tenancy) + print("\n\nExecute tf_import_commands_network_secrules_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_routerules(inputfile, outdir, service_dir_network,config,signer,ct,export_regions): + compartments = ct.get_compartment_map(var_file,'RouteRulesInOCI') + Network.export_routetable(inputfile, outdir, service_dir_network, config, signer, ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) + Network.modify_terraform_routerules(inputfile, outdir, service_dir_network,prefix, ct, non_gf_tenancy) + print("\n\nExecute tf_import_commands_network_routerules_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_subnets_vlans(inputfile, outdir, service_dir,config,signer,ct,export_regions): + 
compartments = ct.get_compartment_map(var_file,'Subnets') + Network.export_subnets_vlans(inputfile, outdir, service_dir,config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='network') + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy, network_vlan_in_setupoci='vlan') + print("\n\nExecute tf_import_commands_network_subnets_nonGF.sh script created under each region directory to synch TF with OCI Network objects") + print("\nExecute tf_import_commands_network_vlans_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_nsg(inputfile, outdir, service_dir_nsg,config,signer,ct,export_regions): + compartments = ct.get_compartment_map(var_file,'NSGs') + Network.export_nsg(inputfile, outdir,service_dir_nsg, config,signer,ct, export_compartments=compartments, export_regions=export_regions, _tf_import_cmd=True) + Network.create_terraform_nsg(inputfile, outdir, service_dir_nsg,prefix, ct) + print("\n\nExecute tf_import_commands_network_nsg_nonGF.sh script created under each region directory to synch TF with OCI Network objects\n") + + +def export_compute(options=[]): + for opt in options: + if opt == "Export Dedicated VM Hosts": + export_dedicatedvmhosts(inputfile, outdir, config, signer, ct, export_regions) + if opt == "Export Instances (excludes instances launched by OKE)": + export_instances(inputfile, outdir, config, signer, ct, export_regions) + + +def export_dedicatedvmhosts(inputfile, outdir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Dedicated VM Hosts') + Compute.export_dedicatedvmhosts(inputfile, outdir, service_dir_dedicated_vm_host, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + create_dedicatedvmhosts(inputfile, outdir, service_dir_dedicated_vm_host, prefix, ct) + print("\n\nExecute tf_import_commands_dedicatedvmhosts_nonGF.sh script created under each region directory to synch TF with OCI Dedicated VM Hosts\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_dedicated_vm_host]) + + +def export_instances(inputfile, outdir,config,signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Instances') + display_name_str = ct.ins_pattern_filter if ct.ins_pattern_filter else None + ad_name_str = ct.ins_ad_filter if ct.ins_ad_filter else None + display_names = list(map(lambda x: x.strip(), display_name_str.split(','))) if display_name_str else None + ad_names = list(map(lambda x: x.strip(), ad_name_str.split(','))) if ad_name_str else None + Compute.export_instances(inputfile, outdir, service_dir_instance,config,signer,ct, export_compartments=compartments, export_regions=export_regions, display_names = display_names, ad_names = ad_names) + create_instances(inputfile, outdir, service_dir_instance,prefix, ct) + print("\n\nExecute tf_import_commands_instances_nonGF.sh script created under each region directory to synch TF with OCI Instances\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_instance]) + + +def export_storage(options=[]): + for opt in options: + if opt == "Export Block Volumes/Block Backup Policy": + export_block_volumes(inputfile, outdir, config, signer, ct, export_regions) + if opt == "Export File Systems": + 
export_fss(inputfile, outdir, config, signer, ct, export_regions) + if opt == "Export Object Storage Buckets": + export_buckets(inputfile, outdir, config, signer, ct, export_regions) + + +def export_block_volumes(inputfile, outdir,config,signer,ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Block Volumes') + display_name_str = ct.bv_pattern_filter if ct.bv_pattern_filter else None + ad_name_str = ct.bv_ad_filter if ct.bv_ad_filter else None + display_names = list(map(lambda x: x.strip(), display_name_str.split(','))) if display_name_str else None + ad_names = list(map(lambda x: x.strip(), ad_name_str.split(','))) if ad_name_str else None + Storage.export_blockvolumes(inputfile, outdir, service_dir_block_volume, config,signer,ct, export_compartments=compartments, export_regions=export_regions, display_names = display_names, ad_names = ad_names) + Storage.create_terraform_block_volumes(inputfile, outdir, service_dir_block_volume, prefix, ct) + print("\n\nExecute tf_import_commands_blockvolumes_nonGF.sh script created under each region directory to synch TF with OCI Block Volume Objects\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_block_volume]) + + +def export_fss(inputfile, outdir,config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'FSS objects') + Storage.export_fss(inputfile, outdir, service_dir_fss, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + Storage.create_terraform_fss(inputfile, outdir, service_dir_fss, prefix, ct) + print("\n\nExecute tf_import_commands_fss_nonGF.sh script created under each region directory to synch TF with OCI FSS objects\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_fss]) + + +def export_buckets(inputfile, outdir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file, 'Buckets') + Storage.export_buckets(inputfile, outdir, service_dir_object_storage, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + Storage.create_terraform_oss(inputfile, outdir, service_dir_object_storage, prefix, ct) + print("\n\nExecute tf_import_commands_buckets_nonGF.sh script created under each region directory to synch TF with OCI Object Storage Buckets\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_object_storage]) + + +def export_loadbalancer(options=[]): + for opt in options: + if opt == "Export Load Balancers": + export_lbr(inputfile, outdir, config, signer, ct, export_regions) + if opt == "Export Network Load Balancers": + export_nlb(inputfile, outdir, config, signer, ct, export_regions) + + +def export_lbr(inputfile, outdir,config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'LBR objects') + Network.export_lbr(inputfile, outdir, service_dir_loadbalancer, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + create_lb(inputfile, outdir,service_dir_loadbalancer, prefix, ct) + print("\n\nExecute tf_import_commands_lbr_nonGF.sh script created under each region directory to synch TF with OCI LBR objects\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_loadbalancer]) + + +def export_nlb(inputfile, outdir,config,signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'NLB objects') + 
Network.export_nlb(inputfile, outdir, service_dir_networkloadbalancer, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + create_nlb(inputfile, outdir,service_dir_networkloadbalancer, prefix, ct) + print("\n\nExecute tf_import_commands_nlb_nonGF.sh script created under each region directory to synch TF with OCI NLB objects\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_networkloadbalancer]) + + +def export_databases(options=[]): + for opt in options: + if opt == "Export Virtual Machine or Bare Metal DB Systems": + export_dbsystems_vm_bm(inputfile, outdir, config, signer, ct, export_regions) + if opt == "Export EXA Infra and EXA VMClusters": + export_exa_infra_vmclusters(inputfile, outdir, config, signer, ct, export_regions) + if opt == 'Export ADBs': + export_adbs(inputfile, outdir, config, signer, ct, export_regions) + + +def export_dbsystems_vm_bm(inputfile, outdir,config,signer, ct,export_regions): + compartments = ct.get_compartment_map(var_file,'VM and BM DB Systems') + Database.export_dbsystems_vm_bm(inputfile, outdir, service_dir_dbsystem_vm_bm, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir_dbsystem_vm_bm, prefix, ct) + print("\n\nExecute tf_import_commands_dbsystems-vm-bm_nonGF.sh script created under each region directory to synch TF with DBSystems\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_dbsystem_vm_bm]) + + +def export_exa_infra_vmclusters(inputfile, outdir,config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'EXA Infra and EXA VMClusters') + Database.export_exa_infra(inputfile, outdir, service_dir_database_exacs, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.export_exa_vmclusters(inputfile, outdir, service_dir_database_exacs, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + create_exa_infra_vmclusters(inputfile, outdir, service_dir_database_exacs, prefix,ct) + print("\n\nExecute tf_import_commands_exa-infra_nonGF.sh and tf_import_commands_exa-vmclusters_nonGF.sh scripts created under each region directory to synch TF with Exa-Infra and Exa-VMClusters\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_database_exacs]) + + +def export_adbs(inputfile, outdir,config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'ADBs') + Database.export_adbs(inputfile, outdir, service_dir_adb, config,signer,ct, export_compartments=compartments, export_regions= export_regions) + Database.create_terraform_adb(inputfile, outdir, service_dir_adb, prefix, ct) + print("\n\nExecute tf_import_commands_adb_nonGF.sh script created under each region directory to synch TF with OCI ADBs\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_adb]) + + +def export_management_services(options=[]): + service_dirs = [] + for opt in options: + if opt == "Export Notifications": + export_notifications(inputfile, outdir, service_dir_managementservices, config, signer, ct, export_regions) + service_dirs = [service_dir_managementservices] + if opt == "Export Events": + export_events(inputfile, outdir, service_dir_managementservices, config, signer, ct, export_regions) + service_dirs = 
[service_dir_managementservices] + if opt == "Export Alarms": + export_alarms(inputfile, outdir, service_dir_managementservices, config, signer, ct, export_regions) + service_dirs = [service_dir_managementservices] + if opt == "Export Service Connectors": + export_service_connectors(inputfile, outdir, service_dir_managementservices, config, signer, ct, export_regions) + service_dirs = [service_dir_managementservices] + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_managementservices]) + + +def export_notifications(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Notifications') + ManagementServices.export_notifications(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_notifications(inputfile, outdir, service_dir, prefix, ct) + print("\n\nExecute tf_import_commands_notifications_nonGF.sh script created under each region directory to synch TF with OCI Notifications\n") + + +def export_events(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Events') + ManagementServices.export_events(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_events(inputfile, outdir, service_dir, prefix, ct) + print("\n\nExecute tf_import_commands_events_nonGF.sh script created under each region directory to synch TF with OCI Events\n") + + +def export_alarms(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Alarms') + ManagementServices.export_alarms(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_terraform_alarms(inputfile, outdir,service_dir, prefix, ct) + print("\n\nExecute tf_import_commands_alarms_nonGF.sh script created under each region directory to synch TF with OCI Alarms\n") + + +def export_service_connectors(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'Service Connectors') + ManagementServices.export_service_connectors(inputfile, outdir, service_dir, config,signer,ct, export_compartments=compartments, export_regions=export_regions) + ManagementServices.create_service_connectors(inputfile, outdir, service_dir, prefix, ct) + print("\n\nExecute tf_import_commands_serviceconnectors_nonGF.sh script created under each region directory to synch TF with OCI Service Connectors\n") + + +def export_developer_services(options=[]): + for opt in options: + if opt == "Export OKE cluster and Nodepools": + export_oke(inputfile, outdir, config, signer, ct, export_regions) + + +def export_oke(inputfile, outdir, config,signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file,'OKE') + DeveloperServices.export_oke(inputfile, outdir, service_dir_oke,config,signer,ct, export_compartments=compartments, export_regions=export_regions) + DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir_oke,prefix, ct) + print("\n\nExecute tf_import_commands_oke_nonGF.sh script created under each region directory to synch TF with OKE\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_oke]) + + +def export_sddc(): + 
compartments = ct.get_compartment_map(var_file,'SDDCs') + SDDC.export_sddc(inputfile, outdir, service_dir_sddc,config,signer,ct, export_compartments=compartments, export_regions=export_regions) + SDDC.create_terraform_sddc(inputfile, outdir, service_dir_sddc, prefix, ct) + print("\n\nExecute tf_import_commands_sddcs_nonGF.sh script created under each region directory to synch TF with SDDC\n") + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=[service_dir_sddc]) + + +def export_dns(options=[]): + service_dirs = [] + for opt in options: + if opt == "Export DNS Views/Zones/Records": + export_dns_views_zones_rrsets(inputfile, outdir, service_dir_dns, config, signer, ct, export_regions) + service_dirs = [service_dir_dns] + if opt == "Export DNS Resolvers": + export_dns_resolvers(inputfile, outdir, service_dir_dns, config, signer, ct, export_regions) + service_dirs = [service_dir_dns] + # Update modified path list + update_path_list(regions_path=export_regions, service_dirs=service_dirs) + + +def export_dns_views_zones_rrsets(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file, 'DNS Views ,attached zones and rrsets') + dns_filter = None + if ct.default_dns: + if ct.default_dns.lower() == "false": + dns_filter = "n" + if ct.default_dns.lower() == "true": + dns_filter = "y" + + dns_filter = dns_filter if dns_filter else None + Network.export_dns_views_zones_rrsets(inputfile, outdir, service_dir, config, signer, ct, dns_filter=dns_filter, export_compartments=compartments, export_regions=export_regions) + create_terraform_dns(inputfile, outdir, service_dir, prefix, ct) + +def export_dns_resolvers(inputfile, outdir, service_dir, config, signer, ct, export_regions): + compartments = ct.get_compartment_map(var_file, 'DNS Resolvers') + Network.export_dns_resolvers(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions=export_regions) + Network.create_terraform_dns_resolvers(inputfile, outdir, service_dir, prefix, ct) + + +def cd3_services(options=[]): + for opt in options: + if opt == 'Fetch Compartments OCIDs to variables file': + fetch_compartments(outdir, outdir_struct, ct) + if opt == 'Fetch Protocols to OCI_Protocols': + fetch_protocols(outdir, outdir_struct, ct) + + +def fetch_protocols(outdir, outdir_struct, ct): + cd3service.fetch_protocols() + +################## Create Functions ########################## +def create_identity(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Add/Modify/Delete Compartments': + Identity.create_terraform_compartments(inputfile, outdir,service_dir_identity, prefix, ct) + service_dirs = [service_dir_identity] + if opt == 'Add/Modify/Delete Groups': + Identity.create_terraform_groups(inputfile, outdir,service_dir_identity, prefix, ct) + service_dirs = [service_dir_identity] + if opt == 'Add/Modify/Delete Policies': + Identity.create_terraform_policies(inputfile, outdir,service_dir_identity, prefix, ct) + service_dirs = [service_dir_identity] + if opt == 'Add/Modify/Delete Users': + Identity.Users.create_terraform_users(inputfile, outdir,service_dir_identity, prefix, ct) + service_dirs = [service_dir_identity] + if opt == 'Add/Modify/Delete Network Sources': + Identity.NetworkSources.create_terraform_networkSources(inputfile, outdir,service_dir_identity, prefix, ct) + service_dirs = [service_dir_identity] + # Update modified path list + update_path_list(regions_path=[ct.home_region], 
service_dirs=[service_dir_identity]) + + +def create_tags(): + Governance.create_terraform_tags(inputfile, outdir, service_dir_tagging, prefix, ct) + # Update modified path list + update_path_list(regions_path=[ct.home_region], service_dirs=[service_dir_tagging]) + + +def create_network(options=[], sub_options=[]): + service_dirs = [] + for opt in options: + if opt == 'Create Network': + Network.create_all_tf_objects(inputfile, outdir, outdir_struct, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs = [service_dir_network, service_dir_nsg, service_dir_vlan] + if opt == 'Modify Network': + modify_terraform_network(inputfile, outdir, outdir_struct, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'Security Rules': + export_modify_security_rules(sub_options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'Route Rules': + export_modify_route_rules(sub_options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'DRG Route Rules': + export_modify_drg_route_rules(sub_options, inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'Network Security Groups': + export_modify_nsgs(sub_options, inputfile, outdir, service_dir_nsg, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_nsg) if service_dir_nsg not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete VLANs': + create_vlans(inputfile, outdir, outdir_struct, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_vlan) if service_dir_vlan not in service_dirs else service_dirs + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'Customer Connectivity': + create_drg_connectivity(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + # Update modified path list + regions_path = subscribed_regions.copy() + regions_path.append("global") + service_dirs.append("rpc") + update_path_list(regions_path=regions_path, service_dirs=service_dirs) + + +def modify_terraform_network(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + Network.create_all_tf_objects(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy=non_gf_tenancy, modify_network=True, ) + +def export_modify_security_rules(sub_options,inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + for opt in sub_options: + if opt == 'Export Security Rules (From OCI into SecRulesinOCI sheet)': + export_security_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + if opt == 'Add/Modify/Delete Security Rules (Reads SecRulesinOCI sheet)': + Network.modify_terraform_secrules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + +def export_security_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): + compartments = ct.get_compartment_map(var_file, 'OCI Security Rules') + Network.export_seclist(inputfile, outdir, 
service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) + +def export_modify_route_rules(sub_options,inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + execute_all = False + for opt in sub_options: + if opt == 'Export Route Rules (From OCI into RouteRulesinOCI sheet)': + export_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + if opt == 'Add/Modify/Delete Route Rules (Reads RouteRulesinOCI sheet)': + Network.modify_terraform_routerules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + + +def export_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): + compartments = ct.get_compartment_map(var_file, 'OCI Route Rules') + Network.export_routetable(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) + +def export_modify_drg_route_rules(sub_options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + execute_all = False + for opt in sub_options: + if opt == 'Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet)': + export_drg_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy=non_gf_tenancy) + if opt == 'Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet)': + Network.modify_terraform_drg_routerules(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy) + + +def export_drg_route_rules(inputfile, outdir, service_dir, config, signer, ct, non_gf_tenancy): + compartments = ct.get_compartment_map(var_file,'OCI DRG Route Rules') + Network.export_drg_routetable(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) + + +def export_modify_nsgs(sub_options, inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy): + execute_all = False + for opt in sub_options: + if opt == 'Export NSGs (From OCI into NSGs sheet)': + export_nsgs(inputfile, outdir, service_dir, prefix, ct) + if opt == 'Add/Modify/Delete NSGs (Reads NSGs sheet)': + Network.create_terraform_nsg(inputfile, outdir, service_dir, prefix, ct) + +def export_nsgs(inputfile, outdir, service_dir, prefix, ct): + compartments = ct.get_compartment_map(var_file,'OCI NSGs') + Network.export_nsg(inputfile, outdir, service_dir, config, signer, ct, export_compartments=compartments, export_regions= export_regions, _tf_import_cmd=False) + +def create_vlans(inputfile, outdir, service_dir, prefix,ct, non_gf_tenancy, network_vlan_in_setupoci='vlan'): + Network.create_terraform_subnet_vlan(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy=non_gf_tenancy, network_vlan_in_setupoci='vlan',modify_network=True) + Network.create_terraform_route(inputfile, outdir, service_dir_network, prefix, ct, non_gf_tenancy=non_gf_tenancy, network_vlan_in_setupoci='vlan',modify_network=True) + +def create_drg_connectivity(inputfile, outdir, service_dir, prefix, ct, non_gf_tenancy,network_vlan_in_setupoci='vlan'): + execute_all = False + create_rpc( inputfile, outdir, service_dir, service_dir, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy=non_gf_tenancy) + +def create_rpc(inputfile, outdir, service_dir, service_dir_network, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy): + Network.create_rpc_resource(inputfile, outdir, service_dir, prefix, auth_mechanism, config_file_path, ct, non_gf_tenancy=non_gf_tenancy) + 
Network.create_terraform_drg_route(inputfile, outdir, service_dir_network, prefix, non_gf_tenancy=non_gf_tenancy, ct=ct, network_connectivity_in_setupoci='connectivity', modify_network=True) + +def create_compute(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Add/Modify/Delete Dedicated VM Hosts': + create_dedicatedvmhosts(inputfile, outdir, service_dir_dedicated_vm_host,prefix, ct) + service_dirs.append(service_dir_dedicated_vm_host) if service_dir_dedicated_vm_host not in service_dirs else service_dirs + + if opt == 'Add/Modify/Delete Instances/Boot Backup Policy': + create_instances(inputfile, outdir, service_dir_instance,prefix, ct) + service_dirs.append(service_dir_instance) if service_dir_instance not in service_dirs else service_dirs + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + + +def create_instances(inputfile, outdir, service_dir,prefix,ct): + Compute.create_terraform_instances(inputfile, outdir, service_dir, prefix, ct) + + +def create_dedicatedvmhosts(inputfile, outdir, service_dir, prefix,ct): + Compute.create_terraform_dedicatedhosts(inputfile, outdir, service_dir,prefix, ct) + + +def create_storage(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Add/Modify/Delete Block Volumes/Block Backup Policy': + Storage.create_terraform_block_volumes(inputfile, outdir, service_dir_block_volume, prefix, ct) + service_dirs.append(service_dir_block_volume) if service_dir_block_volume not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete File Systems': + Storage.create_terraform_fss(inputfile, outdir, service_dir_fss, prefix, ct) + service_dirs.append(service_dir_fss) if service_dir_fss not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete Object Storage Buckets': + Storage.create_terraform_oss( inputfile, outdir, service_dir_object_storage, prefix, ct) + service_dirs.append(service_dir_object_storage) if service_dir_object_storage not in service_dirs else service_dirs + #Option('Enable Object Storage Buckets Write Logs', create_cis_oss_logs, '') + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + + +def create_loadbalancer(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Add/Modify/Delete Load Balancers': + create_lb(inputfile, outdir,service_dir_loadbalancer, prefix, ct) + service_dirs.append(service_dir_loadbalancer) if service_dir_loadbalancer not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete Network Load Balancers': + create_nlb(inputfile, outdir,service_dir_networkloadbalancer, prefix, ct) + service_dirs.append(service_dir_networkloadbalancer) if service_dir_networkloadbalancer not in service_dirs else service_dirs + #Option('Enable LBaaS Logs', enable_lb_logs, 'LBaaS Logs') + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + + +def create_lb(inputfile, outdir,service_dir, prefix, ct): + Network.create_terraform_lbr_hostname_certs(inputfile, outdir, service_dir, prefix, ct) + Network.create_backendset_backendservers(inputfile, outdir, service_dir, prefix, ct) + Network.create_listener(inputfile, outdir, service_dir, prefix, ct) + Network.create_path_route_set(inputfile, outdir, service_dir, prefix, ct) + Network.create_ruleset(inputfile, outdir, service_dir, prefix, ct) + + +def create_nlb(inputfile, outdir,service_dir, prefix, ct): + Network.create_terraform_nlb_listener(inputfile, outdir, 
service_dir, prefix, ct) + Network.create_nlb_backendset_backendservers(inputfile, outdir, service_dir, prefix, ct) + + +def create_databases(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Add/Modify/Delete Virtual Machine or Bare Metal DB Systems': + Database.create_terraform_dbsystems_vm_bm(inputfile, outdir, service_dir_dbsystem_vm_bm, prefix, ct) + service_dirs.append(service_dir_dbsystem_vm_bm) if service_dir_dbsystem_vm_bm not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete EXA Infra and EXA VM Clusters': + create_exa_infra_vmclusters(inputfile, outdir,service_dir_database_exacs, prefix,ct) + service_dirs.append(service_dir_database_exacs) if service_dir_database_exacs not in service_dirs else service_dirs + if opt == 'Add/Modify/Delete ADBs': + Database.create_terraform_adb(inputfile, outdir, service_dir_adb, prefix, ct) + service_dirs.append(service_dir_adb) if service_dir_adb not in service_dirs else service_dirs + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + +def create_exa_infra_vmclusters(inputfile, outdir,service_dir, prefix,ct): + Database.create_terraform_exa_infra(inputfile, outdir, service_dir, prefix, ct) + Database.create_terraform_exa_vmclusters(inputfile, outdir, service_dir, prefix, ct) + +def create_management_services(options=[]): + service_dirs = [] + for opt in options: + if opt == "Add/Modify/Delete Notifications": + ManagementServices.create_terraform_notifications(inputfile, outdir, service_dir_managementservices, prefix, ct) + service_dirs = [service_dir_managementservices] + if opt == "Add/Modify/Delete Events": + ManagementServices.create_terraform_events(inputfile, outdir, service_dir_managementservices, prefix, ct) + service_dirs = [service_dir_managementservices] + if opt == "Add/Modify/Delete Alarms": + ManagementServices.create_terraform_alarms(inputfile, outdir, service_dir_managementservices, prefix, ct) + service_dirs = [service_dir_managementservices] + if opt == "Add/Modify/Delete ServiceConnectors": + ManagementServices.create_service_connectors(inputfile, outdir, service_dir_managementservices, prefix, ct) + service_dirs = [service_dir_managementservices] + + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=[service_dir_managementservices]) + + +def create_developer_services(options=[]): + for opt in options: + if opt == "Upload current terraform files/state to Resource Manager": + create_rm_stack(inputfile, outdir, prefix, auth_mechanism, config_file_path,ct) + if opt == "Add/Modify/Delete OKE Cluster and Nodepools": + create_oke(inputfile, outdir, prefix, auth_mechanism, config_file_path,ct) + + +def create_rm_stack(inputfile, outdir, prefix, auth_mechanism, config_file, ct): + regions = get_region_list(rm = True) + DeveloperServices.create_resource_manager(outdir,var_file, outdir_struct, prefix, auth_mechanism, config_file, ct, regions) + +def create_oke(inputfile, outdir, prefix, auth_mechanism, config_file, ct): + DeveloperServices.create_terraform_oke(inputfile, outdir, service_dir_oke, prefix, ct) + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=[service_dir_oke]) + + +def create_sddc(): + SDDC.create_terraform_sddc(inputfile, outdir, service_dir_sddc, prefix, ct) + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=[service_dir_sddc]) + + +def create_dns(options=[]): + service_dirs = [] + for opt in options: 
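+        # Both DNS options regenerate Terraform under the DNS service directory; the modified paths are
+        # recorded via update_path_list() at the end of this function.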
+ if opt == 'Add/Modify/Delete DNS Views/Zones/Records': + create_terraform_dns(inputfile, outdir, service_dir_dns, prefix, ct) + service_dirs = [service_dir_dns] + if opt == 'Add/Modify/Delete DNS Resolvers': + Network.create_terraform_dns_resolvers(inputfile, outdir, service_dir_dns, prefix, ct) + service_dirs = [service_dir_dns] + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + + +def create_terraform_dns(inputfile, outdir, service_dir, prefix, ct): + Network.create_terraform_dns_views(inputfile, outdir, service_dir, prefix, ct) + Network.create_terraform_dns_zones(inputfile, outdir, service_dir, prefix, ct) + Network.create_terraform_dns_rrsets(inputfile, outdir, service_dir, prefix, ct) + +def create_logging(options=[]): + service_dirs = [] + for opt in options: + if opt == 'Enable VCN Flow Logs': + ManagementServices.enable_cis_vcnflow_logging(inputfile, outdir, service_dir_network, prefix, ct) + service_dirs.append(service_dir_network) if service_dir_network not in service_dirs else service_dirs + if opt == 'Enable LBaaS Logs': + ManagementServices.enable_load_balancer_logging(inputfile, outdir, service_dir_loadbalancer, prefix, ct) + service_dirs.append(service_dir_loadbalancer) if service_dir_loadbalancer not in service_dirs else service_dirs + if opt == 'Enable Object Storage Buckets Write Logs': + ManagementServices.enable_cis_oss_logging(inputfile, outdir, service_dir_object_storage, prefix, ct) + service_dirs.append(service_dir_object_storage) if service_dir_object_storage not in service_dirs else service_dirs + + # Update modified path list + update_path_list(regions_path=subscribed_regions, service_dirs=service_dirs) + + +def create_cis_features(options=[], sub_options=[]): + service_dirs = [] + for opt in options: + if opt == 'CIS Compliance Checking Script': + initiate_cis_scan(sub_options,outdir, prefix, config_file_path) + if opt == "Create Key/Vault": + Security.create_cis_keyvault(outdir, service_dir_kms, service_dir_identity, prefix, ct, ct.vault_region, + ct.vault_comp) + service_dir = ct.vault_region+"/"+service_dir_identity + service_dirs.append(service_dir) if service_dir not in service_dirs else service_dirs + if opt == "Create Default Budget": + Governance.create_cis_budget(outdir, service_dir_budget, prefix, ct, ct.budget_amount, ct.budget_threshold) + service_dir = ct.home_region + "/" + service_dir_budget + service_dirs.append(service_dir) if service_dir not in service_dirs else service_dirs + if opt == "Enable Cloud Guard": + Security.enable_cis_cloudguard(outdir, service_dir_cloud_guard, prefix, ct, ct.cg_region) + service_dir = ct.cg_region + "/" + service_dir_cloud_guard + service_dirs.append(service_dir) if service_dir not in service_dirs else service_dirs + + # Update modified path list + update_path_list(regions_path=[""], service_dirs=service_dirs) + + +def initiate_cis_scan(sub_options,outdir, prefix, config_file): + for opt in sub_options: + if opt == "CD3 Image already contains the latest CIS compliance checking script available at the time of cd3 image release. 
Download latest only if new version of the script is available": + start_cis_download(outdir, prefix, config_file) + if opt == "Execute compliance checking script": + start_cis_scan(outdir, prefix, config_file) + +def start_cis_download(outdir, prefix, config_file): + print("Downloading the script file as 'cis_reports.py' at location "+os.getcwd()) + resp = requests.get("https://raw.githubusercontent.com/oracle-quickstart/oci-cis-landingzone-quickstart/main/scripts/cis_reports.py") + resp_contents = resp.text + with open("cis_reports.py", "w", encoding="utf-8") as fd: + fd.write(resp_contents) + print("Download complete!!") + +def start_cis_scan(outdir, prefix, config_file): + cmd = "python cis_reports.py" + #user_input = input("Enter command to execute the script. Press Enter to execute {} : ".format(cmd)) + #if user_input!='': + # cmd = "{}".format(user_input) + split = str.split(cmd) + + dirname = prefix + "_cis_report" + resource = "cis_report" + out_rep = outdir + '/'+ dirname + #config = "--config "+ config + commonTools.backup_file(outdir, resource, dirname) + + if not os.path.exists(out_rep): + os.makedirs(out_rep) + else: + commonTools.backup_file(outdir, resource, out_rep) + + out = ["-c", config_file, '--report-directory', out_rep] + cmd = cmd +" "+ out[0] + " "+out[1] + " "+ out[2] + " " +out[3] + split.extend(out) + print("Executing: "+cmd) + print("Scan started!") + execute(split, config_file) + +def execute(command,config_file): + export_cmd_windows = "set OCI_CONFIG_HOME="+config_file + export_cmd_linux = "export OCI_CONFIG_HOME=" + config_file + export_cmd = "" + if "linux" in sys.platform: + export_cmd = export_cmd_linux + elif "win" in sys.platform: + export_cmd = export_cmd_windows + + if export_cmd == "": + print("Failed to get OS details. Exiting!!") + exit(1) + + split_export_cmd = str.split(export_cmd) + #subprocess.Popen(split_export_cmd, stdout=subprocess.PIPE,bufsize=1) + popen = subprocess.Popen(command, stdout=subprocess.PIPE,bufsize=1) + lines_iterator = iter(popen.stdout.readline, b"") + while popen.poll() is None: + for line in lines_iterator: + nline = line.rstrip() + print(nline.decode("latin"), end="\r\n", flush=True)# yield line + + +#Execution starts here +parser = argparse.ArgumentParser(description='Sets Up OCI via TF') +parser.add_argument('propsfile', help="Full Path of properties file containing input variables. eg setUpOCI.properties") +parser.add_argument('--main_options', default="") +parser.add_argument('--sub_options', default="") +parser.add_argument('--sub_child_options', default="") +parser.add_argument('--add_filter', default=None) +args = parser.parse_args() +setUpOCI_props = configparser.RawConfigParser() +setUpOCI_props.read(args.propsfile) +main_options = args.main_options.split(",") +sub_options = args.sub_options.split(",") +sub_child_options = args.sub_child_options.split(",") + +#Read Config file Variables +try: + workflow_type = setUpOCI_props.get('Default', 'workflow_type').strip().lower() + + if (workflow_type == 'export_resources'): + non_gf_tenancy = True + else: + non_gf_tenancy = False + + inputfile = setUpOCI_props.get('Default','cd3file').strip() + outdir = setUpOCI_props.get('Default', 'outdir').strip() + prefix = setUpOCI_props.get('Default', 'prefix').strip() + auth_mechanism = setUpOCI_props.get('Default', 'auth_mechanism').strip().lower() + config_file_path = setUpOCI_props.get('Default', 'config_file').strip() or DEFAULT_LOCATION + + if not outdir: + exit_menu('input outdir location cannot be left blank. Exiting... 
') + elif not prefix: + exit_menu('input prefix value cannot be left blank. Exiting... ') + elif not inputfile: + exit_menu('input cd3file location cannot be left blank. Exiting... ') + elif '.xls' not in inputfile: + exit_menu('valid formats for input cd3file are either .xls or .xlsx') +except Exception as e: + exit_menu(str(e) + ". Check input properties file and try again. Exiting... ") + +try: + outdir_structure = setUpOCI_props.get('Default', 'outdir_structure_file').strip() +except Exception as e: + outdir_structure = '' + +# Pre-work +if not os.path.exists(outdir): + os.makedirs(outdir) + +if (outdir_structure == '' or outdir_structure == "\n"): + outdir_struct = {} +else: + if os.path.isfile(outdir_structure): + outdir_config = configparser.RawConfigParser() + outdir_config.read(outdir_structure) + outdir_struct = dict(outdir_config.items("Default")) + else: + print("Invalid outdir_structure_file. Please provide correct file path. Exiting... ") + exit(1) + +## Authenticate Params +ct=None +ct = commonTools() +config,signer = ct.authenticate(auth_mechanism, config_file_path) + +# Set Export filters +export_filters = args.add_filter.split("@") if args.add_filter else [] +ct.get_export_filters(export_filters) + +## Fetch OCI_regions +cd3service = cd3Services() +cd3service.fetch_regions(config,signer) + +## Check if fetch compartments script needs to be run +run_fetch_script = 0 + +## Fetch Subscribed Regions +subscribed_regions = ct.get_subscribedregions(config,signer) +home_region = ct.home_region + +# Set service directories as per outdir_structure file +# Add service name from outdir_structure_file to dir_services here +dir_services = ["identity","tagging","network","loadbalancer","networkloadbalancer","vlan","nsg","instance", + "block-volume","dedicated-vm-host","adb","dbsystem-vm-bm","database-exacs","fss","oke","sddc", + "cloud-guard","managementservices","budget","kms","object-storage","dns"] +if len(outdir_struct.items())==0: + for item in dir_services: + varname = "service_dir_" + str(item.replace("-", "_")).strip() + exec(varname + "= \"\"") +else: + for key,value in outdir_struct.items(): + varname = "service_dir_"+str(key.replace("-","_")).strip() + exec(varname + "= value") + +var_file = (f'{outdir}/{home_region}/{service_dir_identity}/variables_{home_region}.tf').replace('//','/') + +try: + # read variables file + with open(var_file, 'r') as f: + var_data = f.read() + f.close() +except FileNotFoundError as e: + exit_menu(f'\nVariables file not found in home region - {home_region}.......Exiting!!!\n') + +## Check for the fetch compartment status +fetchcompinfo_data = "run_fetch_script=0" +try: + # read fetchcompinfo.safe + fetch_comp_file = f'{outdir}/fetchcompinfo.safe' + with open(fetch_comp_file, 'r') as f: + fetchcompinfo_data = f.read() + f.close() +except FileNotFoundError as e: + fetchcompinfo_data = "run_fetch_script=1" +if "# compartment ocids" in var_data or "run_fetch_script=1" in fetchcompinfo_data: + run_fetch_script = 1 + +if (run_fetch_script == 1): + print("Script to Fetch Compartments OCIDs to variables file has not been executed. 
Running it now.") + fetch_compartments(outdir,outdir_struct, ct) +else: + print("Make sure to execute the script for 'Fetch Compartments OCIDs to variables file' under 'CD3 Services' menu option at-least once before you continue!") +global updated_paths +global import_scripts +updated_paths = [] +import_scripts = [] +exec_start_time = datetime.datetime.now() + + +## Menu Options +if non_gf_tenancy: + print("\nworkflow_type set to export_resources. Export existing OCI objects and Synch with TF state") + print("We recommend to not have any existing tfvars/tfstate files for export out directory") + export_regions = get_region_list(rm=False) + for option in main_options: + if option == 'Export Identity': + export_identityOptions(options=sub_options) + if option == 'Export Tags': + export_tags(options=sub_options) + if option == 'Export Network': + export_network(options=sub_options) + if option == 'Export DNS Management': + export_dns(options=sub_options) + if option == 'Export Compute': + export_compute(options=sub_options) + if option == 'Export Storage': + export_storage(options=sub_options) + if option == 'Export Databases': + export_databases(options=sub_options) + if option == 'Export Load Balancers': + export_loadbalancer(options=sub_options) + if option == 'Export Management Services': + export_management_services(options=sub_options) + if option == 'Export Developer Services': + export_developer_services(options=sub_options) + if option == 'Export Software-Defined Data Centers - OCVS': + export_sddc() + if option == 'CD3 Services': + cd3_services(options=sub_options) +else: + export_regions = ct.all_regions + for option in main_options: + if option == 'Validate CD3': + validate_cd3(options=sub_options) + if option == 'Identity': + create_identity(options=sub_options) + if option == 'Tags': + create_tags() + if option == 'Network': + create_network(options=sub_options, sub_options=sub_child_options) + if option == 'DNS Management': + create_dns(options=sub_options) + if option == 'Compute': + create_compute(options=sub_options) + if option == 'Storage': + create_storage(options=sub_options) + if option == 'Database': + create_databases(options=sub_options) + if option == 'Load Balancers': + create_loadbalancer(options=sub_options) + if option == 'Management Services': + create_management_services(options=sub_options) + if option == 'Developer Services': + create_developer_services(options=sub_options) + if option == 'Logging Services': + create_logging(options=sub_options) + if option == 'Software-Defined Data Centers - OCVS': + create_sddc() + if option == 'CIS Compliance Features': + create_cis_features(options=sub_options,sub_options=sub_child_options) + if option == 'CD3 Services': + cd3_services(options=sub_options) + +# write updated paths to a file +updated_paths_file = f'{outdir}/updated_paths.safe' +with open(updated_paths_file, 'w+') as f: + for item in updated_paths: + f.write(str(item).replace('//','/')+"\n") +f.close() +import_scripts_file = f'{outdir}/import_scripts.safe' +with open(import_scripts_file, 'w+') as f: + for item in import_scripts: + f.write(str(item).replace('//','/')+"\n") +f.close() diff --git a/cd3_automation_toolkit/shell_script.sh b/cd3_automation_toolkit/shell_script.sh index 33b80be2c..f1dd087f7 100644 --- a/cd3_automation_toolkit/shell_script.sh +++ b/cd3_automation_toolkit/shell_script.sh @@ -17,7 +17,7 @@ source /cd3user/.bashrc python -m pip install --user --upgrade pip #non needed #python -m pip install --user oci==2.110.1 -python -m pip install 
--user oci-cli==3.31.0 +python -m pip install --user oci-cli==3.37.0 python -m pip install --user pycryptodomex==3.10.1 python -m pip install --user regex==2022.10.31 python -m pip install --user pandas==1.1.5 @@ -31,7 +31,10 @@ python -m pip install --user cfgparse==1.3 python -m pip install --user ipaddress==1.0.23 python -m pip install --user Jinja2==3.1.2 python -m pip install --user simplejson==3.18.3 +python -m pip install --user GitPython==3.1.40 +python -m pip install --user PyYAML==6.0.1 echo "export PYTHONPATH=${PYTHONPATH}:/root/.local/lib/python3.8/site-packages/:/cd3user/.local/lib/python3.8/site-packages/:/opt/rh/rh-python38/root/usr/lib/python3.8/site-packages/" >> /cd3user/.bashrc +echo "PATH=$PATH:/cd3user/.local/bin" >> /cd3user/.bashrc source /cd3user/.bashrc diff --git a/cd3_automation_toolkit/user-scripts/OPA/Logging_Monitoring/oci_resource_tags.rego b/cd3_automation_toolkit/user-scripts/OPA/Logging_Monitoring/oci_resource_tags.rego index 85af9b0a8..df067e0c2 100755 --- a/cd3_automation_toolkit/user-scripts/OPA/Logging_Monitoring/oci_resource_tags.rego +++ b/cd3_automation_toolkit/user-scripts/OPA/Logging_Monitoring/oci_resource_tags.rego @@ -7,8 +7,8 @@ package terraform import input as tfplan -#UPDATE the required tags here. -required_tags = ["owner", "department"] +#UPDATE the required tags here or pass it while calling the rule +#required_tags = ["owner", "department"] array_contains(arr, elem) { @@ -34,6 +34,7 @@ get_tags(resource) = labels { deny[reason] { resource := tfplan.resource_changes[_] + required_tags := input.required_tags action := resource.change.actions[count(resource.change.actions) - 1] array_contains(["create", "update"], action) tags := get_tags(resource) diff --git a/cd3_automation_toolkit/user-scripts/createTenancyConfig.py b/cd3_automation_toolkit/user-scripts/createTenancyConfig.py index 53b89db20..7d99dac22 100644 --- a/cd3_automation_toolkit/user-scripts/createTenancyConfig.py +++ b/cd3_automation_toolkit/user-scripts/createTenancyConfig.py @@ -1,24 +1,39 @@ #!/usr/bin/python3 # Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. # -# This script will help in initilizing the docker container; creates config and variables files. +# This script will help in initializing the docker container; creates config and variables files. 
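# Illustrative usage sketch, not part of this patch: createTenancyConfig.py is driven by a
# properties file passed on the command line, e.g.
#     python createTenancyConfig.py tenancyconfig.properties
# Assuming api_key authentication, a minimal [Default] section read by the code below
# (key names taken from the config.get() calls in this script; the values are placeholders)
# might look like:
#     [Default]
#     tenancy_ocid    = ocid1.tenancy.oc1..aaaa
#     user_ocid       = ocid1.user.oc1..aaaa
#     fingerprint     = 12:34:56:78:90:ab:cd:ef:12:34:56:78:90:ab:cd:ef
#     key_path        = /cd3user/tenancies/keys/oci_api_private.pem
#     region          = us-ashburn-1
#     customer_name   = demo
#     auth_mechanism  = api_key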
# # Author: Shruthi Subramanian # -import glob import argparse import logging import os +import re import shutil import sys -import time +import datetime import configparser import distutils from distutils import dir_util - +import oci +from oci.object_storage import ObjectStorageClient +import git +import glob +import yaml sys.path.append(os.getcwd()+"/..") from commonTools import * +from copy import deepcopy + +global topic_name +global project_name +global repo_name +global devops_exists +global devops_repo +global commit_id +global bucket_name +global jenkins_home + def paginate(operation, *args, **kwargs): while True: @@ -29,329 +44,947 @@ def paginate(operation, *args, **kwargs): if not response.has_next_page: break -def seek_info(): +def create_devops_resources(config,signer): + resource_search = oci.resource_search.ResourceSearchClient(config, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + toolkit_topic_id = '' + if not devops_exists: + # Check existence of Topic + + ons_query = 'query onstopic resources where displayname = \''+topic_name+'\'' + ons_search_details = oci.resource_search.models.StructuredSearchDetails(type='Structured', + query=ons_query) + ons_resources = oci.pagination.list_call_get_all_results(resource_search.search_resources, ons_search_details, + limit=1000) + for ons in ons_resources.data: + topic_state = ons.lifecycle_state + if topic_state != 'ACTIVE': + print("Topic exists with name(" + topic_name + ") but is not in ACTIVE state. Exiting...") + exit(1) + toolkit_topic_id = ons.identifier + topic_comp = ons.compartment_id + print("Topic exists with name(" + topic_name + ") in compartment '"+topic_comp+"' Reusing same.") + + # Create New Topic + if toolkit_topic_id=='': + # Initialize ONS service client with default config file + ons_client = oci.ons.NotificationControlPlaneClient(config=config, + retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + + create_topic_response = ons_client.create_topic(create_topic_details=oci.ons.models.CreateTopicDetails( + name=topic_name, compartment_id=compartment_ocid, description="Created by Automation ToolKit")).data + toolkit_topic_id = create_topic_response.topic_id + + + # Check existence of DevOps Project + toolkit_project_id = '' + devops_query = 'query devopsproject resources where displayname = \'' + project_name + '\'' + devops_search_details = oci.resource_search.models.StructuredSearchDetails(type='Structured', + query=devops_query) + devops_resources = oci.pagination.list_call_get_all_results(resource_search.search_resources, devops_search_details, + limit=1000) + for project in devops_resources.data: + project_state = project.lifecycle_state + if project_state != 'ACTIVE': + print("Project exists with name(" + project_name + ") but is not in ACTIVE state. Exiting...") + exit(1) + toolkit_project_id = project.identifier + project_comp = project.compartment_id + print("Project exists with name(" + project_name + ") in compartment '"+project_comp+"' Reusing same.") + + # Initialize Devops service client with default config file + devops_client = oci.devops.DevopsClient(config=config, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + # Create Devops Project + if toolkit_project_id=='': + if (toolkit_topic_id==''): + print("Topic ID is empty for Project - "+project_name+". 
Please check your input properties file") + exit(1) + create_project_response = devops_client.create_project( + create_project_details=oci.devops.models.CreateProjectDetails( + name=project_name, + notification_config=oci.devops.models.NotificationConfig( + topic_id=toolkit_topic_id), + compartment_id=compartment_ocid, + description="Created by Automation ToolKit")).data + + toolkit_project_id = create_project_response.id + # Check if repository exists + list_repository_response = devops_client.list_repositories(project_id=toolkit_project_id,name=repo_name).data + repo_state = "" + if list_repository_response.items: + for item in list_repository_response.items: + repo_state = item.lifecycle_state + repo_id = item.id + if repo_state != "ACTIVE": + print("Repository exists with name("+repo_name+") but is not in ACTIVE state. Please retry with different customer_name. Exiting...") + exit(1) + else: + repo_url = item.ssh_url + print("Repository exists with name("+repo_name+") and is in ACTIVE state. Reusing same.") - parser = argparse.ArgumentParser(description="Creates OCS Work related components") - parser.add_argument("propsfile", help="Full Path of properties file. eg tenancyconfig.properties") - args = parser.parse_args() - config = configparser.RawConfigParser() - config.read(args.propsfile) + else: + # Create Repository + #print("Creating Repository with name(" + repo_name + ").") + create_repository_response = devops_client.create_repository( + create_repository_details=oci.devops.models.CreateRepositoryDetails( + name=repo_name, + project_id=toolkit_project_id, + repository_type="HOSTED", + description="Created by Automation ToolKit")).data + repo_url = create_repository_response.ssh_url + repo_id = create_repository_response.id + print("Waiting for repository ("+repo_name+") to be in ACTIVE state.") + while repo_state != "ACTIVE": + repo_data = devops_client.get_repository(repository_id=repo_id).data + repo_state = repo_data.lifecycle_state + list_paths_response = devops_client.list_paths(repository_id=repo_id) + files_in_repo = len(list_paths_response.data.items) + return repo_url,files_in_repo + + +def update_devops_config(prefix,git_config_file, repo_ssh_url,files_in_repo,dir_values,devops_user,devops_user_key,devops_dir,ct): + # create git config file + file = open(git_config_file, "w") + file.write("Host devops.scmservice.*.oci.oraclecloud.com\n " + "StrictHostKeyChecking no\n " + "User "+str(devops_user)+"\n " + "IdentityFile "+str(devops_user_key)+"\n") - # 1. Creation of Config File - - print("=================================================================") - print("NOTE: Make sure the API Public Key is added to the OCI Console!!!") - print("=================================================================") + file.close() - # Read Config file Variables + # copy to cd3user home dir + if not os.path.exists("/cd3user/.ssh"): + os.makedirs("/cd3user/.ssh") + shutil.copyfile(git_config_file,'/cd3user/.ssh/config') + + # change permissions of private key file and config file for GIT + os.chmod(devops_user_key, 0o600) + os.chmod('/cd3user/.ssh/config', 0o600) + os.chmod(git_config_file, 0o600) + + + ''' + # create symlink for Git Config file for SSH operations. + src = git_config_file + if not os.path.exists("/cd3user/.ssh"): + os.makedirs("/cd3user/.ssh") + dst = "/cd3user/.ssh/config" try: - tenancy = config.get('Default', 'tenancy_ocid').strip() - if tenancy == "" or tenancy == "\n": - print("Invalid Tenancy ID. 
Please try again......Exiting !!") - exit(1) + os.symlink(src,dst) + except FileExistsError as e: + os.unlink(dst) + os.symlink(src,dst) + ''' + + # create jenkins.properties file + if not os.path.exists(jenkins_home): + os.mkdir(jenkins_home) + jenkins_properties_file_path = jenkins_home+"/jenkins.properties" + + if dir_values: + dir_structure = "Multiple_Outdir" + else: + dir_structure = "Single_Outdir" + + file = open(jenkins_properties_file_path, "w+") + file.write("git_url= \""+repo_ssh_url+"\"\n" + "regions="+str(ct.all_regions)+"\n" + "services="+str(dir_values)+"\n" + "outdir_structure=[\""+dir_structure+"\"]\n") + file.close() + + # Update Environment variable for jenkins + yaml_file_path = os.environ['JENKINS_INSTALL'] + "/jcasc.yaml" + with open(yaml_file_path) as yaml_file: + cfg = yaml.load(yaml_file, Loader=yaml.FullLoader) + cfg["jenkins"]["globalNodeProperties"] = [{'envVars': {'env': [{'key': 'customer_prefix', 'value': prefix}]}}] + with open(yaml_file_path, "w") as yaml_file: + cfg = yaml.dump(cfg, stream=yaml_file, default_flow_style=False, sort_keys=False) + # Clean repo config if exists and initiate git repo + #if os.path.exists(devops_dir +".git"): + # dir_util.remove_tree(devops_dir +".git") + local_repo = git.Repo.init(devops_dir) + f = open(devops_dir + ".gitignore", "w") + git_ignore_file_data = ".DS_Store\n*tfstate*\n*terraform*\ntfplan.out\ntfplan.json\n*backup*\ntf_import_commands*\n*cis_report*\n*.safe\n*stacks.zip\n*cd3Validator*" + f.write(git_ignore_file_data) + f.close() + existing_remote = local_repo.git.remote() + if existing_remote == "origin": + local_repo.delete_remote("origin") + origin = local_repo.create_remote("origin", repo_ssh_url) + assert origin.exists() + assert origin == local_repo.remotes.origin == local_repo.remotes["origin"] + try: + origin.fetch() # assure we actually have data. fetch() returns useful information + except Exception as e: + print(str(e)) + f = open(safe_file, "a") + data = prefix + "\t" + "FAIL\t"+current_time+"\n" + f.write(data) + f.close() + exit(1) + + # Setup a local tracking branch of a remote branch + local_repo.create_head("main", origin.refs.main) # create local branch "main" from remote "main" + local_repo.heads.main.set_tracking_branch(origin.refs.main) # set local "main" to track remote "main" + local_repo_files = glob.glob(devops_dir+'*') + local_repo_files.extend(glob.glob(devops_dir + '.*')) + if local_repo.git.status("--porcelain") and files_in_repo > 0: + repo_changes = input("\nData in local terraform_files and repo is not same, which changes you want to retain? Enter local or repo, default is local : ") + if ("repo" in repo_changes.lower()): + dir_util.remove_tree(terraform_files) + os.makedirs(terraform_files) + local_repo = git.Repo.init(devops_dir) + existing_remote = local_repo.git.remote() + if existing_remote == "origin": + local_repo.delete_remote("origin") + origin = local_repo.create_remote("origin", repo_ssh_url) + assert origin.exists() + assert origin == local_repo.remotes.origin == local_repo.remotes["origin"] + origin.fetch() # assure we actually have data. 
fetch() returns useful information + # Setup a local tracking branch of a remote branch + local_repo.create_head("main", origin.refs.main) # create local branch "main" from remote "main" + local_repo.heads.main.set_tracking_branch(origin.refs.main) + local_repo.heads.main.checkout() + else: + tmp_dir = customer_tenancy_dir+"/tmp_repo" + if os.path.exists(tmp_dir): + dir_util.remove_tree(tmp_dir) + os.mkdir(tmp_dir) + for item in [f for f in local_repo_files if not f.endswith(".git")]: + shutil.move(item, tmp_dir+"/") + local_repo.heads.main.checkout() + local_repo.git.pull() + local_repo_files = glob.glob(devops_dir + '*') + local_repo_files.extend(glob.glob(devops_dir + '.*')) + for item in [f for f in local_repo_files if not f.endswith(".git")]: + if os.path.isfile(item): + os.remove(item) + else: + dir_util.remove_tree(item) + temp_repo_files = glob.glob(tmp_dir + '/*') + temp_repo_files.extend(glob.glob(tmp_dir + '/.*')) + for item in [f for f in temp_repo_files if not f.endswith(".git")]: + item = item.split("/")[-1] + if os.path.isfile(tmp_dir+"/"+item): + shutil.copy(tmp_dir+"/"+item, devops_dir+item) + else: + dir_util.copy_tree(tmp_dir+"/"+item,devops_dir+item) + + for f in glob.glob(os.environ['JENKINS_INSTALL'] + "/*.groovy"): + shutil.copy2(f, devops_dir) + + dir_util.remove_tree(tmp_dir) + + else: + local_repo.heads.main.checkout() + local_repo.config_writer().set_value("user", "name", devops_user).release() + local_repo.config_writer().set_value("user", "email", devops_user).release() + for f in glob.glob(os.environ['JENKINS_INSTALL'] + "/*.groovy"): + shutil.copy2(f, devops_dir) + #shutil.copy(os.environ['JENKINS_INSTALL'] + "/singleOutput.groovy", devops_dir + "/singleOutput.groovy") + local_repo.git.add('--all') + commit_id='None' + try: + msg = local_repo.git.commit('-m', 'Initial commit from createTenancyConfig.py') + commit_id = re.search("\[(.*)\]", msg) + commit_id = commit_id.group(1).split(" ")[1] + local_repo.git.push() + print("Initial Commit to DevOps Repository done with commit id: " + commit_id) + except git.exc.GitCommandError as e: + if ("nothing to commit, working directory clean" in str(e)): + print("Nothing to commit to DevOps Repository.") + except Exception as e: + print(e) + print("Exiting...") + exit(1) + return commit_id + +def create_bucket(config, signer): + bucket_region = config.get('region').strip() + buckets_client = ObjectStorageClient(config=config, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, signer=signer) + namespace = buckets_client.get_namespace().data + try: + buckets_client.get_bucket(namespace, bucket_name).data + print("Bucket exists with name(" + bucket_name + ") in " + bucket_region + ". Reusing same.") + except Exception as e: + #print("\nCreating bucket " + bucket_name + " under root compartment in " + bucket_region+" for remote state...") + create_bucket_response = buckets_client.create_bucket( + namespace_name=namespace, + create_bucket_details=oci.object_storage.models.CreateBucketDetails(name=bucket_name,compartment_id=compartment_ocid,versioning='Enabled')) + return bucket_region,bucket_name + + +# Execution of code begins here +parser = argparse.ArgumentParser(description="Connects the container to tenancy") +parser.add_argument("propsfile", help="Full Path of properties file. 
eg tenancyconfig.properties") +args = parser.parse_args() +config = configparser.RawConfigParser() +config.read(args.propsfile) + +current_time=str(datetime.datetime.now()) + +# Initialize Toolkit Variables +user_dir = "/cd3user" +safe_file = user_dir + "/tenancies/createTenancyConfig.safe" +auto_keys_dir = user_dir + "/tenancies/keys" +toolkit_dir = user_dir +"/oci_tools/cd3_automation_toolkit" +modules_dir = toolkit_dir + "/user-scripts/terraform" +variables_example_file = modules_dir + "/variables_example.tf" +setupoci_props_toolkit_file_path = toolkit_dir + "/setUpOCI.properties" +jenkins_dir = os.environ['JENKINS_INSTALL'] + +prefix = config.get('Default', 'customer_name').strip() +if prefix == "" or prefix == "\n": + print("Invalid Customer Name. Please try again......Exiting !!") + exit(1) + +prefixes=[] +if os.path.exists(safe_file): + f=open(safe_file,"r") + safe_file_lines = f.readlines() + for l in safe_file_lines: + if "SUCCESS" in l: + prefixes.append(l.split("\t")[0]) + +if prefixes !=[]: + if prefix in prefixes: + print("WARNING!!! Container has already been successfully connected to the tenancy with the same customer_name. Please proceed only if you are re-running the script for a new region subscription") + else: + print("WARNING!!! Container has already been successfully connected to the tenancy with these values of customer_name: "+str(list(set(prefixes)))) + print("WARNING!!! Toolkit usage with Jenkins has not been tested when running this script multiple times with different values of customer_name in the properties file") + print("Jenkins is configured for the customer_name used for the first successful execution of the script.") + inp = input("\nDo you want to proceed (y/n):") + if inp.lower()=="n": + exit(1) + +# Initialize Tenancy Variables +customer_tenancy_dir = user_dir + "/tenancies/" + prefix +config_files= user_dir + "/tenancies/" + prefix +"/.config_files" +config_file_path = config_files + "/" + prefix + "_oci_config" + +terraform_files = customer_tenancy_dir + "/terraform_files/" +setupoci_props_file_path = customer_tenancy_dir + "/" + prefix + "_setUpOCI.properties" + +# Read Config file Variables +try: + user='' + _key_path='' + fingerprint='' + + tenancy = config.get('Default', 'tenancy_ocid').strip() + if tenancy == "" or tenancy == "\n": + print("Tenancy ID cannot be left empty...Exiting !!") + exit(1) + + auth_mechanism = config.get('Default', 'auth_mechanism').strip().lower() + if auth_mechanism == "" or auth_mechanism == "\n" or (auth_mechanism!='api_key' and auth_mechanism!='session_token' and auth_mechanism!='instance_principal'): + print("Auth Mechanism cannot be left empty...Exiting !!") + exit(1) + + region = config.get('Default', 'region').strip() + if region == "" or region == "\n": + print("Region cannot be left empty...Exiting !!") + exit(1) + rg=region + + if auth_mechanism == 'api_key': + print("=================================================================") + print("NOTE: Make sure the API Public Key is added to the OCI Console!!!") + print("=================================================================") fingerprint = config.get('Default', 'fingerprint').strip() if fingerprint == "" or fingerprint == "\n": - print("Invalid Fingerprint.
Please try again......Exiting !!") + print("Fingerprint cannot be left empty...Exiting !!") + exit(1) + + key_path = config.get('Default', 'key_path').strip() + if key_path == "" or key_path == "\n": + key_path = auto_keys_dir +"/oci_api_private.pem" + if not os.path.isfile(key_path): + print("Invalid PEM Key File at "+key_path+". Please try again......Exiting !!") exit(1) user = config.get('Default', 'user_ocid').strip() if user == "" or user == "\n": - print("Invalid User ID. Please try again......Exiting !!") + print("user_ocid cannot be left empty...Exiting !!") exit(1) - prefix = config.get('Default', 'customer_name').strip() - if prefix == "" or prefix == "\n": - print("Invalid Prefix. Please try again......Exiting !!") - exit(1) - - key_path = config.get('Default', 'key_path').strip() + outdir_structure_file = config.get('Default', 'outdir_structure_file').strip() + ssh_public_key = config.get('Default', 'ssh_public_key').strip() + + ## Advanced parameters ## + remote_state = config.get('Default', 'use_remote_state').strip().lower() + remote_state_bucket = config.get('Default', 'remote_state_bucket_name').strip() + + use_devops = config.get('Default', 'use_oci_devops_git').strip().strip().lower() + devops_repo = config.get('Default', 'oci_devops_git_repo_name').strip().strip() + devops_user = config.get('Default', 'oci_devops_git_user').strip() + devops_user_key = config.get('Default', 'oci_devops_git_key').strip() + + if use_devops == 'yes' or remote_state == 'yes': + #Use remote state if using devops + remote_state='yes' + + # OCI DevOps GIT User and Key are mandatory while using instance_principal or session_token + if auth_mechanism == 'instance_principal' or auth_mechanism == 'session_token': + if devops_user == "" or devops_user == "\n": + print("OCI DevOps GIT User cannot be left empty when using instance_principal or session_token...Exiting !!") + exit(1) + if use_devops == 'yes' and devops_user_key == "" or devops_user_key == "\n": + print("OCI DevOps GIT Key cannot be left empty when using instance_principal or session_token...Exiting !!") + exit(1) + if auth_mechanism == 'api_key': + # Use same user and key as $user_ocid and $key_path for OCI Devops GIT operations + if devops_user == '' or devops_user=="\n": + devops_user = user + if devops_user_key == '' or devops_user_key=="\n": + devops_user_key = config_files+"/"+os.path.basename(key_path) + + if remote_state == 'yes': + # Use same oci_devops_git_user for managing terraform remote state backend + remote_state_user=devops_user + + # Bucket Name + if remote_state_bucket == '' or remote_state_bucket == "\n": + bucket_name = prefix + "-automation-toolkit-bucket" + else: + bucket_name = remote_state_bucket.strip() - region = config.get('Default', 'region').strip() - if (region == '' or key_path == "\n"): - region = "us-ashburn-1" + compartment_ocid = config.get('Default', 'compartment_ocid').strip() + if compartment_ocid == '' or compartment_ocid == '\n': + compartment_ocid = tenancy - outdir_structure_file = config.get('Default', 'outdir_structure_file').strip() +except Exception as e: + print(e) + print('Check if input properties exist and try again..exiting...') + exit(1) - ssh_public_key = config.get('Default', 'ssh_public_key').strip() - except Exception as e: - print(e) - print('Check if input properties exist and try again..exiting...` ') - exit() - - # Variables Initialization - user_dir = "/"+"cd3user" - customer_tenancy_dir = user_dir +"/tenancies/" + prefix - terraform_files = 
customer_tenancy_dir+"/terraform_files/" - config_file_path = customer_tenancy_dir+"/"+prefix+"_config" - auto_keys_dir = user_dir+"/tenancies/keys" - toolkit_dir = user_dir +"/oci_tools/cd3_automation_toolkit" - modules_dir = toolkit_dir+"/user-scripts/terraform" - #documentation_dir = toolkit_dir+"/documentation" - variables_example_file = modules_dir +"/variables_example.tf" - setupoci_props_toolkit_file_path =toolkit_dir + "/setUpOCI.properties" - setupoci_props_file_path = customer_tenancy_dir + "/" + prefix + "_setUpOCI.properties" - _outdir_structure_file = '' - if (outdir_structure_file != '' and outdir_structure_file != "\n"): - if not os.path.isfile(outdir_structure_file): - print("Invalid outdir_structure_file. Please provide correct file path......Exiting !!") - exit(1) - else: - outdir_config = configparser.RawConfigParser() - outdir_config.read(outdir_structure_file) - for key, value in outdir_config.items("Default"): - if value == '': - print("Out Directory is missing for one or more parameters, for eg. " + key) - print("Please check " + outdir_structure_file) - exit(1) - """if key == 'dns': - outdir_config.set('Default', 'dns-resolver', value) - outdir_config.set('Default', 'dns-rrset', value) - outdir_config.set('Default', 'dns-view', value) - outdir_config.set('Default', 'dns-zone', value)""" - if not os.path.exists(customer_tenancy_dir): - os.makedirs(customer_tenancy_dir) - _outdir_structure_file = customer_tenancy_dir + "/" + prefix + "_outdir_structure_file" - shutil.copyfile(outdir_structure_file, _outdir_structure_file) - print("Using different directories for OCI services as per the input outdir_structure_file..........") - else: - if not os.path.exists(customer_tenancy_dir): - os.makedirs(customer_tenancy_dir) - print("Using single out directory for resources..........") - - - # 1. Move the newly created PEM keys to /cd3user/tenancies// - files = glob.glob(auto_keys_dir+"/*") - - # If the private key is empty or if the private key is already present in the tenancy folder; initialize it to the default path; - if (key_path == '' or key_path == "\n") or (auto_keys_dir + "/oci_api_private.pem" in key_path): - print("key_path field is empty or default in tenancyconfig.properties. Using " + user_dir + "/tenancies/keys/oci_api_private.pem") - if os.path.exists(auto_keys_dir): - print("Copying the key files to " + customer_tenancy_dir) - if files: - for f in files: - if os.path.exists(f): - filename = f.split('/')[-1] - if os.path.exists(customer_tenancy_dir + "/" + filename): - shutil.move(customer_tenancy_dir + "/" + filename, - customer_tenancy_dir + "/" + filename + "_backup") - shutil.copyfile(f, customer_tenancy_dir + "/" + filename) - key_path = customer_tenancy_dir + "/oci_api_private.pem" - else: - print("Key files not found. Please make sure to specify the right path in the properties file.....Exiting!!!") - exit(0) - else: - print("Directory - "+auto_keys_dir+" does not exist. 
Please make sure to specify the right path in the properties file.....Exiting!!!") - exit(0) - shutil.move(auto_keys_dir, auto_keys_dir + "_backup_" + time.strftime("%H%M%S")) +if not os.path.exists(customer_tenancy_dir): + os.makedirs(customer_tenancy_dir) +if not os.path.exists(config_files): + os.makedirs(config_files) +dir_values = [] - # If the key - oci_api_private.pem is already present in the tenancy folder - elif customer_tenancy_dir + '/oci_api_private.pem' in key_path: - key_path = customer_tenancy_dir + "/oci_api_private.pem" +# Copy input properties file to customer_tenancy_dir +shutil.copy(args.propsfile,config_files+"/"+prefix+"_"+args.propsfile) - # If the private key is elsewhere; move it to the tenancy folder - elif auto_keys_dir + "/oci_api_private.pem" not in key_path: - try: - shutil.move(key_path, customer_tenancy_dir + '/oci_api_private.pem') - except FileNotFoundError as e: - print( - "Key file not found. Please make sure to specify the right path in the properties file.....Exiting!!!") - exit(0) - key_path = customer_tenancy_dir + "/oci_api_private.pem" +# 1. Copy outdir_structure_file +_outdir_structure_file = '' +if (outdir_structure_file != '' and outdir_structure_file != "\n"): + if not os.path.isfile(outdir_structure_file): + print("Invalid outdir_structure_file. Please provide correct file path......Exiting !!") + exit(1) else: - print("\n") - print("=================================================================") - print("\"keys\" directory NOT FOUND in " + user_dir + "/tenancies/" + ". \n" - "Please generate the keys using the command \"python createAPIKey.py\" \n(OR)\nIf the keys already exist:\n- Create a folder named \"keys\" in " + user_dir + "/tenancies/" + "\n- Place the keys with names oci_api_public.pem and oci_api_private.pem respectively\n!! Try Again !!") - print("=================================================================") - exit(0) - - - if not os.path.exists(terraform_files): - os.makedirs(terraform_files) + outdir_config = configparser.RawConfigParser() + outdir_config.read(outdir_structure_file) + for key, value in outdir_config.items("Default"): + if value == '': + print("Out Directory is missing for one or more parameters, for eg. " + key) + print("Please check " + outdir_structure_file) + exit(1) + if value not in dir_values: + dir_values.append(str(value)) + + _outdir_structure_file = customer_tenancy_dir + "/" + prefix + "_outdir_structure_file.properties" + #if not os.path.exists(_outdir_structure_file): + shutil.copyfile(outdir_structure_file, _outdir_structure_file) + print("\nUsing different directories for OCI services as per the input outdir_structure_file..........") +else: + print("\nUsing single out directory for resources..........") + ################ Get service names here only ######################## + +# 2. Move Private PEM key and Session Token file +_session_token_file='' +_key_path = '' +if auth_mechanism=='api_key': # or auth_mechanism=='session_token': + print("\nCopying Private Key File..........") + # Move Private PEM Key File + filename = os.path.basename(key_path) + #shutil.copy(key_path, key_path + "_backup_"+ datetime.datetime.now().strftime("%d-%m-%H%M%S").replace('/', '-')) + shutil.copy(key_path, config_files + "/" + filename) + _key_path = config_files + "/" + filename + os.chmod(_key_path,0o600) + +# 3. 
Create config file +#if not os.path.isfile(config_file_path): +print("\nCreating Tenancy specific config.................")#, terraform provider , variables and properties files.................") + +if auth_mechanism=='api_key': + file = open(config_file_path, "w") + file.write("[DEFAULT]\n" + "tenancy = "+tenancy+"\n" + "fingerprint = "+fingerprint+"\n" + "user = "+user+"\n" + "key_file = "+_key_path+"\n" + "region = "+region+"\n") + file.close() +elif auth_mechanism=='session_token': + ''' + file.write("[DEFAULT]\n" + "tenancy = "+tenancy+"\n" + "fingerprint = "+fingerprint+"\n" + "security_token_file = "+_session_token_file+"\n" + "key_file = "+_key_path+"\n" + "region = "+region+"\n") + ''' + # copy config file to customer specific directory and create symlink for TF execution + config_file_path_user_home = user_dir + "/.oci/config" + # To take care of multiple executions of createTenancyConfig,py + if not os.path.islink(config_file_path_user_home): + #shutil.copy(config_file_path_user_home,config_file_path_user_home + "_backup_" + datetime.datetime.now().strftime("%d-%m-%H%M%S").replace('/', '-')) + shutil.copy(config_file_path_user_home, config_file_path) + src = config_file_path + dst = config_file_path_user_home + try: + os.symlink(src,dst) + except FileExistsError as e: + os.unlink(dst) + os.symlink(src,dst) - # 2. Create config file - print("Creating the Tenancy specific config.................")#, terraform provider , variables and properties files.................") +elif auth_mechanism=='instance_principal': file = open(config_file_path, "w") file.write("[DEFAULT]\n" "tenancy = "+tenancy+"\n" - "fingerprint = "+fingerprint+"\n" - "user = "+user+"\n" - "key_file = "+key_path+"\n" - "region = "+region+"\n") + "region = "+region+"\n") file.close() - # 2. Fetch OCI_regions - cd3service = cd3Services() - cd3service.fetch_regions(configFileName=config_file_path) - # 3. Fetch AD Names and write to config file - print('Fetching AD names from tenancy and writing to config file if it does not exist.............') - try: - python_config = oci.config.from_file(file_location=config_file_path) - except oci.exceptions.InvalidKeyFilePath as e: - print("\nInvalid key_file path. Please make sure to specify the right path in the properties file.....Exiting!!!") - exit(0) - identity_client = oci.identity.IdentityClient(python_config) - conf_file = open(config_file_path, "a") - tenancy_id = tenancy - i = 1 - for ad in paginate(identity_client.list_availability_domains, compartment_id=tenancy_id): - ad_name = "ad_" + str(i) - if not ad_name in python_config: - conf_file.write("ad_" + str(i) + "=" + ad.name + "\n") - i = i + 1 - conf_file.close() - - ct = commonTools() - - # 4. 
Generate setUpOCI.properties file - print("Creating the Tenancy specific setUpOCI.properties.................") - with open(setupoci_props_toolkit_file_path, 'r+') as setUpOci_file: - setupoci_props_toolkit_file_data = setUpOci_file.read().rstrip() - - setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("outdir=", "outdir="+terraform_files) - setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("prefix=", "prefix="+prefix) - setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("config_file=", "config_file="+config_file_path) - setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("outdir_structure_file=", "outdir_structure_file="+_outdir_structure_file) - - f = open(setupoci_props_file_path, "w+") - f.write(setupoci_props_toolkit_file_data) +tenancy_id=tenancy + +## Authenticate +ct = commonTools() +config, signer = ct.authenticate(auth_mechanism, config_file_path) +try: + ct.get_subscribedregions(config,signer) +except Exception as e: + print(str(e)) + f = open(safe_file, "a") + data = prefix + "\t" + "FAIL\t" + current_time + "\n" + f.write(data) f.close() + exit(1) + +home_region = ct.home_region + +## Fetch OCI_regions +cd3service = cd3Services() +print("") +cd3service.fetch_regions(config, signer) + +## Check the remote state requirements +backend_file = open(modules_dir + "/backend.tf", 'r') +backend_file_data = backend_file.readlines() +global_backend_file_data = "" + +if remote_state == "yes": + print("\nCreating Tenancy specific remote tfstate Items - bucket, S3 credentials.................") + s3_credential_file_path = config_files + "/" + prefix + "_s3_credentials" + buckets_client = ObjectStorageClient(config=config, + retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + namespace = buckets_client.get_namespace().data + bucket_region,bucket_name=create_bucket(config,signer) + try: + # Generate customer_secret_keys for remote state credentials + new_config = deepcopy(config) + new_config.__setitem__("region", ct.region_dict[home_region]) + + identity_client = oci.identity.IdentityClient(config=new_config, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY,signer=signer) + cred_name = prefix+"-automation-toolkit-csk" + + # Get user ocid for DevOps User Name + if "ocid1.user.oc1" not in remote_state_user: + if '@' in remote_state_user: + remote_state_user = remote_state_user.rsplit("@",1)[0] + + identity_client = oci.identity.IdentityClient(config=new_config, + retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + user_data = identity_client.list_users(compartment_id=tenancy).data + + found=0 + for user_d in user_data: + if user_d.name==remote_state_user and user_d.lifecycle_state=="ACTIVE": + remote_state_user = user_d.id + found =1 + break + if found == 0: + print("Unable to find the user ocid for creating customer secret key. 
Exiting...") + exit(1) + + + # check if S3 credential exists + customer_secret_key_id='' + credential_file_data='' + list_customer_secret_key_response = identity_client.list_customer_secret_keys(user_id=remote_state_user).data + for keys in list_customer_secret_key_response: + if keys.display_name == cred_name: + customer_secret_key_id=keys.id + break + + if customer_secret_key_id!='': + # Delete existing key with same name from user profile if S3 credential file is missing and create new one + if not os.path.exists(s3_credential_file_path): + identity_client.delete_customer_secret_key(user_id=remote_state_user, + customer_secret_key_id=customer_secret_key_id) + + create_customer_secret_key_response = identity_client.create_customer_secret_key(create_customer_secret_key_details=oci.identity.models.CreateCustomerSecretKeyDetails(display_name=cred_name),user_id=remote_state_user).data + credential_file_data="[default]\naws_access_key_id="+str(create_customer_secret_key_response.id)+"\naws_secret_access_key="+create_customer_secret_key_response.key+"\n" + # If S3 Crednetials file exists, check if it's the same key + elif os.path.exists(s3_credential_file_path): + text = "aws_access_key_id="+customer_secret_key_id+"" + f = open(f"{s3_credential_file_path}", "r") + same_key=0 + existing_credential_file_lines = f.readlines() + for line in existing_credential_file_lines: + if text == line.strip(): + same_key=1 + break + + #If Access Key id is different then delete the existing key and create new one + if same_key == 0 : + identity_client.delete_customer_secret_key(user_id=remote_state_user, + customer_secret_key_id=customer_secret_key_id) + + create_customer_secret_key_response = identity_client.create_customer_secret_key( + create_customer_secret_key_details=oci.identity.models.CreateCustomerSecretKeyDetails( + display_name=cred_name), user_id=remote_state_user).data + credential_file_data = "[default]\naws_access_key_id=" + str( + create_customer_secret_key_response.id) + "\naws_secret_access_key=" + create_customer_secret_key_response.key + "\n" + else: + print("Continuing to use existing customer secret key\n") + + #Create New Key + if customer_secret_key_id == '': + if (len(list_customer_secret_key_response) > 1): + print("\nUser (" + remote_state_user + ") already has max customer secret keys created. Cannot create a new one to be used with toolkit for tfstate remote management. Please clear the existing keys or use different user. Exiting...") + exit(1) + create_customer_secret_key_response = identity_client.create_customer_secret_key( + create_customer_secret_key_details=oci.identity.models.CreateCustomerSecretKeyDetails( + display_name=cred_name), user_id=remote_state_user).data + credential_file_data = "[default]\naws_access_key_id=" + str(create_customer_secret_key_response.id) + "\naws_secret_access_key=" + create_customer_secret_key_response.key + "\n" - ct.get_subscribedregions(config_file_path) - home_region = ct.home_region + except Exception as e: + print(str(e)) + exit(1) + # Add code to ask domain name/url and generate creds + + if credential_file_data!='': + print("Creating new customer secret key\n") + f = open(f"{s3_credential_file_path}", "w+") + f.write(credential_file_data) + f.close() - # 5. 
Fetch Subscribed regions and create the TF related files - print("Creating the Tenancy specific region directories, terraform provider , variables files.................") + for line in backend_file_data: + if line.__contains__("This line will be removed when using remote state"): + continue + elif line.__contains__("key = "): + global_backend_file_data += " key = \"" + "global/terraform.tfstate\"\n" + elif line.__contains__("bucket = "): + global_backend_file_data += " bucket = \"" + bucket_name + "\"\n" + elif line.__contains__("region = "): + global_backend_file_data += " region = \"" + bucket_region + "\"\n" + elif line.__contains__("endpoint = "): + global_backend_file_data += " endpoint = \"https://" + namespace + ".compat.objectstorage." + bucket_region + ".oraclecloud.com\"\n" + elif line.__contains__("shared_credentials_file = "): + global_backend_file_data += " shared_credentials_file = \"" + s3_credential_file_path + "\"\n" + else: + global_backend_file_data += line +else: + for line in backend_file_data: + global_backend_file_data += line + + +''' +# 3. Fetch AD Names and write to config file +print('Fetching AD names from tenancy and writing to config file if it does not exist.............') +identity_client = oci.identity.IdentityClient(config=config,signer=signer) +conf_file = open(config_file_path, "a") +tenancy_id = tenancy +i = 1 +for ad in paginate(identity_client.list_availability_domains, compartment_id=tenancy_id): + ad_name = "ad_" + str(i) + if not ad_name in config: + conf_file.write("ad_" + str(i) + "=" + ad.name + "\n") + i = i + 1 +conf_file.close() +''' + +# 4. Generate setUpOCI.properties file +#if not os.path.isfile(setupoci_props_file_path): +print("Creating Tenancy specific setUpOCI.properties.................") +with open(setupoci_props_toolkit_file_path, 'r+') as setUpOci_file: + setupoci_props_toolkit_file_data = setUpOci_file.read().rstrip() + +setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("outdir=", "outdir="+terraform_files) +setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("prefix=", "prefix="+prefix) +setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("auth_mechanism=", "auth_mechanism=" + auth_mechanism) +setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("config_file=", "config_file="+config_file_path) +setupoci_props_toolkit_file_data = setupoci_props_toolkit_file_data.replace("outdir_structure_file=", "outdir_structure_file="+_outdir_structure_file) + +f = open(setupoci_props_file_path, "w+") +f.write(setupoci_props_toolkit_file_data) +f.close() + +# 5. Fetch Subscribed regions and create the TF related files for each region +if not os.path.exists(terraform_files): + os.makedirs(terraform_files) + + +print("Creating Tenancy specific region directories, terraform provider , variables files.................") + +for region in ct.all_regions: + # Rerunning createTenancy for any new region subscription. 
Process only new region directories else continue + if os.path.exists(terraform_files+region): + continue + + os.mkdir(terraform_files+region) + + linux_image_id = '' + windows_image_id = '' + + + new_config = deepcopy(config) + new_config.__setitem__("region", ct.region_dict[region]) + cc = oci.core.ComputeClient(config=new_config,signer=signer) + + # fetch latest image ocids + try: + for image in paginate(cc.list_images, compartment_id=tenancy_id, operating_system='Oracle Linux', + sort_by='TIMECREATED'): + if ("Gen2-GPU" not in image.display_name): + linux_image_id = image.id + break + for image in paginate(cc.list_images, compartment_id=tenancy_id, operating_system='Windows', + sort_by='TIMECREATED'): + if ("Gen2-GPU" not in image.display_name): + windows_image_id= image.id + break + except Exception as e: + print(e) + print("!!! Could not fetch the list of images for Windows and Oracle Linux to write to variables_"+region+".tf file!!!\n" + "Please make sure to have Read Access to the Tenancy at the minimum !!!") + print("\nContinuing without fetching Image OCIDs........!!!") + + # 6. Read variables.tf from examples folder and copy the variables as string + with open(variables_example_file, 'r+') as var_eg_file: + variables_example_file_data = var_eg_file.read().rstrip() + + variables_example_file_data = variables_example_file_data.replace("", tenancy) + variables_example_file_data = variables_example_file_data.replace("", user) + variables_example_file_data = variables_example_file_data.replace("", fingerprint) + variables_example_file_data = variables_example_file_data.replace("", _key_path) + variables_example_file_data = variables_example_file_data.replace("", ct.region_dict[region]) + variables_example_file_data = variables_example_file_data.replace("", ssh_public_key) + if (windows_image_id != ''): + variables_example_file_data = variables_example_file_data.replace("", windows_image_id) + + if (linux_image_id != ''): + variables_example_file_data = variables_example_file_data.replace("", linux_image_id) + + f = open(terraform_files+"/"+region+"/variables_" + region + ".tf", "w+") + f.write(variables_example_file_data) + f.close() - for region in ct.all_regions: - if not os.path.exists(terraform_files+region): - os.mkdir(terraform_files+region) - linux_image_id = '' - windows_image_id = '' + # Global dir for RPC related + if region == ct.home_region: + if not os.path.exists(f"{terraform_files}/global/rpc"): + os.makedirs(f"{terraform_files}/global/rpc") + shutil.copyfile(modules_dir + "/provider.tf", f"{terraform_files}/global/rpc/provider.tf") - new_config = python_config - new_config.__setitem__("region", ct.region_dict[region]) - cc = oci.core.ComputeClient(new_config) + with open(f"{terraform_files}/global/rpc/provider.tf", 'r+') as provider_file: + provider_file_data = provider_file.read().rstrip() + if auth_mechanism == 'instance_principal': + provider_file_data = provider_file_data.replace("provider \"oci\" {", "provider \"oci\" {\nauth = \"InstancePrincipal\"") + if auth_mechanism == 'session_token': + provider_file_data = provider_file_data.replace("provider \"oci\" {", "provider \"oci\" {\nauth = \"SecurityToken\"\nconfig_file_profile = \"DEFAULT\"") - # fetch latest image ocids - try: - for image in paginate(cc.list_images, compartment_id=tenancy_id, operating_system='Oracle Linux', - sort_by='TIMECREATED'): - if ("Gen2-GPU" not in image.display_name): - linux_image_id = image.id - break - for image in paginate(cc.list_images, compartment_id=tenancy_id, 
operating_system='Windows', - sort_by='TIMECREATED'): - if ("Gen2-GPU" not in image.display_name): - windows_image_id= image.id - break - except Exception as e: - print(e) - print("!!! Could not fetch the list of images for Windows and Oracle Linux to write to variables_"+region+".tf file!!!\n" - "Please make sure to have Read Access to the Tenancy at the minimum !!!") - print("\nContinuing without fetching Image OCIDs........!!!") - - # 6. Read variables.tf from examples folder and copy the variables as string - with open(variables_example_file, 'r+') as var_eg_file: - variables_example_file_data = var_eg_file.read().rstrip() - - variables_example_file_data = variables_example_file_data.replace("", tenancy) - variables_example_file_data = variables_example_file_data.replace("", user) - variables_example_file_data = variables_example_file_data.replace("", fingerprint) - variables_example_file_data = variables_example_file_data.replace("", key_path) - variables_example_file_data = variables_example_file_data.replace("", ct.region_dict[region]) - variables_example_file_data = variables_example_file_data.replace("", ssh_public_key) - if (windows_image_id != ''): - variables_example_file_data = variables_example_file_data.replace("", windows_image_id) - - if (linux_image_id != ''): - variables_example_file_data = variables_example_file_data.replace("", linux_image_id) - - f = open(terraform_files+"/"+region+"/variables_" + region + ".tf", "w+") + f = open(f"{terraform_files}/global/rpc/provider.tf", "w+") + f.write(provider_file_data) + f.close() + + f = open(f"{terraform_files}/global/rpc/variables_global.tf", "w+") f.write(variables_example_file_data) f.close() - # Global dir for RPC related - if region == ct.home_region: - if not os.path.exists(f"{terraform_files}/global/rpc"): - os.makedirs(f"{terraform_files}/global/rpc") - shutil.copyfile(modules_dir + "/provider.tf", f"{terraform_files}/global/rpc/provider.tf") - f = open(terraform_files + "/" + "global/rpc" + "/variables_" + "global" + ".tf", "w+") - f.write(variables_example_file_data) - f.close() - - # 7. Copy the terraform modules and variables file to outdir - distutils.dir_util.copy_tree(modules_dir, terraform_files +"/" + region) - - # Manage multiple outdir - if (outdir_structure_file == '' or outdir_structure_file == "\n"): - pass + f = open(f"{terraform_files}/global/rpc/backend.tf", "w+") + f.write(global_backend_file_data) + f.close() + + # 7. 
Copy terraform modules and variables file to outdir + distutils.dir_util.copy_tree(modules_dir, terraform_files +"/" + region) + with open(terraform_files +"/" + region + "/provider.tf", 'r+') as provider_file: + provider_file_data = provider_file.read().rstrip() + if auth_mechanism == 'instance_principal': + provider_file_data = provider_file_data.replace("provider \"oci\" {", + "provider \"oci\" {\nauth = \"InstancePrincipal\"") + if auth_mechanism == 'session_token': + provider_file_data = provider_file_data.replace("provider \"oci\" {", + "provider \"oci\" {\nauth = \"SecurityToken\"\nconfig_file_profile = \"DEFAULT\"") + f = open(terraform_files +"/" + region + "/provider.tf", "w+") + f.write(provider_file_data) + f.close() + + reg_backend = open(terraform_files +"/" + region + "/backend.tf",'w+') + reg_backend.write(global_backend_file_data) + reg_backend.close() + reg_backend = open(terraform_files + "/" + region + "/backend.tf", 'r+') + new_backend_data = "" + + for line in reg_backend.readlines(): + if line.__contains__("key = "): + new_backend_data += " key = \"" + region + "/terraform.tfstate\"\n" else: - region_dir = terraform_files + "/" + region + "/" - for service, service_dir in outdir_config.items("Default"): - service = service.strip().lower() - service_dir = service_dir.strip() - - # Keep the .tf file in default region directory if directory name is empty - if service_dir=="" or service_dir == "\n": - continue - #if (service != 'identity' and service != 'tagging') or ((service == 'identity' or service == 'tagging') and region == home_region): - home_region_services = ['identity', 'tagging', 'budget'] - if (region != home_region) and (service in home_region_services): - os.remove(region_dir + service + ".tf") - - if (service not in home_region_services) or ((service in home_region_services) and region == home_region): - region_service_dir = region_dir + service_dir - if not os.path.exists(region_service_dir): - os.mkdir(region_service_dir) - if (service == 'instance'): - shutil.move(region_dir + 'scripts',region_service_dir+'/') - with open(region_dir + service + ".tf", 'r+') as tf_file: - module_data = tf_file.read().rstrip() - module_data = module_data.replace("\"./modules", "\"../modules") - - f = open(region_service_dir + "/" + service + ".tf", "w+") - f.write(module_data) - f.close() - os.remove(region_dir + service + ".tf") - - shutil.copyfile(region_dir + "variables_" + region + ".tf", region_service_dir + "/" + "variables_" + region + ".tf") - shutil.copyfile(region_dir + "provider.tf", region_service_dir + "/" + "provider.tf") - shutil.copyfile(region_dir + "oci-data.tf", region_service_dir + "/" + "oci-data.tf") - - os.remove(terraform_files + "/" + region + "/" + "variables_" + region + ".tf") - os.remove(terraform_files + "/" + region + "/" + "provider.tf") - os.remove(terraform_files + "/" + region + "/" + "oci-data.tf") - - # 8. Remove the terraform example variable file from outdir - os.remove(terraform_files + "/" + region + "/variables_example.tf") - - # 9. Copy documentation folder to outdir - #distutils.dir_util.copy_tree(documentation_dir+"/", customer_tenancy_dir+"/documentation") - - # Logging information - logging.basicConfig(filename=customer_tenancy_dir+'/cmds.log', format='%(message)s', filemode='w', level=logging.INFO) - - print("==================================================================================================================================") - print("\nThe toolkit has been setup to execute API's successfully. 
!!!") - print("Working Directory Path: "+customer_tenancy_dir) - print("Config File Path: "+ config_file_path ) - print("Path to region based directories, terraform provider and the variables files: " + terraform_files) - print("\nPlease use "+prefix+"_setUpOCI.properties file at "+customer_tenancy_dir +" to proceed with the execution of the SetUpOCI script !!!!") - print("Update the path of CD3 Excel input file in "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties before executing the next command......") - print("\nCommands to execute: (Alternately, you may also check the cmds.log in outdir for the same information)") - logging.info("Commands to execute:") - print("cd "+user_dir+"/oci_tools/cd3_automation_toolkit/") - logging.info("cd "+user_dir+"/oci_tools/cd3_automation_toolkit/") - print("python setUpOCI.py "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties") - logging.info("python setUpOCI.py "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties") - print("==================================================================================================================================") - -if __name__ == '__main__': - - # Execution of the code begins here - seek_info() + new_backend_data += line + reg_backend.close() + rewrite_backend = open(terraform_files + "/" + region + "/backend.tf", 'w') + rewrite_backend.write(new_backend_data) + rewrite_backend.close() + + # Manage multiple outdir + if (outdir_structure_file == '' or outdir_structure_file == "\n"): + pass + else: + region_dir = terraform_files + "/" + region + "/" + for service, service_dir in outdir_config.items("Default"): + service = service.strip().lower() + service_dir = service_dir.strip() + + # Keep the .tf file in default region directory if directory name is empty + if service_dir=="" or service_dir == "\n": + continue + #if (service != 'identity' and service != 'tagging') or ((service == 'identity' or service == 'tagging') and region == home_region): + home_region_services = ['identity', 'tagging', 'budget'] + if (region != home_region) and (service in home_region_services): + os.remove(region_dir + service + ".tf") + + if (service not in home_region_services) or ((service in home_region_services) and region == home_region): + region_service_dir = region_dir + service_dir + if not os.path.exists(region_service_dir): + os.mkdir(region_service_dir) + if (service == 'instance'): + if(os.path.isdir(region_service_dir+'/scripts')): + shutil.rmtree(region_service_dir+'/scripts') + shutil.move(region_dir + 'scripts',region_service_dir+'/') + with open(region_dir + service + ".tf", 'r+') as tf_file: + module_data = tf_file.read().rstrip() + module_data = module_data.replace("\"./modules", "\"../modules") + + f = open(region_service_dir + "/" + service + ".tf", "w+") + f.write(module_data) + f.close() + os.remove(region_dir + service + ".tf") + + shutil.copyfile(region_dir + "variables_" + region + ".tf", region_service_dir + "/" + "variables_" + region + ".tf") + shutil.copyfile(region_dir + "provider.tf", region_service_dir + "/" + "provider.tf") + shutil.copyfile(region_dir + "oci-data.tf", region_service_dir + "/" + "oci-data.tf") + + # write backend.tf to respective directories + reg_service_backend = open(region_service_dir + "/backend.tf", 'w+') + reg_service_backend.write(global_backend_file_data) + reg_service_backend.close() + reg_service_backend = open(region_service_dir + "/backend.tf", 'r+') + new_backend_data = "" + + for line in reg_service_backend.readlines(): + if line.__contains__("key 
= "): + new_backend_data += " key = \"" + region+"/"+service_dir + "/terraform.tfstate\"\n" + else: + new_backend_data += line + reg_service_backend.close() + rewrite_backend = open(terraform_files + "/" + region+"/"+service_dir + "/backend.tf", 'w') + rewrite_backend.write(new_backend_data) + rewrite_backend.close() + + + os.remove(terraform_files + "/" + region + "/" + "variables_" + region + ".tf") + os.remove(terraform_files + "/" + region + "/" + "provider.tf") + os.remove(terraform_files + "/" + region + "/" + "oci-data.tf") + os.remove(terraform_files + "/" + region + "/" + "backend.tf") + + # 8. Remove terraform example variable file from outdir + os.remove(terraform_files + "/" + region + "/variables_example.tf") + +# 9. Update DevOps files and configurations +if use_devops == 'yes': + print("\nCreating Tenancy specific DevOps Items - Topic, Project and Repository.................") + + if devops_repo == '' or devops_repo == "\n": + topic_name = prefix + "-automation-toolkit-topic" + project_name = prefix + "-automation-toolkit-project" + repo_name = prefix + "-automation-toolkit-repo" + devops_exists = False + else: + topic_name = '' + project_name = devops_repo.split("/")[0] + repo_name = devops_repo.split("/")[1] + devops_exists = True + + repo_ssh_url,files_in_repo = create_devops_resources(config, signer) + devops_dir = terraform_files + jenkins_home = os.environ['JENKINS_HOME'] + git_config_file = config_files + "/" + prefix + "_git_config" + + #Get Username from $user_ocid if $oci_devops_git_user is left empty + if "ocid1.user.oc1" in devops_user: + identity_client = oci.identity.IdentityClient(config=new_config, + retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY, + signer=signer) + user_data=identity_client.get_user(user_id=user).data + tenancy_data=identity_client.get_tenancy(tenancy_id=tenancy).data + devops_user=user_data.name+"@"+tenancy_data.name + + commit_id = update_devops_config(prefix,git_config_file, repo_ssh_url,files_in_repo, dir_values, devops_user, devops_user_key, devops_dir, ct) + +del ct, config, signer +# Logging information +outfile = customer_tenancy_dir+'/createTenancyConfig.out' +logging.basicConfig(filename=outfile, format='%(message)s', filemode='w', level=logging.INFO) + +print("==================================================================================================================================") +print("\nThe toolkit has been setup successfully. 
!!!\n") +#print("Customer Specific Working Directory Path: "+customer_tenancy_dir) +#print("Config File Path: "+ config_file_path ) +#print("Path to region based directories, terraform provider and the variables files: " + terraform_files) +#print("\nPlease use "+prefix+"_setUpOCI.properties file at "+customer_tenancy_dir +" to proceed with the execution of the SetUpOCI script !!!!") +#print("Update the path of CD3 Excel input file in "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties before executing the next command......") +#print("\nCommands to execute: (Alternately, you may also check the cmds.log in outdir for the same information)") +f = open(safe_file, "a") +data=prefix + "\t" + "SUCCESS\t"+current_time+"\n" +f.write(data) +f.close() + +logging.info("Customer Specific Working Directory Path: "+customer_tenancy_dir+"\n") + +if remote_state == 'yes': + logging.info("Remote State Bucket Name: "+ bucket_name+ " in "+rg+".") + +if use_devops == "yes": + logging.info("Common Jenkins Home: " +jenkins_home) + logging.info("DevOps Project Name and Repo Name: "+project_name+ ", "+repo_name+ " in "+rg+".") + logging.info("Folder configured for OCI DevOps GIT: "+terraform_files+" Initial Commit ID from createTenancyConfig.py: "+commit_id) + logging.info("\n#########################################") + logging.info("Next Steps for using toolkit via Jenkins") + logging.info("#########################################") + logging.info("Start Jenkins using - /usr/share/jenkins/jenkins.sh &") + logging.info("Access Jenkins using - https://:8443") + +logging.info("\n######################################") +logging.info("Next Steps for using toolkit via CLI") +logging.info("######################################") +logging.info("Modify "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties with input values for cd3file and workflow_type") +logging.info("cd "+user_dir+"/oci_tools/cd3_automation_toolkit/") +logging.info("python setUpOCI.py "+customer_tenancy_dir + "/" +prefix+"_setUpOCI.properties") + +with open(outfile, 'r') as log_file: + data = log_file.read().rstrip() +print(data) + +print("==================================================================================================================================") + diff --git a/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties b/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties index 45974f472..37a81130a 100644 --- a/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties +++ b/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties @@ -11,8 +11,8 @@ identity=identity tagging=tagging network=network -loadbalancer=network -networkloadbalancer=network +loadbalancer=loadbalancer +networkloadbalancer=loadbalancer vlan=vlan nsg=nsg # Same Directory must be specified for Instances and Block Volumes. 
diff --git a/cd3_automation_toolkit/user-scripts/tenancyconfig.properties b/cd3_automation_toolkit/user-scripts/tenancyconfig.properties index 0bcfd088d..daf5ea80f 100644 --- a/cd3_automation_toolkit/user-scripts/tenancyconfig.properties +++ b/cd3_automation_toolkit/user-scripts/tenancyconfig.properties @@ -1,27 +1,88 @@ [Default] -# Mandatory Fields -# Friendly name for the Customer Tenancy eg: demotenancy; -# The generated .auto.tfvars will be prefixed with this customer name + +################################################################################################################## + ## Required Parameters ## +################################################################################################################## + +# Friendly name for the Customer Tenancy eg: demotenancy; The generated .auto.tfvars files will be prefixed with this +# customer_name. customer_name= + tenancy_ocid= -fingerprint= -user_ocid= -# Path of API Private Key (PEM Key) File; If the PEM keys were generated by running createAPI.py, leave this field empty. -# Defaults to /cd3user/tenancies/keys/oci_api_private.pem when left empty. +# Example: us-phoenix-1 +region= + +# Auth Mechanism for OCI APIs - api_key,instance_principal,session_token +# Please make sure to add IAM policies for user/instance_principal before executing createTenancyConfig.py +auth_mechanism=api_key + +################################################################################################################## + ## Auth Details Parameters ## +# Required only for ${auth_mechanism} as api_key; Leave below params empty if 'instance_principal' or 'session_token' +# is used +################################################################################################################## + +user_ocid= +#Path of API Private Key (PEM Key) File; Defaults to /cd3user/tenancies/keys/oci_api_private.pem when left empty key_path= +fingerprint= -# Region ; defaults to us-ashburn-1 when left empty. -region= +################################################################################################################## + ## Deployment Parameters ## +################################################################################################################## # The outdir_structure_file defines the grouping of the terraform auto.tf.vars for the various generated resources. -# To have all the files generated in the corresponding region, leave this variable blank. +# To have all the files generated in a single directory in the corresponding region, leave this variable blank. # To group resources into different directories within each region - specify the absolute path to the file. -# The default file is specified below. You can make changes to the grouping in the below file to suit your deployment" -outdir_structure_file= +# The default file is specified below. You can make changes to the grouping in the below file to suit your deployment +#outdir_structure_file= #or -#outdir_structure_file=/cd3user/oci_tools/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties +outdir_structure_file=/cd3user/oci_tools/cd3_automation_toolkit/user-scripts/outdir_structure_file.properties -# Optional Fields -# SSH Key to launched instances +# SSH Key for launched instances; Use '\n' as the delimiter to add multiple ssh keys. 
+# Example: "ssh-rsa AAXXX......yhdlo\nssh-rsa AAxxskj...edfwf" +# Optional ssh_public_key= + +################################################################################################################## + ## Advanced Parameters for DevOps ## +# Below OCI Objects - Remote State Bucket Name and DevOps Project/Repo and a Notification Topic will be created/fetched +# from region specified in ${region} above. +# These values are required to be set as "yes" for Jenkins Configuration. +################################################################################################################## + + +# Compartment OCID where Bucket and DevOps Project/repo will be created; defaults to root if left empty. +compartment_ocid= + +# Remote state configuration +# Enter yes if remote state needs to be configured, else tfstate will be stored on local filesystem. +use_remote_state=no + +# Specify bucket name if you want to use existing bucket else leave empty. +# If left empty, Bucket with name ${customer_name}-tfstate-bucket will be created/reused in ${region}. +remote_state_bucket_name= + +# OCI DevOps GIT configuration +# Enter yes if generated terraform_files need to be stored in OCI DevOps GIT Repo else they will be stored on local +# filesystem. Will enforce 'yes' for use_remote_state in case below is set to 'yes' +use_oci_devops_git=no + +# Specify Repo name if you want to use existing OCI Devops GIT Repository else leave empty Format: +# If left empty, DevOps items with names ${customer_name}-automation-toolkit-project/repo/topic will be created/reused +# in ${region}. +oci_devops_git_repo_name= + +# User Details to perform GIT operations in OCI Devops GIT Repo; Mandatory when using $(auth_mechanism) as instance_principal +# or session_token +# Format: /@ eg oracleidentitycloudservice/devopsuser@oracle.com@ocitenant +# When left empty, it will be fetched from $(user_ocid) for $(auth_mechanism) as api_key. +# Customer Secret Key will also be configured for this user for S3 credentials of the bucket when $(auth_mechanism) is +# instance_principal or session_token +oci_devops_git_user= +# When left empty, same key file from $(key_path) used for $(auth_mechanism) as api_key will be copied to +# /cd3user/tenancies// and used for GIT Operations. +# Make sure the api key file permissions are rw(600) for cd3user +oci_devops_git_key= + diff --git a/cd3_automation_toolkit/user-scripts/terraform/backend.tf b/cd3_automation_toolkit/user-scripts/terraform/backend.tf new file mode 100644 index 000000000..16bc5577a --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/backend.tf @@ -0,0 +1,21 @@ +/*This line will be removed when using remote state +# !!! WARNING !!! Terraform State Lock is not supported with OCI Object Storage. +# Pre-Requisite: Create a version enabled object storage bucket to store the state file. 
+# End Point Format: https://.compat.objectstorage..oraclecloud.com +# Please look at the below doc for information about shared_credentials_file and other parameters: +# Reference: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformUsingObjectStore.htm + +terraform { + backend "s3" { + key = "" + bucket = "" + region = "" + endpoint = "" + shared_credentials_file = "~/.aws/credentials" + skip_region_validation = true + skip_credentials_validation = true + skip_metadata_api_check = true + force_path_style = true + } +} +This line will be removed when using remote state*/ \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/instance.tf b/cd3_automation_toolkit/user-scripts/terraform/instance.tf index 1b8a71b0f..26ed1f4e3 100755 --- a/cd3_automation_toolkit/user-scripts/terraform/instance.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/instance.tf @@ -1,80 +1,80 @@ -// Copyright (c) 2021, 2022, Oracle and/or its affiliates. - -############################# -## Module Block - Instances -## Create Instance -############################# - -data "oci_core_subnets" "oci_subnets" { - # depends_on = [module.subnets] # Uncomment to create Network and Instances together - for_each = var.instances != null ? var.instances : {} - compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : var.compartment_ocids[each.value.network_compartment_id] - display_name = each.value.subnet_id - vcn_id = data.oci_core_vcns.oci_vcns[each.key].virtual_networks.*.id[0] -} - -data "oci_core_vcns" "oci_vcns" { - # depends_on = [module.vcns] # Uncomment to create Network and Instances together - for_each = var.instances != null ? var.instances : {} - compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : var.compartment_ocids[each.value.network_compartment_id] - display_name = each.value.vcn_name -} - -module "instances" { - source = "./modules/compute/instance" - for_each = var.instances != null ? var.instances : {} - availability_domain = each.value.availability_domain != "" && each.value.availability_domain != null ? data.oci_identity_availability_domains.availability_domains.availability_domains[each.value.availability_domain].name : "" - compartment_id = each.value.compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.compartment_id)) > 0 ? each.value.compartment_id : var.compartment_ocids[each.value.compartment_id]) : null - network_compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : null - vcn_names = [each.value.vcn_name] - dedicated_vm_host_name = each.value.dedicated_vm_host_id != null ? each.value.dedicated_vm_host_id : null - shape = each.value.shape - ocpu_count = each.value.ocpus - private_ip = each.value.private_ip != null ? 
each.value.private_ip : null - defined_tags = each.value.defined_tags - display_name = each.value.display_name - fault_domain = each.value.fault_domain - freeform_tags = each.value.freeform_tags - source_type = each.value.source_type - source_image_id = length(regexall("ocid1.image.oc1*", each.value.source_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", each.value.source_id)) > 0 ? each.value.source_id : lookup(var.instance_source_ocids, each.value.source_id, null) - subnet_id = each.value.subnet_id != "" ? (length(regexall("ocid1.subnet.oc1*", each.value.subnet_id)) > 0 ? each.value.subnet_id : data.oci_core_subnets.oci_subnets[each.key].subnets.*.id[0]) : null - assign_public_ip = each.value.assign_public_ip - ssh_public_keys = each.value.ssh_authorized_keys != null ? (length(regexall("ssh-rsa*", each.value.ssh_authorized_keys)) > 0 ? each.value.ssh_authorized_keys : lookup(var.instance_ssh_keys, each.value.ssh_authorized_keys, null)) : null - hostname_label = each.value.hostname_label - nsg_ids = each.value.nsg_ids - #nsg_ids = each.value.nsg_ids != [] ? [for nsg in each.value.nsg_ids : length(regexall("ocid1.networksecuritygroup.oc1*",nsg)) > 0 ? nsg : merge(module.nsgs.*...)[nsg]["nsg_tf_id"]] : [] - boot_volume_size_in_gbs = each.value.boot_volume_size_in_gbs != null ? each.value.boot_volume_size_in_gbs : null - memory_in_gbs = each.value.memory_in_gbs != null ? each.value.memory_in_gbs : null - capacity_reservation_id = each.value.capacity_reservation_id != null ? lookup(var.capacity_reservation_ocids, each.value.capacity_reservation_id, null) : null - create_is_pv_encryption_in_transit_enabled = each.value.create_is_pv_encryption_in_transit_enabled - - boot_tf_policy = each.value.backup_policy != null ? each.value.backup_policy : null - policy_tf_compartment_id = each.value.policy_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.policy_compartment_id)) > 0 ? each.value.policy_compartment_id : var.compartment_ocids[each.value.policy_compartment_id]) : null - remote_execute = each.value.remote_execute != null ? each.value.remote_execute : null - bastion_ip = each.value.bastion_ip != null ? each.value.bastion_ip : null - cloud_init_script = each.value.cloud_init_script != null ? each.value.cloud_init_script : null - launch_options = each.value.launch_options - plugins_details = each.value.plugins_details - platform_config = each.value.platform_config != null ? each.value.platform_config : null - is_live_migration_preferred = each.value.is_live_migration_preferred - - # extended_metadata = each.value.extended_metadata - skip_source_dest_check = each.value.skip_source_dest_check != null ? 
each.value.skip_source_dest_check : null - baseline_ocpu_utilization = each.value.baseline_ocpu_utilization - # preemptible_instance_config = each.value.preemptible_instance_config - all_plugins_disabled = each.value.all_plugins_disabled - is_management_disabled = each.value.is_management_disabled - is_monitoring_disabled = each.value.is_monitoring_disabled - recovery_action = each.value.recovery_action - are_legacy_imds_endpoints_disabled = each.value.are_legacy_imds_endpoints_disabled - ipxe_script = each.value.ipxe_script - preserve_boot_volume = each.value.preserve_boot_volume - assign_private_dns_record = each.value.assign_private_dns_record - vlan_id = each.value.vlan_id - kms_key_id = each.value.kms_key_id - - # VNIC Details - vnic_defined_tags = each.value.vnic_defined_tags - vnic_freeform_tags = each.value.vnic_freeform_tags - vnic_display_name = each.value.vnic_display_name -} +// Copyright (c) 2021, 2022, Oracle and/or its affiliates. + +############################# +## Module Block - Instances +## Create Instance +############################# + +data "oci_core_subnets" "oci_subnets" { + # depends_on = [module.subnets] # Uncomment to create Network and Instances together + for_each = var.instances != null ? var.instances : {} + compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : var.compartment_ocids[each.value.network_compartment_id] + display_name = each.value.subnet_id + vcn_id = data.oci_core_vcns.oci_vcns[each.key].virtual_networks.*.id[0] +} + +data "oci_core_vcns" "oci_vcns" { + # depends_on = [module.vcns] # Uncomment to create Network and Instances together + for_each = var.instances != null ? var.instances : {} + compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : var.compartment_ocids[each.value.network_compartment_id] + display_name = each.value.vcn_name +} + +module "instances" { + source = "./modules/compute/instance" + for_each = var.instances != null ? var.instances : {} + availability_domain = each.value.availability_domain != "" && each.value.availability_domain != null ? data.oci_identity_availability_domains.availability_domains.availability_domains[each.value.availability_domain].name : "" + compartment_id = each.value.compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.compartment_id)) > 0 ? each.value.compartment_id : var.compartment_ocids[each.value.compartment_id]) : null + network_compartment_id = each.value.network_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.network_compartment_id)) > 0 ? each.value.network_compartment_id : var.compartment_ocids[each.value.network_compartment_id]) : null + vcn_names = [each.value.vcn_name] + dedicated_vm_host_name = each.value.dedicated_vm_host_id != null ? each.value.dedicated_vm_host_id : null + shape = each.value.shape + ocpu_count = each.value.ocpus + private_ip = each.value.private_ip != null ? 
each.value.private_ip : null + defined_tags = each.value.defined_tags + display_name = each.value.display_name + fault_domain = each.value.fault_domain + freeform_tags = each.value.freeform_tags + source_type = each.value.source_type + source_image_id = length(regexall("ocid1.image.oc1*", each.value.source_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", each.value.source_id)) > 0 ? each.value.source_id : lookup(var.instance_source_ocids, each.value.source_id, null) + subnet_id = each.value.subnet_id != "" ? (length(regexall("ocid1.subnet.oc1*", each.value.subnet_id)) > 0 ? each.value.subnet_id : data.oci_core_subnets.oci_subnets[each.key].subnets.*.id[0]) : null + assign_public_ip = each.value.assign_public_ip + ssh_public_keys = each.value.ssh_authorized_keys != null ? (length(regexall("ssh-rsa*", each.value.ssh_authorized_keys)) > 0 ? each.value.ssh_authorized_keys : lookup(var.instance_ssh_keys, each.value.ssh_authorized_keys, null)) : null + hostname_label = each.value.hostname_label + nsg_ids = each.value.nsg_ids + #nsg_ids = each.value.nsg_ids != [] ? [for nsg in each.value.nsg_ids : length(regexall("ocid1.networksecuritygroup.oc1*",nsg)) > 0 ? nsg : merge(module.nsgs.*...)[nsg]["nsg_tf_id"]] : [] + boot_volume_size_in_gbs = each.value.boot_volume_size_in_gbs != null ? each.value.boot_volume_size_in_gbs : null + memory_in_gbs = each.value.memory_in_gbs != null ? each.value.memory_in_gbs : null + capacity_reservation_id = each.value.capacity_reservation_id != null ? lookup(var.capacity_reservation_ocids, each.value.capacity_reservation_id, null) : null + create_is_pv_encryption_in_transit_enabled = each.value.create_is_pv_encryption_in_transit_enabled + + boot_tf_policy = each.value.backup_policy != null ? each.value.backup_policy : null + policy_tf_compartment_id = each.value.policy_compartment_id != null ? (length(regexall("ocid1.compartment.oc1*", each.value.policy_compartment_id)) > 0 ? each.value.policy_compartment_id : var.compartment_ocids[each.value.policy_compartment_id]) : null + remote_execute = each.value.remote_execute != null ? each.value.remote_execute : null + bastion_ip = each.value.bastion_ip != null ? each.value.bastion_ip : null + cloud_init_script = each.value.cloud_init_script != null ? each.value.cloud_init_script : null + launch_options = each.value.launch_options + plugins_details = each.value.plugins_details + platform_config = each.value.platform_config != null ? each.value.platform_config : null + is_live_migration_preferred = each.value.is_live_migration_preferred + + # extended_metadata = each.value.extended_metadata + skip_source_dest_check = each.value.skip_source_dest_check != null ? 
each.value.skip_source_dest_check : null + baseline_ocpu_utilization = each.value.baseline_ocpu_utilization + # preemptible_instance_config = each.value.preemptible_instance_config + all_plugins_disabled = each.value.all_plugins_disabled + is_management_disabled = each.value.is_management_disabled + is_monitoring_disabled = each.value.is_monitoring_disabled + recovery_action = each.value.recovery_action + are_legacy_imds_endpoints_disabled = each.value.are_legacy_imds_endpoints_disabled + ipxe_script = each.value.ipxe_script + preserve_boot_volume = each.value.preserve_boot_volume + assign_private_dns_record = each.value.assign_private_dns_record + vlan_id = each.value.vlan_id + kms_key_id = each.value.kms_key_id + + # VNIC Details + vnic_defined_tags = each.value.vnic_defined_tags + vnic_freeform_tags = each.value.vnic_freeform_tags + vnic_display_name = each.value.vnic_display_name +} diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/compute/dedicated-vm-host/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/dedicated-vm-host/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/dedicated-vm-host/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/data.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/data.tf index 0979b068a..9c500ff42 100755 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/data.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/data.tf @@ -1,159 +1,159 @@ -// Copyright (c) 2021, 2022, Oracle and/or its affiliates. - -############################# -## Data Block - Instance -## Create Instance and Boot Volume Backup Policy -############################# - -locals { - nsg_ids = var.nsg_ids != null ? flatten(tolist([for nsg in var.nsg_ids : (length(regexall("ocid1.networksecuritygroup.oc1*", nsg)) > 0 ? [nsg] : data.oci_core_network_security_groups.network_security_groups[nsg].network_security_groups[*].id)])) : null - - ADs = [ - for ad in data.oci_identity_availability_domains.ads.availability_domains : ad.name - ] - - shapes_config = { - for shape in data.oci_core_shapes.present_ad.shapes : shape.name => { - memory_in_gbs = shape.memory_in_gbs - ocpus = shape.ocpus - } - } - - platform_configs = { - for shape in data.oci_core_shapes.present_ad.shapes : shape.name => { - config_type = length(shape.platform_config_options) > 0 ? element(flatten(shape.platform_config_options[*].type),0) : "" - } if shape.name == var.shape - } - - plugins_config = var.plugins_details != null ? var.plugins_details : {} - remote_execute_script = var.remote_execute == null ? "SCRIPT-NOT-SET" : var.remote_execute - cloud_init_script = var.cloud_init_script == null ? "SCRIPT-NOT-SET" : var.cloud_init_script -} - -data "oci_identity_availability_domains" "ads" { - compartment_id = var.compartment_id -} - -data "oci_core_shapes" "present_ad" { - compartment_id = var.compartment_id - availability_domain = var.availability_domain == "" || var.availability_domain == null ? element(local.ADs, 0) : var.availability_domain -} - -data "oci_core_vcns" "oci_vcns_instances" { - for_each = { for vcn in var.vcn_names : vcn => vcn } - compartment_id = var.network_compartment_id != null ? 
var.network_compartment_id : var.compartment_id - display_name = each.value -} -// -//data "oci_core_subnets" "oci_subnets_instances" { -// compartment_id = var.network_compartment_id != null ? var.network_compartment_id : var.compartment_id -// display_name = var.subnet_id -// vcn_id = data.oci_core_vcns.oci_vcns_instances[var.vcn_names[0]].virtual_networks.*.id[0] -//} - -data "oci_core_dedicated_vm_hosts" "existing_vm_host" { - count = var.dedicated_vm_host_name != null ? 1 : 0 - compartment_id = var.compartment_id - display_name = var.dedicated_vm_host_name - state = "ACTIVE" -} - -data "oci_core_network_security_groups" "network_security_groups" { - for_each = var.nsg_ids != null ? { for nsg in var.nsg_ids : nsg => nsg } : {} - compartment_id = var.network_compartment_id != null ? var.network_compartment_id : var.compartment_id - display_name = each.value - vcn_id = data.oci_core_vcns.oci_vcns_instances[var.vcn_names[0]].virtual_networks.*.id[0] -} - -#data "oci_core_boot_volumes" "all_boot_volumes" { -# depends_on = [oci_core_instance.instance] -# count = var.boot_tf_policy != null ? 1 : 0 -# #Required -# compartment_id = var.compartment_id -# availability_domain = var.availability_domain -# filter { -# name = "display_name" -# values = [join(" ", [var.display_name, "(Boot Volume)"])] -# } -# filter { -# name = "state" -# values = ["AVAILABLE"] -# } -#} - -data "oci_core_volume_backup_policies" "boot_vol_backup_policy" { - count = var.boot_tf_policy != null ? 1 : 0 - - filter { - name = "display_name" - values = [lower(var.boot_tf_policy)] - } -} - -data "oci_core_volume_backup_policies" "boot_vol_custom_policy" { - count = var.boot_tf_policy != null ? 1 : 0 - compartment_id = local.policy_tf_compartment_id - filter { - name = "display_name" - values = [var.boot_tf_policy] - } -} - -################################ -# Data Block - Instances -# Market Place Images -################################ - -data "oci_marketplace_listing_package_agreements" "listing_package_agreements" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - #Required - listing_id = data.oci_marketplace_listing.listing.0.id - package_version = data.oci_marketplace_listing.listing.0.default_package_version - - #Optional - compartment_id = var.compartment_id -} - -data "oci_marketplace_listing_package" "listing_package" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - #Required - listing_id = data.oci_marketplace_listing.listing.0.id - package_version = data.oci_marketplace_listing.listing.0.default_package_version - - #Optional - compartment_id = var.compartment_id -} - -data "oci_marketplace_listing_packages" "listing_packages" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - #Required - listing_id = data.oci_marketplace_listing.listing.0.id - - #Optional - compartment_id = var.compartment_id -} - -data "oci_marketplace_listings" "listings" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 
0 : 1 - name = [var.source_image_id] - #is_featured = true # Comment this line for GovCloud - compartment_id = var.compartment_id -} - -data "oci_marketplace_listing" "listing" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - listing_id = data.oci_marketplace_listings.listings.0.listings[0].id - compartment_id = var.compartment_id -} - -data "oci_core_app_catalog_listing_resource_versions" "app_catalog_listing_resource_versions" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - listing_id = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_id -} - -data "oci_core_app_catalog_listing_resource_version" "catalog_listing" { - count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 - listing_id = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_id - resource_version = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_resource_version -} - +// Copyright (c) 2021, 2022, Oracle and/or its affiliates. + +############################# +## Data Block - Instance +## Create Instance and Boot Volume Backup Policy +############################# + +locals { + nsg_ids = var.nsg_ids != null ? flatten(tolist([for nsg in var.nsg_ids : (length(regexall("ocid1.networksecuritygroup.oc1*", nsg)) > 0 ? [nsg] : data.oci_core_network_security_groups.network_security_groups[nsg].network_security_groups[*].id)])) : null + + ADs = [ + for ad in data.oci_identity_availability_domains.ads.availability_domains : ad.name + ] + + shapes_config = { + for shape in data.oci_core_shapes.present_ad.shapes : shape.name => { + memory_in_gbs = shape.memory_in_gbs + ocpus = shape.ocpus + } + } + + platform_configs = { + for shape in data.oci_core_shapes.present_ad.shapes : shape.name => { + config_type = length(shape.platform_config_options) > 0 ? element(flatten(shape.platform_config_options[*].type),0) : "" + } if shape.name == var.shape + } + + plugins_config = var.plugins_details != null ? var.plugins_details : {} + remote_execute_script = var.remote_execute == null ? "SCRIPT-NOT-SET" : var.remote_execute + cloud_init_script = var.cloud_init_script == null ? "SCRIPT-NOT-SET" : var.cloud_init_script +} + +data "oci_identity_availability_domains" "ads" { + compartment_id = var.compartment_id +} + +data "oci_core_shapes" "present_ad" { + compartment_id = var.compartment_id + availability_domain = var.availability_domain == "" || var.availability_domain == null ? element(local.ADs, 0) : var.availability_domain +} + +data "oci_core_vcns" "oci_vcns_instances" { + for_each = { for vcn in var.vcn_names : vcn => vcn } + compartment_id = var.network_compartment_id != null ? var.network_compartment_id : var.compartment_id + display_name = each.value +} +// +//data "oci_core_subnets" "oci_subnets_instances" { +// compartment_id = var.network_compartment_id != null ? var.network_compartment_id : var.compartment_id +// display_name = var.subnet_id +// vcn_id = data.oci_core_vcns.oci_vcns_instances[var.vcn_names[0]].virtual_networks.*.id[0] +//} + +data "oci_core_dedicated_vm_hosts" "existing_vm_host" { + count = var.dedicated_vm_host_name != null ? 
1 : 0 + compartment_id = var.compartment_id + display_name = var.dedicated_vm_host_name + state = "ACTIVE" +} + +data "oci_core_network_security_groups" "network_security_groups" { + for_each = var.nsg_ids != null ? { for nsg in var.nsg_ids : nsg => nsg } : {} + compartment_id = var.network_compartment_id != null ? var.network_compartment_id : var.compartment_id + display_name = each.value + vcn_id = data.oci_core_vcns.oci_vcns_instances[var.vcn_names[0]].virtual_networks.*.id[0] +} + +#data "oci_core_boot_volumes" "all_boot_volumes" { +# depends_on = [oci_core_instance.instance] +# count = var.boot_tf_policy != null ? 1 : 0 +# #Required +# compartment_id = var.compartment_id +# availability_domain = var.availability_domain +# filter { +# name = "display_name" +# values = [join(" ", [var.display_name, "(Boot Volume)"])] +# } +# filter { +# name = "state" +# values = ["AVAILABLE"] +# } +#} + +data "oci_core_volume_backup_policies" "boot_vol_backup_policy" { + count = var.boot_tf_policy != null ? 1 : 0 + + filter { + name = "display_name" + values = [lower(var.boot_tf_policy)] + } +} + +data "oci_core_volume_backup_policies" "boot_vol_custom_policy" { + count = var.boot_tf_policy != null ? 1 : 0 + compartment_id = local.policy_tf_compartment_id + filter { + name = "display_name" + values = [var.boot_tf_policy] + } +} + +################################ +# Data Block - Instances +# Market Place Images +################################ + +data "oci_marketplace_listing_package_agreements" "listing_package_agreements" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + #Required + listing_id = data.oci_marketplace_listing.listing.0.id + package_version = data.oci_marketplace_listing.listing.0.default_package_version + + #Optional + compartment_id = var.compartment_id +} + +data "oci_marketplace_listing_package" "listing_package" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + #Required + listing_id = data.oci_marketplace_listing.listing.0.id + package_version = data.oci_marketplace_listing.listing.0.default_package_version + + #Optional + compartment_id = var.compartment_id +} + +data "oci_marketplace_listing_packages" "listing_packages" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + #Required + listing_id = data.oci_marketplace_listing.listing.0.id + + #Optional + compartment_id = var.compartment_id +} + +data "oci_marketplace_listings" "listings" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + name = [var.source_image_id] + #is_featured = true # Comment this line for GovCloud + compartment_id = var.compartment_id +} + +data "oci_marketplace_listing" "listing" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 
0 : 1 + listing_id = data.oci_marketplace_listings.listings.0.listings[0].id + compartment_id = var.compartment_id +} + +data "oci_core_app_catalog_listing_resource_versions" "app_catalog_listing_resource_versions" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + listing_id = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_id +} + +data "oci_core_app_catalog_listing_resource_version" "catalog_listing" { + count = length(regexall("ocid1.image.oc1*", var.source_image_id)) > 0 || length(regexall("ocid1.bootvolume.oc1*", var.source_image_id)) > 0 || var.source_image_id == null ? 0 : 1 + listing_id = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_id + resource_version = data.oci_marketplace_listing_package.listing_package.0.app_catalog_listing_resource_version +} + diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/compute/instance/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/database/adb/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/database/adb/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/database/adb/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/database/dbsystem-vm-bm/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/database/dbsystem-vm-bm/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/database/dbsystem-vm-bm/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-infra/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-infra/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-infra/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-vmcluster/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-vmcluster/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/database/exa-vmcluster/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git 
a/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget-alert-rule/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget-alert-rule/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget-alert-rule/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/billing/budget/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-default/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-default/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-default/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-key/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-key/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-key/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-namespace/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-namespace/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/governance/tagging/tag-namespace/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-compartment/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-compartment/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-compartment/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-group/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-group/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-group/oracle_provider_req.tf @@ -0,0 +1,7 @@ 
+terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-network-sources/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-network-sources/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-network-sources/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-policy/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-policy/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-policy/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-user/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-user/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/identity/iam-user/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/ip/public-ip-pool/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/public-ip-pool/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/public-ip-pool/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/ip/reserved-public-ip/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/reserved-public-ip/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/reserved-public-ip/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/ip/secondary-private-ip/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/secondary-private-ip/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/ip/secondary-private-ip/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend-set/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend-set/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend-set/oracle_provider_req.tf @@ -0,0 +1,7 @@ 
+terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-backend/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-certificate/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-certificate/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-certificate/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-cipher-suite/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-cipher-suite/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-cipher-suite/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-hostname/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-hostname/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-hostname/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-listener/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-listener/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-listener/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-load-balancer/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-load-balancer/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-load-balancer/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-path-route-set/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-path-route-set/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ 
b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-path-route-set/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-rule-set/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-rule-set/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/loadbalancer/lb-rule-set/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/alarm/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/alarm/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/alarm/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/event/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/event/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/event/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log-group/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log-group/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log-group/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/log/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-subscription/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-subscription/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-subscription/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-topic/oracle_provider_req.tf 
b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-topic/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/notification-topic/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/service-connector/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/service-connector/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/managementservices/service-connector/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/custom-dhcp/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/custom-dhcp/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/custom-dhcp/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-dhcp/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-dhcp/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-dhcp/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-route-table/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-route-table/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-route-table/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-sec-list/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-sec-list/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/default-sec-list/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/dns_resolver/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/dns_resolver/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/dns_resolver/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git 
a/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/rrset/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/rrset/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/rrset/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/view/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/view/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/view/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/zone/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/zone/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/dns/zone/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-attachment/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-attachment/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-attachment/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution-statement/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution-statement/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution-statement/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-distribution/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-rule/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-rule/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-rule/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No 
newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-table/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-table/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg-route-table/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/drg/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/igw/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/igw/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/igw/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/lpg/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/lpg/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/lpg/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/ngw/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/ngw/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/ngw/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg-rule/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg-rule/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg-rule/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/nsg/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/route-table/oracle_provider_req.tf 
b/cd3_automation_toolkit/user-scripts/terraform/modules/network/route-table/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/route-table/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/sec-list/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/sec-list/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/sec-list/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/sgw/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/sgw/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/sgw/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/subnet/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/subnet/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/subnet/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/vcn/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/vcn/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/vcn/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/network/vlan/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/network/vlan/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/network/vlan/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backend/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backend/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backend/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backendset/oracle_provider_req.tf 
b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backendset/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-backendset/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-listener/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-listener/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb-listener/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/networkloadbalancer/nlb/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/main.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/main.tf index 3a8931bec..a84804028 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/main.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/main.tf @@ -9,6 +9,7 @@ resource "oci_containerengine_cluster" "cluster" { vcn_id = data.oci_core_vcns.oci_vcns_clusters[var.vcn_names[0]].virtual_networks.*.id[0] defined_tags = var.defined_tags freeform_tags = var.freeform_tags + kms_key_id = var.kms_key_id cluster_pod_network_options { #Required diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/variables.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/variables.tf index af0259fa2..a74a23830 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/variables.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/cluster/variables.tf @@ -88,6 +88,11 @@ variable "service_lb_subnet_ids" { default = [] } +variable "kms_key_id" { + type = string + default = null +} + variable "defined_tags" { type = map(any) default = { "Oracle-Tags.CreatedOn" = "$${oci.datetime}", diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/main.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/main.tf index f93d2ecba..49a9b346f 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/main.tf +++ 
b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/main.tf @@ -37,6 +37,7 @@ resource "oci_containerengine_node_pool" "nodepool" { size = var.size nsg_ids = var.worker_nsg_ids != null ? (local.nodepool_nsg_ids == [] ? ["INVALID WORKER NSG Name"] : local.nodepool_nsg_ids) : null is_pv_encryption_in_transit_enabled = var.is_pv_encryption_in_transit_enabled + kms_key_id = var.kms_key_id defined_tags = var.node_defined_tags } diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/variables.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/variables.tf index 619dcf311..dd5c70909 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/variables.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/oke/nodepool/variables.tf @@ -143,6 +143,11 @@ variable "is_pv_encryption_in_transit_enabled" { description = "Whether in-transit encryptions is enabled for data in persistent volume" } +variable "kms_key_id" { + type = string + default = null +} + variable "node_defined_tags" { type = map(any) default = { "Oracle-Tags.CreatedOn" = "$${oci.datetime}", diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/main.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/main.tf index 587eecb20..007378f8b 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/main.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/main.tf @@ -6,51 +6,61 @@ ############################ resource "oci_ocvp_sddc" "sddc" { compartment_id = var.compartment_id - compute_availability_domain = var.compute_availability_domain - esxi_hosts_count = var.esxi_hosts_count - nsx_edge_uplink1vlan_id = var.nsx_edge_uplink1vlan_id - nsx_edge_uplink2vlan_id = var.nsx_edge_uplink2vlan_id - nsx_edge_vtep_vlan_id = var.nsx_edge_vtep_vlan_id - nsx_vtep_vlan_id = var.nsx_vtep_vlan_id - provisioning_subnet_id = var.provisioning_subnet_id - ssh_authorized_keys = var.ssh_authorized_keys - vmotion_vlan_id = var.vmotion_vlan_id vmware_software_version = var.vmware_software_version - vsan_vlan_id = var.vsan_vlan_id - vsphere_vlan_id = var.vsphere_vlan_id + ssh_authorized_keys = var.ssh_authorized_keys + + + initial_configuration { + initial_cluster_configurations { + initial_commitment = var.initial_sku + compute_availability_domain = var.compute_availability_domain + esxi_hosts_count = var.esxi_hosts_count + vsphere_type = "MANAGEMENT" + initial_host_ocpu_count = var.initial_host_ocpu_count + initial_host_shape_name = var.initial_host_shape_name + instance_display_name_prefix = var.instance_display_name_prefix + is_shielded_instance_enabled = var.is_shielded_instance_enabled + capacity_reservation_id = var.capacity_reservation_id + workload_network_cidr = var.workload_network_cidr + + network_configuration { + nsx_edge_uplink1vlan_id = var.nsx_edge_uplink1vlan_id + nsx_edge_uplink2vlan_id = var.nsx_edge_uplink2vlan_id + nsx_edge_vtep_vlan_id = var.nsx_edge_vtep_vlan_id + nsx_vtep_vlan_id = var.nsx_vtep_vlan_id + 
provisioning_subnet_id = var.provisioning_subnet_id + vmotion_vlan_id = var.vmotion_vlan_id + vsan_vlan_id = var.vsan_vlan_id + vsphere_vlan_id = var.vsphere_vlan_id + provisioning_vlan_id = var.provisioning_vlan_id + replication_vlan_id = var.replication_vlan_id + hcx_vlan_id = var.hcx_vlan_id + } + + + dynamic "datastores" { + for_each = length(var.management_datastore) != 0 ? [1] : [] + content { + datastore_type = "MANAGEMENT" + block_volume_ids = var.management_datastore + } + } + dynamic "datastores" { + for_each = length(var.workload_datastore) != 0 ? [1] : [] + content { + datastore_type = "WORKLOAD" + block_volume_ids = var.workload_datastore + } + } + } + } #Optional - capacity_reservation_id = var.capacity_reservation_id defined_tags = var.defined_tags display_name = var.display_name freeform_tags = var.freeform_tags hcx_action = var.hcx_action - hcx_vlan_id = var.hcx_vlan_id - initial_host_ocpu_count = var.initial_host_ocpu_count - initial_host_shape_name = var.initial_host_shape_name - initial_sku = var.initial_sku - instance_display_name_prefix = var.instance_display_name_prefix is_hcx_enabled = var.is_hcx_enabled - is_shielded_instance_enabled = var.is_shielded_instance_enabled is_single_host_sddc = var.is_single_host_sddc - provisioning_vlan_id = var.provisioning_vlan_id - replication_vlan_id = var.replication_vlan_id - workload_network_cidr = var.workload_network_cidr - dynamic "datastores" { - for_each = length(var.management_datastore) != 0 ? [1] : [] - content { - datastore_type = "MANAGEMENT" - block_volume_ids = var.management_datastore - } - } - dynamic "datastores" { - for_each = length(var.workload_datastore) != 0 ? [1] : [] - content { - datastore_type = "WORKLOAD" - block_volume_ids = var.workload_datastore - } - } - - } diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/sddc/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-configuration/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-configuration/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-configuration/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-target/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-target/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/security/cloud-guard-target/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/security/key/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/security/key/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- 
/dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/security/key/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/security/vault/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/security/vault/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/security/vault/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/block-volume/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/block-volume/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/block-volume/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/export-option/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/export-option/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/export-option/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/fss/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/fss/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/fss/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/mount-target/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/mount-target/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/file-storage/mount-target/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/main.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/main.tf index 01ccdcf73..5048dcc8e 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/main.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/main.tf @@ -16,7 +16,7 @@ resource "oci_objectstorage_bucket" "bucket" { auto_tiering = var.auto_tiering freeform_tags = var.freeform_tags defined_tags = var.defined_tags - #kms_key_id = var.kms_key_id + kms_key_id = var.kms_key_id #metadata = var.metadata object_events_enabled = var.object_events_enabled storage_tier = var.storage_tier diff 
--git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/oracle_provider_req.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/oracle_provider_req.tf new file mode 100644 index 000000000..e52742e05 --- /dev/null +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/oracle_provider_req.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + oci = { + source = "oracle/oci" + } + } +} \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/variables.tf b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/variables.tf index ef738339f..4c418d2d2 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/variables.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/modules/storage/object-storage/variables.tf @@ -45,10 +45,10 @@ variable "defined_tags" { } } -#variable "kms_key_id" { -#description = "The OCID of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data #encryption key." -#type = string -#} +variable "kms_key_id" { +description = "The OCID of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data #encryption key." +type = string +} #variable "metadata" { #description = "Arbitrary string, up to 4KB, of keys and values for user-defined metadata." diff --git a/cd3_automation_toolkit/user-scripts/terraform/object-storage.tf b/cd3_automation_toolkit/user-scripts/terraform/object-storage.tf index 3ce484da0..d2a794390 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/object-storage.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/object-storage.tf @@ -51,7 +51,7 @@ module "oss-buckets" { auto_tiering = each.value.auto_tiering != "" ? each.value.auto_tiering : null # Defaults to 'Disabled' as per hashicorp terraform defined_tags = each.value.defined_tags != {} ? each.value.defined_tags : {} freeform_tags = each.value.freeform_tags != {} ? each.value.freeform_tags : {} - #kms_key_id = each.value.kms_key_id != "" ? each.value.kms_key_id : null + kms_key_id = each.value.kms_key_id != "" ? each.value.kms_key_id : null #metadata = each.value.metadata != {} ? each.value.metadata : {} object_events_enabled = each.value.object_events_enabled != "" ? each.value.object_events_enabled : null # Defaults to 'false' as per hashicorp terraform storage_tier = each.value.storage_tier != "" ? each.value.storage_tier : null # Defaults to 'Standard' as per hashicorp terraform diff --git a/cd3_automation_toolkit/user-scripts/terraform/oke.tf b/cd3_automation_toolkit/user-scripts/terraform/oke.tf index 4a2aa54d7..2ce4ebf85 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/oke.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/oke.tf @@ -63,6 +63,7 @@ module "clusters" { pods_cidr = each.value.pods_cidr services_cidr = each.value.services_cidr service_lb_subnet_ids = each.value.service_lb_subnet_ids + kms_key_id = each.value.cluster_kms_key_id defined_tags = each.value.defined_tags freeform_tags = each.value.freeform_tags } @@ -94,6 +95,7 @@ module "nodepools" { source_type = each.value.source_type boot_volume_size_in_gbs = each.value.boot_volume_size_in_gbs ssh_public_key = each.value.ssh_public_key != null ? (length(regexall("ssh-rsa*", each.value.ssh_public_key)) > 0 ? 
each.value.ssh_public_key : lookup(var.oke_ssh_keys, each.value.ssh_public_key, null)) : null + kms_key_id = each.value.nodepool_kms_key_id node_defined_tags = each.value.node_defined_tags node_freeform_tags = each.value.node_freeform_tags nodepool_defined_tags = each.value.nodepool_defined_tags diff --git a/cd3_automation_toolkit/user-scripts/terraform/provider.tf b/cd3_automation_toolkit/user-scripts/terraform/provider.tf index 482b2f2f8..2e5c3e3e9 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/provider.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/provider.tf @@ -17,32 +17,9 @@ provider "oci" { terraform { required_providers { oci = { - source = "hashicorp/oci" + source = "oracle/oci" #version = ">= 4.0.0" - version = "5.9.0" + version = "5.24.0" } } } - -/* -# Uncomment and update to use Object Storage Bucket as the backend -# !!! WARNING !!! Terraform State Lock is not supported with OCI Object Storage. -# Pre-Requisite: Create a version enabled object storage bucket to store the state file. -# End Point Format: https://.compat.objectstorage..oraclecloud.com -# Please look at the below doc for information about shared_credentials_file and other parameters: -# Reference: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformUsingObjectStore.htm - -terraform { - backend "s3" { - bucket = "" - key = "" - region = "" - endpoint = "" - shared_credentials_file = "~/.aws/credentials" - skip_region_validation = true - skip_credentials_validation = true - skip_metadata_api_check = true - force_path_style = true - } -} -*/ \ No newline at end of file diff --git a/cd3_automation_toolkit/user-scripts/terraform/variables_example.tf b/cd3_automation_toolkit/user-scripts/terraform/variables_example.tf index 1a067ffbd..e4e3e6b24 100644 --- a/cd3_automation_toolkit/user-scripts/terraform/variables_example.tf +++ b/cd3_automation_toolkit/user-scripts/terraform/variables_example.tf @@ -732,7 +732,6 @@ variable "instances" { platform_config = optional(list(map(any))) launch_options = optional(list(map(any))) ipxe_script = optional(string) - firmware = optional(string) preserve_boot_volume = optional(bool) vlan_id = optional(string) kms_key_id = optional(string) @@ -1066,7 +1065,6 @@ variable "lbr_reserved_ips" { lifetime = string private_ip_id = optional(string) public_ip_pool_id = optional(string) - lifetime = optional(string) defined_tags = optional(map(any)) freeform_tags = optional(map(any)) })) @@ -1402,6 +1400,7 @@ variable "clusters" { pods_cidr = optional(string) services_cidr = optional(string) service_lb_subnet_ids = optional(list(string)) + cluster_kms_key_id = optional(string) defined_tags = optional(map(any)) freeform_tags = optional(map(any)) })) @@ -1433,6 +1432,7 @@ variable "nodepools" { source_type = string boot_volume_size_in_gbs = optional(number) ssh_public_key = optional(string) + nodepool_kms_key_id = optional(string) node_defined_tags = optional(map(any)) node_freeform_tags = optional(map(any)) nodepool_defined_tags = optional(map(any)) diff --git a/jenkins_install/example/config b/jenkins_install/example/config new file mode 100644 index 000000000..d20d1cfb3 --- /dev/null +++ b/jenkins_install/example/config @@ -0,0 +1,5 @@ +Host devops.scmservice.*.oci.oraclecloud.com + StrictHostKeyChecking no + User /@ + IdentityFile + diff --git a/jenkins_install/example/jenkins.properties b/jenkins_install/example/jenkins.properties new file mode 100644 index 000000000..8bedb634d --- /dev/null +++ b/jenkins_install/example/jenkins.properties @@ -0,0 +1,5 @@ 
+git_url="ssh://devops.scmservice..oci.oraclecloud.com/namespaces//projects//repositories/" +regions=["ashburn", "phoenix"] +services=["identity", "tagging", "network", "vlan", "nsg", "compute", "database", "fss", "oke", "ocvs", "security", "managementservices", "budget", "cis", "oss", "dns"] +outdir_structure=["Multiple_Outdir"] +#outdir_structure=["Single_Outdir"] diff --git a/jenkins_install/init/01_jenkins-config.groovy b/jenkins_install/init/01_jenkins-config.groovy new file mode 100644 index 000000000..06a6dfe84 --- /dev/null +++ b/jenkins_install/init/01_jenkins-config.groovy @@ -0,0 +1,243 @@ +import com.cloudbees.hudson.plugins.folder.* +//import org.jenkinsci.plugins.workflow.job.WorkflowJob +import jenkins.model.Jenkins + +Jenkins jenkins = Jenkins.instance +def JENKINS_HOME = System.getenv("JENKINS_HOME") + +File file = new File("$JENKINS_HOME/jenkins.properties") +file.withReader { reader -> + while ((line = reader.readLine()) != null) { + if (line.startsWith('git_url')) { + git_url = Eval.me(line.split("=")[1]) + } + if (line.startsWith('regions')) { + regions = Eval.me(line.split("=")[1]) + } + if (line.startsWith('outdir_structure')) { + outdir_structure = Eval.me(line.split("=")[1]) + } + if (line.startsWith('services')) { + services = Eval.me(line.split("=")[1]) + } + } + } +def tfApplyJobName = "terraform-apply" +def tfDestroyJobName = "terraform-destroy" + +for (os in outdir_structure) { + + def ost = jenkins.getItem("terraform_files") + if (ost == null) { + ost = jenkins.createProject(Folder.class,"terraform_files") + + def global = ost.getItem("global") + if (global == null) { + global = ost.createProject(Folder.class, "global") + + def rpc = global.getItem("rpc") + if (rpc == null) { + rpc = global.createProject(Folder.class, "rpc") + + def tfGlobRpcXml = +"""\ + + + + false + + multiOutput.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + + def tfGlobRpcDestroyXml = +"""\ + + + + false + + multiOutput-tf-destroy.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + def tfGlobRpcXmlStream = new ByteArrayInputStream(tfGlobRpcXml.getBytes()) + job1 = rpc.createProjectFromXML(tfApplyJobName, tfGlobRpcXmlStream) + def tfGlobRpcDestroyXmlStream = new ByteArrayInputStream(tfGlobRpcDestroyXml.getBytes()) + job2 = rpc.createProjectFromXML(tfDestroyJobName, tfGlobRpcDestroyXmlStream) + + } +} + + for (reg in regions) { + def folder = ost.getItem(reg) + if (folder == null) { + folder = ost.createProject(Folder.class, reg) + if (os == "Single_Outdir"){ + def tfApplyXml = +"""\ + + + + false + + singleOutput.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + + def tfDestroyXml = +"""\ + + + + false + + singleOutput-tf-destroy.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + + def tfApplyxmlStream = new ByteArrayInputStream(tfApplyXml.getBytes()) + job1 = folder.createProjectFromXML(tfApplyJobName, tfApplyxmlStream) + def tfDestroyxmlStream = new ByteArrayInputStream(tfDestroyXml.getBytes()) + job2 = folder.createProjectFromXML(tfDestroyJobName, tfDestroyxmlStream) + + } + if (os == "Multiple_Outdir"){ + for (svc in services) { + def svobjt = folder.getItem(svc) + if (svobjt == null) { + svobjt = folder.createProject(Folder.class, svc) + def tfApplyXml = +"""\ + + + + false + + multiOutput.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + + def tfDestroyXml = +"""\ + + + + false + + 
multiOutput-tf-destroy.groovy + false + + + + ${git_url} + + + + + main + + + 2 + false + Default + + + +""" + def tfApplyxmlStream = new ByteArrayInputStream(tfApplyXml.getBytes()) + job1 = svobjt.createProjectFromXML(tfApplyJobName, tfApplyxmlStream) + def tfDestroyxmlStream = new ByteArrayInputStream(tfDestroyXml.getBytes()) + job2 = svobjt.createProjectFromXML(tfDestroyJobName, tfDestroyxmlStream) + } + } + } + } + } + } + +} \ No newline at end of file diff --git a/jenkins_install/init/02_jenkins-view.groovy b/jenkins_install/init/02_jenkins-view.groovy new file mode 100644 index 000000000..b7976c30b --- /dev/null +++ b/jenkins_install/init/02_jenkins-view.groovy @@ -0,0 +1,55 @@ + import jenkins.model.Jenkins + + def parentPath = "terraform_files" + def jenkinsInstance = Jenkins.getInstance() + + def findRegionFolders(jenkinsInstance, parentPath) { + def parent = jenkinsInstance.getItemByFullName(parentPath) + + if (parent != null && parent instanceof hudson.model.ViewGroup) { + return parent.items.findAll { it instanceof hudson.model.ViewGroup } + } else { + println("Parent folder not found: $parentPath") + return [] + } + } + + def addJobsToView(view, folder) { + folder.items.each { item -> + if (item instanceof hudson.model.Job) { + // println("Processing job: ${item.fullName}") + view.add(item) + } else if (item instanceof hudson.model.ViewGroup) { + // Recursively add jobs from subfolders + addJobsToView(view, item) + } + } + } + + def processRegionFolder(jenkinsInstance, regionFolder) { + def viewName = "${regionFolder.name}" + def view = jenkinsInstance.getView(viewName) + + if (view == null) { + // Create the view if it doesn't exist + view = new hudson.model.ListView(viewName, jenkinsInstance) + jenkinsInstance.addView(view) + } + + addJobsToView(view, regionFolder) + + // Set the "Recurse in folders" option + view.setRecurse(true) + + // Save the view configuration + view.save() + + println("View '$viewName' created successfully.") + } + + def regionFolders = findRegionFolders(jenkinsInstance, parentPath) + regionFolders.each { regionFolder -> + processRegionFolder(jenkinsInstance, regionFolder) + } + + println("Processing completed for all region folders.") diff --git a/jenkins_install/init/03_disable_jenkins_cli.groovy b/jenkins_install/init/03_disable_jenkins_cli.groovy new file mode 100644 index 000000000..cf9a107d0 --- /dev/null +++ b/jenkins_install/init/03_disable_jenkins_cli.groovy @@ -0,0 +1,17 @@ + +// Disable CLI access over HTTP +def removal = { lst -> + lst.each { x -> if (x.getClass().name?.contains("CLIAction")) lst.remove(x) } + } + def j = jenkins.model.Jenkins.get(); + removal(j.getExtensionList(hudson.cli.CLIAction.class)) + removal(j.getExtensionList(hudson.ExtensionPoint.class)) + removal(j.getExtensionList(hudson.model.Action.class)) + removal(j.getExtensionList(hudson.model.ModelObject.class)) + removal(j.getExtensionList(hudson.model.RootAction.class)) + removal(j.getExtensionList(hudson.model.UnprotectedRootAction.class)) + removal(j.getExtensionList(java.lang.Object.class)) + removal(j.getExtensionList(org.kohsuke.stapler.StaplerProxy.class)) + removal(j.actions) + + println "Jenkins cli is disabled..." 
diff --git a/jenkins_install/jcasc.yaml b/jenkins_install/jcasc.yaml new file mode 100644 index 000000000..e81028cda --- /dev/null +++ b/jenkins_install/jcasc.yaml @@ -0,0 +1,67 @@ +jenkins: + authorizationStrategy: "loggedInUsersCanDoAnything" + disableRememberMe: false + disabledAdministrativeMonitors: + - "jenkins.diagnostics.RootUrlNotSetMonitor" + globalNodeProperties: + - envVars: + env: + - key: "customer_prefix" + value: "" + labelAtoms: + - name: "master" + markupFormatter: "plainText" + mode: NORMAL + myViewsTabBar: "standard" + noUsageStatistics: true + numExecutors: 6 + primaryView: + all: + name: "all" + projectNamingStrategy: "standard" + quietPeriod: 5 + #remotingSecurity: + # enabled: true + scmCheckoutRetryCount: 0 + securityRealm: + local: + allowsSignup: false + enableCaptcha: false + updateCenter: + sites: + - id: "default" + url: "https://updates.jenkins.io/update-center.json" + views: + - all: + name: "all" + viewsTabBar: "standard" +security: + apiToken: + creationOfLegacyTokenEnabled: false + tokenGenerationOnCreationEnabled: false + usageStatisticsEnabled: true + globalJobDslSecurityConfiguration: + useScriptSecurity: false +unclassified: + buildDiscarders: + configuredBuildDiscarders: + - "jobBuildDiscarder" + buildStepOperation: + enabled: false + defaultFolderConfiguration: + healthMetrics: + - worstChildHealthMetric: + recursive: true + fingerprints: + fingerprintCleanupDisabled: false + storage: "file" + scmGit: + createAccountBasedOnEmail: true + showEntireCommitSummaryInChanges: false + useExistingAccountWithSameEmail: true +tool: + git: + installations: + - home: "git" + name: "Default" + diff --git a/jenkins_install/jenkins-support b/jenkins_install/jenkins-support new file mode 100644 index 000000000..88e107113 --- /dev/null +++ b/jenkins_install/jenkins-support @@ -0,0 +1,183 @@ +#!/bin/bash -eu + +: "${REF:="/usr/share/jenkins/ref"}" + +# compare if version1 < version2 +versionLT() { + local v1; v1=$(echo "$1" | cut -d '-' -f 1 ) + local q1; q1=$(echo "$1" | cut -s -d '-' -f 2- ) + local v2; v2=$(echo "$2" | cut -d '-' -f 1 ) + local q2; q2=$(echo "$2" | cut -s -d '-' -f 2- ) + if [ "$v1" = "$v2" ]; then + if [ "$q1" = "$q2" ]; then + return 1 + else + if [ -z "$q1" ]; then + return 1 + else + if [ -z "$q2" ]; then + return 0 + else + [ "$q1" = "$(echo -e "$q1\n$q2" | sort -V | head -n1)" ] + fi + fi + fi + else + [ "$v1" = "$(echo -e "$v1\n$v2" | sort -V | head -n1)" ] + fi +} + +# returns a plugin version from a plugin archive +get_plugin_version() { + local archive; archive=$1 + local version; version=$(unzip -p "$archive" META-INF/MANIFEST.MF | grep "^Plugin-Version: " | sed -e 's#^Plugin-Version: ##') + version=${version%%[[:space:]]} + echo "$version" +} + +# Copy files from /usr/share/jenkins/ref into $JENKINS_HOME +# So the initial JENKINS-HOME is set with expected content. +# Don't override, as this is just a reference setup, and use from UI +# can then change this, upgrade plugins, etc. 
+copy_reference_file() { + f="${1%/}" + b="${f%.override}" + rel="${b#"$REF/"}" + version_marker="${rel}.version_from_image" + dir=$(dirname "${rel}") + local action; + local reason; + local container_version; + local image_version; + local marker_version; + local log; log=false + if [[ ${rel} == plugins/*.jpi ]]; then + container_version=$(get_plugin_version "$JENKINS_HOME/${rel}") + image_version=$(get_plugin_version "${f}") + if [[ -e $JENKINS_HOME/${version_marker} ]]; then + marker_version=$(cat "$JENKINS_HOME/${version_marker}") + if versionLT "$marker_version" "$container_version"; then + if ( versionLT "$container_version" "$image_version" && [[ -n $PLUGINS_FORCE_UPGRADE ]]); then + action="UPGRADED" + reason="Manually upgraded version ($container_version) is older than image version $image_version" + log=true + else + action="SKIPPED" + reason="Installed version ($container_version) has been manually upgraded from initial version ($marker_version)" + log=true + fi + else + if [[ "$image_version" == "$container_version" ]]; then + action="SKIPPED" + reason="Version from image is the same as the installed version $image_version" + else + if versionLT "$image_version" "$container_version"; then + action="SKIPPED" + log=true + reason="Image version ($image_version) is older than installed version ($container_version)" + else + action="UPGRADED" + log=true + reason="Image version ($image_version) is newer than installed version ($container_version)" + fi + fi + fi + else + if [[ -n "$TRY_UPGRADE_IF_NO_MARKER" ]]; then + if [[ "$image_version" == "$container_version" ]]; then + action="SKIPPED" + reason="Version from image is the same as the installed version $image_version (no marker found)" + # Add marker for next time + echo "$image_version" > "$JENKINS_HOME/${version_marker}" + else + if versionLT "$image_version" "$container_version"; then + action="SKIPPED" + log=true + reason="Image version ($image_version) is older than installed version ($container_version) (no marker found)" + else + action="UPGRADED" + log=true + reason="Image version ($image_version) is newer than installed version ($container_version) (no marker found)" + fi + fi + fi + fi + if [[ ! -e $JENKINS_HOME/${rel} || "$action" == "UPGRADED" || $f = *.override ]]; then + action=${action:-"INSTALLED"} + log=true + mkdir -p "$JENKINS_HOME/${dir}" + cp "${f}" "$JENKINS_HOME/${rel}"; + # pin plugins on initial copy + touch "$JENKINS_HOME/${rel}.pinned" + echo "$image_version" > "$JENKINS_HOME/${version_marker}" + reason=${reason:-$image_version} + else + action=${action:-"SKIPPED"} + fi + else + if [[ ! -e $JENKINS_HOME/${rel} || $f = *.override ]] + then + action="INSTALLED" + log=true + mkdir -p "$JENKINS_HOME/${dir}" + cp "$(realpath "${f}")" "$JENKINS_HOME/${rel}"; + else + action="SKIPPED" + fi + fi + if [[ -n "$VERBOSE" || "$log" == "true" ]]; then + if [ -z "$reason" ]; then + echo "$action $rel" >> "$COPY_REFERENCE_FILE_LOG" + else + echo "$action $rel : $reason" >> "$COPY_REFERENCE_FILE_LOG" + fi + fi +} + +# Retries a command a configurable number of times with backoff. +# +# The retry count is given by ATTEMPTS (default 60), the initial backoff +# timeout is given by TIMEOUT in seconds (default 1.) 
+# +function retry_command() { + local max_attempts=${ATTEMPTS-3} + local timeout=${TIMEOUT-1} + local success_timeout=${SUCCESS_TIMEOUT-1} + local max_success_attempt=${SUCCESS_ATTEMPTS-1} + local attempt=0 + local success_attempt=0 + local exitCode=0 + + while (( attempt < max_attempts )) + do + set +e + "$@" + exitCode=$? + set -e + + if [[ $exitCode == 0 ]] + then + success_attempt=$(( success_attempt + 1 )) + if (( success_attempt >= max_success_attempt)) + then + break + else + sleep "$success_timeout" + continue + fi + fi + + echo "$(date -u '+%T') Failure ($exitCode) Retrying in $timeout seconds..." 1>&2 + sleep "$timeout" + success_attempt=0 + attempt=$(( attempt + 1 )) + timeout=$(( timeout )) + done + + if [[ $exitCode != 0 ]] + then + echo "$(date -u '+%T') Failed in the last attempt ($*)" 1>&2 + fi + + return $exitCode +} diff --git a/jenkins_install/jenkins.sh b/jenkins_install/jenkins.sh new file mode 100644 index 000000000..13c726857 --- /dev/null +++ b/jenkins_install/jenkins.sh @@ -0,0 +1,59 @@ +#! /bin/bash -e + +: "${REF:="/usr/share/jenkins/x``"}" + +# Check if JENKINS_HOME exists +if [ ! -d "$JENKINS_HOME" ]; then + # If it doesn't exist, create it + mkdir -p "$JENKINS_HOME" + echo "Directory created: $JENKINS_HOME" +fi + +# Copy Required files to JENKINS_HOME +cp ${JENKINS_INSTALL}/jcasc.yaml "$JENKINS_HOME/" +if [ ! -d "$JENKINS_HOME/jobs/setUpOCI" ]; then + mkdir -p "$JENKINS_HOME/jobs/setUpOCI" +fi +cp ${JENKINS_INSTALL}/setUpOCI_config.xml "$JENKINS_HOME/jobs/setUpOCI/config.xml" +cp -r ${JENKINS_INSTALL}/scriptler $JENKINS_HOME + +#Generate Self Signed Cert and Copy to JENKINS_HOME + keytool -genkey -keystore "$JENKINS_INSTALL/oci_toolkit.jks" -alias "automationtoolkit" -keyalg RSA -validity 60 -keysize 2048 -dname "CN=oci-automation, OU=toolkit, C=IN" -ext SAN=dns:automationtoolkit,ip:127.0.0.1 -storepass automationtoolkit && keytool -importkeystore -srckeystore "$JENKINS_INSTALL/oci_toolkit.jks" -srcstoretype JKS -deststoretype PKCS12 -destkeystore "$JENKINS_HOME/oci_toolkit.p12" -srcstorepass automationtoolkit -deststorepass automationtoolkit -noprompt + +touch "${COPY_REFERENCE_FILE_LOG}" || { echo "Can not write to ${COPY_REFERENCE_FILE_LOG}. Wrong volume permissions?"; exit 1; } +echo "--- Copying files at $(date)" >> "$COPY_REFERENCE_FILE_LOG" +find "${REF}" \( -type f -o -type l \) -exec bash -c '. 
${JENKINS_INSTALL}/jenkins-support; for arg; do copy_reference_file "$arg"; done' _ {} + + + +# if `docker run` first argument start with `--` the user is passing jenkins launcher arguments +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + + # read JAVA_OPTS and JENKINS_OPTS into arrays to avoid need for eval (and associated vulnerabilities) + java_opts_array=() + while IFS= read -r -d '' item; do + java_opts_array+=( "$item" ) + done < <([[ $JAVA_OPTS ]] && xargs printf '%s\0' <<<"$JAVA_OPTS") + + readonly agent_port_property='jenkins.model.Jenkins.slaveAgentPort' + if [ -n "${JENKINS_SLAVE_AGENT_PORT:-}" ] && [[ "${JAVA_OPTS:-}" != *"${agent_port_property}"* ]]; then + java_opts_array+=( "-D${agent_port_property}=${JENKINS_SLAVE_AGENT_PORT}" ) + fi + + if [[ "$DEBUG" ]] ; then + java_opts_array+=( \ + '-Xdebug' \ + '-Xrunjdwp:server=y,transport=dt_socket,address=5005,suspend=y' \ + ) + fi + + jenkins_opts_array=( ) + while IFS= read -r -d '' item; do + jenkins_opts_array+=( "$item" ) + done < <([[ $JENKINS_OPTS ]] && xargs printf '%s\0' <<<"$JENKINS_OPTS") + + # Start Jenkins on 8443 port using Self Signed Cert + exec java -Duser.home="$JENKINS_HOME" "${java_opts_array[@]}" -jar ${JENKINS_INSTALL}/jenkins.war "${jenkins_opts_array[@]}" "$@" --httpsPort=8443 --httpPort=-1 --httpsKeyStore="$JENKINS_INSTALL/oci_toolkit.jks" --httpsKeyStorePassword=automationtoolkit +fi + +# As argument is not jenkins, assume user want to run his own process, for example a `bash` shell to explore this image +exec "$@" diff --git a/jenkins_install/multiOutput-tf-destroy.groovy b/jenkins_install/multiOutput-tf-destroy.groovy new file mode 100644 index 000000000..962357f75 --- /dev/null +++ b/jenkins_install/multiOutput-tf-destroy.groovy @@ -0,0 +1,64 @@ +/* Set the various stages of the build */ +pipeline { + agent any + options { + ansiColor('xterm') + } + stages { + stage('Terraform Destroy Plan') { + when { + expression { return env.GIT_BRANCH == 'origin/main';} + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + def jobName = env.JOB_NAME + def parts = jobName.split('/') + + // Assuming the job name format is /job//job/job_name + def regionName = parts[1] + def serviceName = parts[2] + + // Set environment variables for reuse in subsequent stages + env.Region = regionName + env.Service = serviceName + + sh "cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform init -upgrade" + sh "cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform plan -destroy" + } + } + } + } + + /** Approval for Terraform Apply **/ + stage('Get Approval') { + when { + expression { return env.GIT_BRANCH == 'origin/main'; } + expression {return currentBuild.result != "FAILURE" } + } + input { + message "Do you want to perform terraform destroy?" + + } + steps { + echo "Approval for the Destroy Granted!" 
+ } + } + + stage('Terraform Destroy') { + when { + expression {return env.GIT_BRANCH == 'origin/main'; } + expression {return currentBuild.result != "FAILURE" } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + sh "cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform destroy --auto-approve" + } + } + } + } + } +} diff --git a/jenkins_install/multiOutput.groovy b/jenkins_install/multiOutput.groovy new file mode 100644 index 000000000..35173f439 --- /dev/null +++ b/jenkins_install/multiOutput.groovy @@ -0,0 +1,121 @@ +def tf_plan = "Changes" +pipeline { + agent any + options { + ansiColor('xterm') + } + stages { + stage('Terraform Plan') { + when { + expression { + return env.GIT_BRANCH == 'origin/main'; + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + def jobName = env.JOB_NAME + def parts = jobName.split('/') + + // Assuming the job name format is /job//job/job_name + def regionName = parts[1] + def serviceName = parts[2] + + + // Set environment variables for reuse in subsequent stages + env.Region = regionName + env.Service = serviceName + + dir("${WORKSPACE}/${env.Region}/${env.Service}") { + sh 'terraform init -upgrade' + } + + // Run Terraform plan and capture the output + def terraformPlanOutput = sh(script: "cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform plan -out=tfplan.out", returnStdout: true).trim() + + // Check if the plan contains any changes + if (terraformPlanOutput.contains('No changes.')) { + echo 'No changes in Terraform plan. Skipping further stages.' + tf_plan = "No Changes" + } else { + // If there are changes, proceed with applying the plan + echo "Changes detected in Terraform plan. Proceeding with apply. \n${terraformPlanOutput}" + + } + } + } + } + } + + + /** OPA Stage **/ + stage('OPA') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression { return tf_plan == "Changes" } + expression {return currentBuild.result != "FAILURE" } + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + // Run Terraform show and capture the output + sh "set +x && cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform show -json tfplan.out > tfplan.json" + // Run OPA eval + def opaOutput = sh(script: "opa eval -f pretty -b /cd3user/oci_tools/cd3_automation_toolkit/user-scripts/OPA/ -i \"${WORKSPACE}/${env.Region}/${env.Service}/tfplan.json\" data.terraform.deny",returnStdout: true).trim() + + if (opaOutput == '[]') { + echo "No OPA rules are violated. Proceeding with the next stage." + } + else { + echo "OPA Output:\n${opaOutput}" + unstable(message:"OPA Rules are violated.") + } + } + } + } + } + + stage('Get Approval') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression {return tf_plan == "Changes"} + expression {return currentBuild.result != "FAILURE" } + } + } + + options { + timeout(time: 1440, unit: 'MINUTES') // 24 hours timeout + } + + steps { + script { + input message: "Do you want to apply the plan?" + echo "Approval for the Apply Granted!" 
+ } + } + } + stage('Terraform Apply') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression {return tf_plan == "Changes"} + expression {return currentBuild.result != "FAILURE" } + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + sh "cd \"${WORKSPACE}/${env.Region}/${env.Service}\" && terraform apply --auto-approve tfplan.out" + } + } + } + } + } +} + diff --git a/jenkins_install/plugins.txt b/jenkins_install/plugins.txt new file mode 100644 index 000000000..0a86d1e07 --- /dev/null +++ b/jenkins_install/plugins.txt @@ -0,0 +1,23 @@ +plain-credentials:143.v1b_df8b_d3b_e48 +snakeyaml-api:2.2-111.vc6598e30cc65 +workflow-step-api:639.v6eca_cd8c04a_a_ +pipeline-build-step:516.v8ee60a_81c5b_9 +pipeline-input-step:477.v339683a_8d55e +pipeline-stage-view:2.34 +job-dsl:1.87 +credentials-binding:642.v737c34dea_6c2 +docker-workflow:572.v950f58993843 +scm-api:676.v886669a_199a_a_ +configuration-as-code:1700.v6f448841296e +config-file-provider:959.vcff671a_4518b_ +git:5.2.1 +credentials:1309.v8835d63eb_d8a_ +build-timeout:1.31 +script-security:1281.v22fb_899df1a_e +rebuild:330.v645b_7df10e2a_ +uno-choice:2.8.1 +file-parameters:316.va_83a_1221db_a_7 +scriptler:334.v29792d5a_c058 +ansicolor:0.6.2 +pipeline-graph-view:205.vb_8e3a_b_51f12e + diff --git a/jenkins_install/scriptler/scriptler.xml b/jenkins_install/scriptler/scriptler.xml new file mode 100644 index 000000000..452c76bc0 --- /dev/null +++ b/jenkins_install/scriptler/scriptler.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + false + false + false + \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/AdditionalFilters.groovy b/jenkins_install/scriptler/scripts/AdditionalFilters.groovy new file mode 100644 index 000000000..e108d805a --- /dev/null +++ b/jenkins_install/scriptler/scripts/AdditionalFilters.groovy @@ -0,0 +1,188 @@ +html_to_be_rendered = "" + +if(Workflow.toLowerCase().contains("export")){ + +html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + + + + + + +""" +} +for (item in SubOptions.split(",")) { + if (item.equals("Export Instances (excludes instances launched by OKE)")) { + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + + + + + + """ + } + + if (item.equals("Export Block Volumes/Block Backup Policy")) { + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + """ + } + + if (item.equals('Export DNS Views/Zones/Records')){ + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + """ + } + if (item.equals('Upload current terraform files/state to Resource Manager')){ + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + + + + + """ + } + + if (item.equals('Create Key/Vault')){ + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + + + + """ + } + + if (item.equals('Create Default Budget')){ + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + + + + + + """ + } + + if (item.equals('Enable Cloud Guard')){ + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + """ + } + +} + +for (item in SubChildOptions.split(",")) { + if (item in ["Export Security Rules (From OCI into SecRulesinOCI sheet)","Export Route Rules (From OCI into RouteRulesinOCI sheet)","Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet)","Export NSGs (From OCI into NSGs sheet)"]) { + + html_to_be_rendered = """ + ${html_to_be_rendered} + + + + + + + + + + """ + + } + break +} + + +html_to_be_rendered = 
"${html_to_be_rendered}

    (Leave empty for all subscribed regions)

    (Leave empty to fetch from all compartments)
    +
    +
    +
    +
    (eg AD1,AD2,AD3)
    +
    +

    (eg AD1,AD2,AD3)
    + + +

    (Leave empty for all subscribed regions)








    (Leave empty to fetch from all compartments)
    " + +return html_to_be_rendered \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/MainOptions.groovy b/jenkins_install/scriptler/scripts/MainOptions.groovy new file mode 100644 index 000000000..d86779024 --- /dev/null +++ b/jenkins_install/scriptler/scripts/MainOptions.groovy @@ -0,0 +1,38 @@ +if (Workflow.toLowerCase().contains("create")){ +return[ +"Validate CD3", +"Identity", +"Tags", +"Network", +"DNS Management", +"Compute", +"Storage", +"Database", +"Load Balancers", +"Management Services", +"Developer Services", +"Logging Services", +"Software-Defined Data Centers - OCVS", +"CIS Compliance Features", +"CD3 Services" +] +} +else if(Workflow.toLowerCase().contains("export")) { +return[ +"Export Identity", +"Export Tags", +"Export Network", +"Export DNS Management", +"Export Compute", +"Export Storage", +"Export Databases", +"Export Load Balancers", +"Export Management Services", +"Export Developer Services", +"Export Software-Defined Data Centers - OCVS", +"CD3 Services" +] +} +else { +return["Please select a Workflow:disabled"] +} \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/SubChildOptions.groovy b/jenkins_install/scriptler/scripts/SubChildOptions.groovy new file mode 100644 index 000000000..ec345a312 --- /dev/null +++ b/jenkins_install/scriptler/scripts/SubChildOptions.groovy @@ -0,0 +1,25 @@ +List sec_rules = ["SECURITY RULES:disabled","Export Security Rules (From OCI into SecRulesinOCI sheet)", "Add/Modify/Delete Security Rules (Reads SecRulesinOCI sheet)"] +List route_rules = ["ROUTE RULES:disabled","Export Route Rules (From OCI into RouteRulesinOCI sheet)", "Add/Modify/Delete Route Rules (Reads RouteRulesinOCI sheet)"] +List drg_route_rules = ["DRG ROUTE RULES:disabled","Export DRG Route Rules (From OCI into DRGRouteRulesinOCI sheet)", "Add/Modify/Delete DRG Route Rules (Reads DRGRouteRulesinOCI sheet)"] +List nsg = ["NSGs:disabled","Export NSGs (From OCI into NSGs sheet)", "Add/Modify/Delete NSGs (Reads NSGs sheet)"] +List cis = ["CIS:disabled","CD3 Image already contains the latest CIS compliance checking script available at the time of cd3 image release. 
Download latest only if new version of the script is available", "Execute compliance checking script"] +List final_list = [] + +for (item in SubOptions.split(",")) { + if (item.equals("Security Rules")){ + final_list += sec_rules + } + if (item.equals("Route Rules")){ + final_list += route_rules + } + if (item.equals("DRG Route Rules")){ + final_list += drg_route_rules + } + if (item.equals("Network Security Groups")){ + final_list += nsg + } + if (item.equals("CIS Compliance Checking Script")){ + final_list += cis + } +} +return final_list \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/SubOptions.groovy b/jenkins_install/scriptler/scripts/SubOptions.groovy new file mode 100644 index 000000000..68794d414 --- /dev/null +++ b/jenkins_install/scriptler/scripts/SubOptions.groovy @@ -0,0 +1,95 @@ +List validate_cd3 = ["CD3 Validator:disabled","Validate Compartments","Validate Groups","Validate Policies","Validate Tags","Validate Networks","Validate DNS","Validate Instances","Validate Block Volumes","Validate FSS","Validate Buckets"] +List identity = ["IDENTITY:disabled","Add/Modify/Delete Compartments", "Add/Modify/Delete Groups","Add/Modify/Delete Policies", "Add/Modify/Delete Users", "Add/Modify/Delete Network Sources"] +List network = ["NETWORK:disabled","Create Network", "Modify Network","Security Rules", "Route Rules", "DRG Route Rules", "Network Security Groups", "Add/Modify/Delete VLANs", "Customer Connectivity"] +List dns_management = ["DNS:disabled","Add/Modify/Delete DNS Views/Zones/Records", "Add/Modify/Delete DNS Resolvers"] +List compute = ["COMPUTE:disabled","Add/Modify/Delete Dedicated VM Hosts", "Add/Modify/Delete Instances/Boot Backup Policy"] +List storage = ["STORAGE:disabled","Add/Modify/Delete Block Volumes/Block Backup Policy", "Add/Modify/Delete File Systems", "Add/Modify/Delete Object Storage Buckets"] +List database = ["DATABASE:disabled","Add/Modify/Delete Virtual Machine or Bare Metal DB Systems", "Add/Modify/Delete EXA Infra and EXA VM Clusters", "Add/Modify/Delete ADBs"] +List load_balancers = ["LOAD BALANCERS:disabled","Add/Modify/Delete Load Balancers", "Add/Modify/Delete Network Load Balancers"] +List management_services = ["MANAGEMENT SERVICES:disabled","Add/Modify/Delete Notifications", "Add/Modify/Delete Events", "Add/Modify/Delete Alarms", "Add/Modify/Delete ServiceConnectors"] +List developer_services = ["DEVELOPER SERVICES:disabled", "Add/Modify/Delete OKE Cluster and Nodepools"] +List logging_services = ["LOGGING SERVICES:disabled","Enable VCN Flow Logs", "Enable LBaaS Logs", "Enable Object Storage Buckets Write Logs"] +List cis = ["CIS:disabled","CIS Compliance Checking Script", "Create Key/Vault", "Create Default Budget", "Enable Cloud Guard"] +List cd3_services = ["CD3 SERVICES:disabled","Fetch Compartments OCIDs to variables file", "Fetch Protocols to OCI_Protocols"] + +List ex_identity = ["IDENTITY:disabled","Export Compartments/Groups/Policies", "Export Users", "Export Network Sources"] +List ex_network = ["NETWORK:disabled","Export all Network Components", "Export Network components for VCNs/DRGs/DRGRouteRulesinOCI Tabs", "Export Network components for DHCP Tab", "Export Network components for SecRulesinOCI Tab", "Export Network components for RouteRulesinOCI Tab", "Export Network components for SubnetsVLANs Tab", "Export Network components for NSGs Tab"] +List ex_dns = ["DNS:disabled","Export DNS Views/Zones/Records", "Export DNS Resolvers"] +List ex_compute = ["COMPUTE:disabled","Export Dedicated VM Hosts", "Export 
Instances (excludes instances launched by OKE)"] +List ex_storage = ["STORAGE:disabled","Export Block Volumes/Block Backup Policy", "Export File Systems", "Export Object Storage Buckets"] +List ex_databases = ["DATABASE:disabled","Export Virtual Machine or Bare Metal DB Systems", "Export EXA Infra and EXA VMClusters", "Export ADBs"] +List ex_lb = ["LOAD BALANCERS:disabled","Export Load Balancers", "Export Network Load Balancers"] +List ex_management = ["MANAGEMENT SERVICES:disabled","Export Notifications", "Export Events", "Export Alarms", "Export Service Connectors"] +List ex_developer = ["DEVELOPER SERVICES:disabled","Export OKE cluster and Nodepools"] + +List final_list = [] +for (item in MainOptions.split(",")) { +if (item.equals("Validate CD3")){ +final_list += validate_cd3 +} +if (item.equals("Identity")){ +final_list += identity +} +if (item.equals("Compute")){ +final_list += compute +} +if (item.equals("Network")){ +final_list += network +} +if (item.equals("DNS Management")){ +final_list += dns_management +} +if (item.equals("Storage")){ +final_list += storage +} +if (item.equals("Database")){ +final_list += database +} +if (item.equals("Load Balancers")){ +final_list += load_balancers +} +if (item.equals("Management Services")){ +final_list += management_services +} +if (item.equals("Developer Services")){ +final_list += developer_services +} +if (item.equals("Logging Services")){ +final_list += logging_services +} +if (item.equals("CIS Compliance Features")){ +final_list += cis +} +if (item.equals("CD3 Services")){ +final_list += cd3_services +} +if (item.equals("Export Identity")){ +final_list += ex_identity +} +if (item.equals("Export Network")){ +final_list += ex_network +} +if (item.equals("Export DNS Management")){ +final_list += ex_dns +} +if (item.equals("Export Compute")){ +final_list += ex_compute +} +if (item.equals("Export Storage")){ +final_list += ex_storage +} +if (item.equals("Export Databases")){ +final_list += ex_databases +} +if (item.equals("Export Load Balancers")){ +final_list += ex_lb +} +if (item.equals("Export Management Services")){ +final_list += ex_management +} +if (item.equals("Export Developer Services")){ +final_list += ex_developer +} +} + +return final_list \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/ValidateParams.groovy b/jenkins_install/scriptler/scripts/ValidateParams.groovy new file mode 100644 index 000000000..d3128f9dd --- /dev/null +++ b/jenkins_install/scriptler/scripts/ValidateParams.groovy @@ -0,0 +1,75 @@ +def validate_params(Workflow,MainOptions,SubOptions,SubChildOptions,AdditionalFilters){ + valid_params = "Passed" + def gf_options_map = [ + "Validate CD3":["Validate Compartments","Validate Groups","Validate Policies","Validate Tags","Validate Networks","Validate DNS","Validate Instances","Validate Block Volumes","Validate FSS","Validate Buckets"], + "Identity":["Add/Modify/Delete Compartments", "Add/Modify/Delete Groups","Add/Modify/Delete Policies", "Add/Modify/Delete Users", "Add/Modify/Delete Network Sources"], + "Network":["Create Network", "Modify Network","Security Rules", "Route Rules", "DRG Route Rules", "Network Security Groups", "Add/Modify/Delete VLANs", "Customer Connectivity"], + "DNS Management":["Add/Modify/Delete DNS Views/Zones/Records", "Add/Modify/Delete DNS Resolvers"], + "Compute":["Add/Modify/Delete Dedicated VM Hosts", "Add/Modify/Delete Instances/Boot Backup Policy"], + "Storage":["Add/Modify/Delete Block Volumes/Block Backup Policy", "Add/Modify/Delete File Systems", 
"Add/Modify/Delete Object Storage Buckets"], + "Database":["Add/Modify/Delete Virtual Machine or Bare Metal DB Systems", "Add/Modify/Delete EXA Infra and EXA VM Clusters", "Add/Modify/Delete ADBs"], + "Load Balancers":["Add/Modify/Delete Load Balancers", "Add/Modify/Delete Network Load Balancers"], + "Management Services":["Add/Modify/Delete Notifications", "Add/Modify/Delete Events", "Add/Modify/Delete Alarms", "Add/Modify/Delete ServiceConnectors"], + "Developer Services":["Upload current terraform files/state to Resource Manager", "Add/Modify/Delete OKE Cluster and Nodepools"], + "Logging Services":["Enable VCN Flow Logs", "Enable LBaaS Logs", "Enable Object Storage Buckets Write Logs"], + "CIS Compliance Features":["CIS Compliance Checking Script", "Create Key/Vault", "Create Default Budget", "Enable Cloud Guard"], + "CD3 Services":["Fetch Compartments OCIDs to variables file", "Fetch Protocols to OCI_Protocols"] + ] + def non_gf_options_map = [ + "Export Identity":["Export Compartments/Groups/Policies", "Export Users", "Export Network Sources"], + "Export Network":["Export all Network Components", "Export Network components for VCNs/DRGs/DRGRouteRulesinOCI Tabs", "Export Network components for DHCP Tab", "Export Network components for SecRulesinOCI Tab", "Export Network components for RouteRulesinOCI Tab", "Export Network components for SubnetsVLANs Tab", "Export Network components for NSGs Tab"], + "Export DNS Management":["Export DNS Views/Zones/Records", "Export DNS Resolvers"], + "Export Compute":["Export Dedicated VM Hosts", "Export Instances (excludes instances launched by OKE)"], + "Export Storage":["Export Block Volumes/Block Backup Policy", "Export File Systems", "Export Object Storage Buckets"], + "Export Databases":["Export Virtual Machine or Bare Metal DB Systems", "Export EXA Infra and EXA VMClusters", "Export ADBs"], + "Export Load Balancers":["Export Load Balancers", "Export Network Load Balancers"], + "Export Management Services":["Export Notifications", "Export Events", "Export Alarms", "Export Service Connectors"], + "Export Developer Services":["Export OKE cluster and Nodepools"], + "CD3 Services":["Fetch Compartments OCIDs to variables file", "Fetch Protocols to OCI_Protocols"] + ] + mainoptions_list = MainOptions.split(",") + suboptions_list = SubOptions.split(",") + validation_map = [:] + if (mainoptions_list.size() > 0) { + for (mitem in MainOptions.split(",")) { + validation_map[mitem] = "Failed" + if (mitem.contains("Tag") || mitem.contains("OCVS") ) { + validation_map[mitem] = "Passed" + continue + } + if (Workflow.toLowerCase().contains("create")){ + for (item in gf_options_map[mitem]) { + if (item in suboptions_list) { + validation_map[mitem] = "Passed" + break + } + } + } + else { + for (item in non_gf_options_map[mitem]) { + if (item in suboptions_list) { + validation_map[mitem] = "Passed" + break + } + } + } + } + if ('Upload current terraform files/state to Resource Manager' in suboptions_list) { + if (AdditionalFilters.split("orm_compartments=\\[,")[1].startsWithAny(",", " ")) { + println("Failed - RM Stack Compartment") + valid_params = "Failed" + } + } + } else { + valid_params = "Failed" + } + result_list = [] + validation_map.each { result_list.add(it.value) } + if ("Failed" in result_list || valid_params == "Failed") { + valid_params = "Failed" + }else { + valid_params = "Passed" + } + return valid_params + } +return this \ No newline at end of file diff --git a/jenkins_install/scriptler/scripts/Workflow.groovy 
b/jenkins_install/scriptler/scripts/Workflow.groovy new file mode 100644 index 000000000..f0ceeae89 --- /dev/null +++ b/jenkins_install/scriptler/scripts/Workflow.groovy @@ -0,0 +1 @@ +return ["Create Resources in OCI (Greenfield Workflow)", "Export Resources from OCI (Non-Greenfield Workflow)"] \ No newline at end of file diff --git a/jenkins_install/setUpOCI_config.xml b/jenkins_install/setUpOCI_config.xml new file mode 100644 index 000000000..b162c389e --- /dev/null +++ b/jenkins_install/setUpOCI_config.xml @@ -0,0 +1,487 @@ + + + + + + + + + Excel_Template + + + + + + hudson.model.ParametersDefinitionProperty + + + + Execute setUpOCI + false + + + false + false + + + + + Excel_Template + Upload input Excel file. +Previously uploaded file will be used if left empty. + + + Workflow + Select Automation Toolkit Workflow + choice-parameter-23492076346439 + 1 + + PT_RADIO + + + MainOptions + Select Main Options + choice-parameter-23492076464252 + 1 + + + Workflow + PT_CHECKBOX + + + SubOptions + Select Sub Options + choice-parameter-23492076517486 + 1 + + + MainOptions + PT_CHECKBOX + + + SubChildOptions + Select Options for the SubOptions selected above + choice-parameter-23492076566925 + 1 + + + SubOptions + PT_CHECKBOX + + + AdditionalFilters + Select additional filters + choice-parameter-23492076642248 + 1 + + + Workflow,SubOptions,SubChildOptions + ET_FORMATTED_HTML + false + + + + + + + true + + + false + \ No newline at end of file diff --git a/jenkins_install/singleOutput-tf-destroy.groovy b/jenkins_install/singleOutput-tf-destroy.groovy new file mode 100644 index 000000000..1312aa2bc --- /dev/null +++ b/jenkins_install/singleOutput-tf-destroy.groovy @@ -0,0 +1,62 @@ +/* Set the various stages of the build */ +pipeline { + agent any + options { + ansiColor('xterm') + } + environment { + CI = 'true' + } + stages { + stage('Terraform Destroy Plan') { + when { + expression { return env.GIT_BRANCH == 'origin/main'; } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + def jobName = env.JOB_NAME + def parts = jobName.split('/') + + // Assuming job name format is /job/job_name + def regionName = parts[1] + // Set environment variables for reuse + env.Region = regionName + + sh "cd \"${WORKSPACE}/${env.Region}\" && terraform init -upgrade" + sh "cd \"${WORKSPACE}/${env.Region}\" && terraform plan -destroy" + } + } + } + } + + stage('Get Approval') { + when { + expression { return env.GIT_BRANCH == 'origin/main';} + expression {return currentBuild.result != "FAILURE" } + } + input { + message "Do you want to perform terraform destroy?" + } + steps { + echo "Approval for the Terraform Destroy Granted!" 
+ } + } + + stage('Terraform Destroy') { + when { + expression {return env.GIT_BRANCH == 'origin/main';} + expression {return currentBuild.result != "FAILURE" } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + sh "cd \"${WORKSPACE}/${env.Region}\" && terraform destroy --auto-approve" + } + } + } + } + } +} diff --git a/jenkins_install/singleOutput.groovy b/jenkins_install/singleOutput.groovy new file mode 100644 index 000000000..dbcfaa5dd --- /dev/null +++ b/jenkins_install/singleOutput.groovy @@ -0,0 +1,116 @@ +/* Set the various stages of the build */ +def tf_plan = "Changes" +pipeline { + agent any + options { + ansiColor('xterm') + } + stages { + stage('Terraform Plan') { + when { + expression { + return env.GIT_BRANCH == 'origin/main'; + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + def jobName = env.JOB_NAME + def parts = jobName.split('/') + + // Assuming the job name format is /job/job_name + def regionName = parts[1] + + // Set environment variables for reuse in subsequent stages + env.Region = regionName + dir("${WORKSPACE}/${env.Region}") { + sh 'terraform init -upgrade' + } + + // Run Terraform plan and capture the output + def terraformPlanOutput = sh(script: "cd \"${WORKSPACE}/${env.Region}\" && terraform plan -out=tfplan.out", returnStdout: true).trim() + + // Check if the plan contains any changes + if (terraformPlanOutput.contains('No changes.')) { + echo 'No changes in Terraform plan. Skipping further stages.' + tf_plan = "No Changes" + } else { + // If there are changes, proceed with applying the plan + echo "Changes detected in Terraform plan. Proceeding with apply. \n${terraformPlanOutput}" + + } + } + } + } + } + /** OPA Stage **/ + stage('OPA') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression { return tf_plan == "Changes" } + expression {return currentBuild.result != "FAILURE" } + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + // Run Terraform show and capture the output + sh "set +x && cd \"${WORKSPACE}/${env.Region}\" && terraform show -json tfplan.out > tfplan.json" + // Run OPA eval + def opaOutput = sh(script: "opa eval -f pretty -b /cd3user/oci_tools/cd3_automation_toolkit/user-scripts/OPA/ -i \"${WORKSPACE}/${env.Region}/tfplan.json\" data.terraform.deny",returnStdout: true).trim() + + if (opaOutput == '[]') { + echo "No OPA rules are violated. Proceeding with the next stage." + } + else { + echo "OPA Output:\n${opaOutput}" + unstable(message:"OPA Rules are violated.") + } + } + } + } + } + + stage('Get Approval') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression {return tf_plan == "Changes"} + expression {return currentBuild.result != "FAILURE" } + } + } + + options { + timeout(time: 1440, unit: 'MINUTES') // 24 hours + } + + steps { + script { + input message: "Do you want to apply the plan?" + echo "Approval for the Apply Granted!" + } + } + } + + stage('Terraform Apply') { + when { + allOf{ + expression { return env.GIT_BRANCH == 'origin/main'} + expression {return tf_plan == "Changes"} + expression {return currentBuild.result != "FAILURE" } + } + } + + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + script { + sh "cd \"${WORKSPACE}/${env.Region}\" && terraform apply --auto-approve tfplan.out" + } + } + } + } + } +}
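Both singleOutput.groovy and multiOutput.groovy above derive their Terraform working directory from env.JOB_NAME, splitting the job path on '/' and reading the region (and, for multi-output jobs, the service) from fixed indices. A minimal sketch of that convention follows; the folder and job names used here are hypothetical examples, not values taken from the toolkit.

```groovy
// Minimal sketch, not part of the PR: the JOB_NAME convention the pipelines
// above appear to rely on. Folder and job names here are hypothetical.
def singleJob = 'toolkit/us-ashburn-1/apply'            // single-output layout (assumed)
def multiJob  = 'toolkit/us-ashburn-1/database/apply'   // multi-output layout (assumed)

[singleJob, multiJob].each { jobName ->
    def parts       = jobName.split('/')
    def regionName  = parts[1]                               // e.g. 'us-ashburn-1'
    def serviceName = parts.size() > 3 ? parts[2] : null     // e.g. 'database' (multi-output only)
    // singleOutput.groovy runs terraform in ${WORKSPACE}/<region>,
    // multiOutput.groovy in ${WORKSPACE}/<region>/<service>
    println "${jobName} -> region=${regionName}, service=${serviceName ?: 'n/a'}"
}
```

In other words, the Jenkins folder hierarchy is assumed to mirror the region (and service) directory layout inside the pipeline workspace; if the jobs are created at a different nesting depth, the parts[1]/parts[2] indices in the pipelines would need to change accordingly.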