diff --git a/.github/workflows/e2e-parallel-destroy.yml b/.github/workflows/e2e-parallel-destroy.yml index 2652518f8c..4f00d3f32b 100644 --- a/.github/workflows/e2e-parallel-destroy.yml +++ b/.github/workflows/e2e-parallel-destroy.yml @@ -31,14 +31,9 @@ jobs: # - example_path: examples/fully-private-eks-cluster # skipping until issue #711 is addressed - example_path: examples/game-tech/agones-game-controller - example_path: examples/gitops/argocd - # - example_path: examples/ingress-controllers/nginx # ignoring due to https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1629 - example_path: examples/ipv6-eks-cluster - example_path: examples/karpenter - example_path: examples/multi-tenancy-with-teams - - example_path: examples/node-groups/fargate-profiles - - example_path: examples/node-groups/managed-node-groups - - example_path: examples/node-groups/self-managed-node-groups - - example_path: examples/node-groups/windows-node-groups - example_path: examples/stateful - example_path: examples/vpc-cni-custom-networking steps: diff --git a/.github/workflows/e2e-parallel-full.yml b/.github/workflows/e2e-parallel-full.yml index e12f3f6e66..a5224fea2d 100644 --- a/.github/workflows/e2e-parallel-full.yml +++ b/.github/workflows/e2e-parallel-full.yml @@ -35,14 +35,9 @@ jobs: # - example_path: examples/fully-private-eks-cluster # skipping until issue #711 - example_path: examples/game-tech/agones-game-controller - example_path: examples/gitops/argocd - # - example_path: examples/ingress-controllers/nginx # ignoring due to https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1629 - example_path: examples/ipv6-eks-cluster - example_path: examples/karpenter - example_path: examples/multi-tenancy-with-teams - - example_path: examples/node-groups/fargate-profiles - - example_path: examples/node-groups/managed-node-groups - - example_path: examples/node-groups/self-managed-node-groups - - example_path: examples/node-groups/windows-node-groups - example_path: examples/stateful - example_path: examples/vpc-cni-custom-networking steps: diff --git a/.github/workflows/plan-examples.py b/.github/workflows/plan-examples.py index d361b5385e..4c24a6bf95 100644 --- a/.github/workflows/plan-examples.py +++ b/.github/workflows/plan-examples.py @@ -11,12 +11,10 @@ def get_examples(): exclude = { 'examples/appmesh-mtls', # excluded until Rout53 is setup 'examples/eks-cluster-with-external-dns', # excluded until Rout53 is setup - 'examples/ci-cd/gitlab-ci-cd', # excluded since GitLab auth, backend, etc. required 'examples/fully-private-eks-cluster/vpc', # skipping until issue #711 is addressed 'examples/fully-private-eks-cluster/eks', 'examples/fully-private-eks-cluster/add-ons', 'examples/ai-ml/ray', # excluded until #887 is fixed - 'examples/portworx', # excluded due to policy not known at plan/apply time } projects = { diff --git a/README.md b/README.md index 16e7c8df32..c88be7c5f0 100644 --- a/README.md +++ b/README.md @@ -22,71 +22,12 @@ For complete project documentation, please visit our [documentation site](https: To view examples for how you can leverage EKS Blueprints, please see the [examples](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/examples) directory. -## Usage - -The below demonstrates how you can leverage EKS Blueprints to deploy an EKS cluster, a managed node group, and various Kubernetes add-ons. 
- -```hcl -module "eks_blueprints" { - source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.12.0" - - # EKS CLUSTER - cluster_version = "1.23" - vpc_id = "" # Enter VPC ID - private_subnet_ids = ["", "", ""] # Enter Private Subnet IDs - - # EKS MANAGED NODE GROUPS - managed_node_groups = { - mg_m5 = { - node_group_name = "managed-ondemand" - instance_types = ["m5.large"] - subnet_ids = ["", "", ""] - } - } -} - -module "eks_blueprints_kubernetes_addons" { - source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons?ref=v4.12.0" - - eks_cluster_id = module.eks_blueprints.eks_cluster_id - - # EKS Addons - enable_amazon_eks_vpc_cni = true - enable_amazon_eks_coredns = true - enable_amazon_eks_kube_proxy = true - enable_amazon_eks_aws_ebs_csi_driver = true - - #K8s Add-ons - enable_argocd = true - enable_aws_for_fluentbit = true - enable_aws_load_balancer_controller = true - enable_cluster_autoscaler = true - enable_metrics_server = true - enable_prometheus = true -} -``` - -The code above will provision the following: - -- ✅ A new EKS Cluster with a managed node group. -- ✅ Amazon EKS add-ons `vpc-cni`, `CoreDNS`, `kube-proxy`, and `aws-ebs-csi-driver`. -- ✅ `Cluster Autoscaler` and `Metrics Server` for scaling your workloads. -- ✅ `Fluent Bit` for routing logs. -- ✅ `AWS Load Balancer Controller` for distributing traffic. -- ✅ `Argocd` for declarative GitOps CD for Kubernetes. -- ✅ `Prometheus` for observability. - ## Add-ons EKS Blueprints makes it easy to provision a wide range of popular Kubernetes add-ons into an EKS cluster. By default, the [Terraform Helm provider](https://github.com/hashicorp/terraform-provider-helm) is used to deploy add-ons with publicly available [Helm Charts](https://artifacthub.io/).EKS Blueprints provides support for leveraging self-hosted Helm Chart as well. For complete documentation on deploying add-ons, please visit our [add-on documentation](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/add-ons/) -## Submodules - -The root module calls into several submodules which provides support for deploying and integrating a number of external AWS services that can be used in concert with Amazon EKS. -This includes Amazon Managed Prometheus and EMR on EKS. For complete documentation on deploying external services, please visit our [submodules documentation](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/modules/emr-on-eks/). - ## Motivation Kubernetes is a powerful and extensible container orchestration technology that allows you to deploy and manage containerized applications at scale. The extensible nature of Kubernetes also allows you to use a wide range of popular open-source tools, commonly referred to as add-ons, in Kubernetes clusters. With such a large number of tooling and design choices available however, building a tailored EKS cluster that meets your application’s specific needs can take a significant amount of time. It involves integrating a wide range of open-source tools and AWS services and requires deep expertise in AWS and Kubernetes. @@ -99,8 +40,6 @@ EKS Blueprints for Terraform is maintained by AWS Solution Architects. It is not To post feedback, submit feature ideas, or report bugs, please use the [Issues section](https://github.com/aws-ia/terraform-aws-eks-blueprints/issues) of this GitHub repo. -For architectural details, step-by-step instructions, and customization options, see our [documentation site](https://aws-ia.github.io/terraform-aws-eks-blueprints/). 
- If you are interested in contributing to EKS Blueprints, see the [Contribution guide](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/CONTRIBUTING.md). diff --git a/docs/add-ons/nginx.md b/docs/add-ons/nginx.md index db7b101e81..3e67f65337 100644 --- a/docs/add-ons/nginx.md +++ b/docs/add-ons/nginx.md @@ -6,7 +6,7 @@ Other than handling Kubernetes ingress objects, this ingress controller can faci ## Usage -Nginx Ingress Controller can be deployed by enabling the add-on via the following. Check out the full [example](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/ingress-controllers/nginx/main.tf) to deploy the EKS Cluster with Nginx Ingress Controller. +Nginx Ingress Controller can be deployed by enabling the add-on via the following. ```hcl enable_ingress_nginx = true @@ -41,7 +41,7 @@ You can optionally customize the Helm chart that deploys `nginx` via the followi The following properties are made available for use when managing the add-on via GitOps. -Refer to [main.tf](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/modules/kubernetes-addons/ingress-nginx/main.tf) for latest config. GitOps with ArgoCD Add-on repo is located [here](https://github.com/aws-samples/eks-blueprints-add-ons/blob/main/chart/values.yaml) +GitOps with ArgoCD Add-on repo is located [here](https://github.com/aws-samples/eks-blueprints-add-ons/blob/main/chart/values.yaml) ``` hcl argocd_gitops_config = { diff --git a/docs/add-ons/portworx.md b/docs/add-ons/portworx.md index 9cf62238a7..17f6224909 100644 --- a/docs/add-ons/portworx.md +++ b/docs/add-ons/portworx.md @@ -6,13 +6,9 @@ - [Helm chart](https://github.com/portworx/helm) -## Examples Blueprint - -To get started look at these sample [blueprints](../../examples/portworx). - ## Requirements -For the add-on to work, Portworx needs additional permission to AWS resources which can be provided in the following way. For an example blueprint, click [here](../../examples/portworx). +For the add-on to work, Portworx needs additional permission to AWS resources which can be provided in the following way. Note: Portworx currently does not support obtaining these permissions with an IRSA. Its support will be added with future releases. diff --git a/docs/advanced/bottlerocket.md b/docs/advanced/bottlerocket.md index fc1e9f7d83..fa019a214d 100644 --- a/docs/advanced/bottlerocket.md +++ b/docs/advanced/bottlerocket.md @@ -4,7 +4,7 @@ Bottlerocket has two containers runtimes running. Control container **on** by default used for AWS Systems manager and remote API access. Admin container **off** by default for deep debugging and exploration. -Bottlerocket [Launch templates userdata](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/modules/aws-eks-managed-node-groups/templates/userdata-bottlerocket.tpl) uses the TOML format with Key-value pairs. +Bottlerocket launch templates userdata uses the TOML format with Key-value pairs. Remote API access API via SSM agent. You can launch trouble shooting container via user data `[settings.host-containers.admin] enabled = true`. ### Features diff --git a/docs/core-concepts.md b/docs/core-concepts.md index 1ef436d78d..b95297a3b4 100644 --- a/docs/core-concepts.md +++ b/docs/core-concepts.md @@ -14,8 +14,6 @@ This document provides a high level overview of the Core Concepts that are embed A `cluster` is simply an EKS cluster. EKS Blueprints provides for customizing the compute options you leverage with your `clusters`. 
The framework currently supports `EC2`, `Fargate` and `BottleRocket` instances. It also supports managed and self-managed node groups. To specify the type of compute you want to use for your `cluster`, you use the `managed_node_groups`, `self_managed_nodegroups`, or `fargate_profiles` variables. -See our [Node Groups](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/node-groups/) documentation and our [Node Group example directory](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/examples/node-groups) for detailed information. - ## Add-on `Add-ons` allow you to configure the operational tools that you would like to deploy into your EKS cluster. When you configure `add-ons` for a `cluster`, the `add-ons` will be provisioned at deploy time by leveraging the Terraform Helm provider. Add-ons can deploy both Kubernetes specific resources and AWS resources needed to support add-on functionality. diff --git a/docs/node-groups.md b/docs/node-groups.md deleted file mode 100644 index 2c38756d8a..0000000000 --- a/docs/node-groups.md +++ /dev/null @@ -1,750 +0,0 @@ -# Node Groups - -The framework uses dedicated sub modules for creating [AWS Managed Node Groups](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/modules/aws-eks-managed-node-groups), [Self-managed Node groups](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/modules/aws-eks-self-managed-node-groups) and [Fargate profiles](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/modules/aws-eks-fargate-profiles). These modules provide flexibility to add or remove managed/self-managed node groups/fargate profiles by simply adding/removing map of values to input config. See [example](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main/examples/eks-cluster-with-new-vpc). - -The `aws-auth` ConfigMap handled by this module allow your nodes to join your cluster, and you also use this ConfigMap to add RBAC access to IAM users and roles. -Each Node Group can have dedicated IAM role, Launch template and Security Group to improve the security. - -## Additional IAM Roles, Users and Accounts -Access to EKS cluster using AWS IAM entities is enabled by the [AWS IAM Authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html) for Kubernetes, which runs on the Amazon EKS control plane. -The authenticator gets its configuration information from the `aws-auth` [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/). - -The following config grants additional AWS IAM users or roles the ability to interact with your cluster. However, the best practice is to leverage [soft-multitenancy](https://aws.github.io/aws-eks-best-practices/security/docs/multitenancy/) with the help of [Teams](teams.md) module. Teams feature helps to manage users with dedicated namespaces, RBAC, IAM roles and register users with `aws-auth` to provide access to the EKS Cluster. 
- -The below example demonstrates adding additional IAM Roles, IAM Users and Accounts using EKS Blueprints module - -```hcl -module "eks_blueprints" { - source = "github.com/aws-ia/terraform-aws-eks-blueprints" - - # EKS CLUSTER - cluster_version = "1.21" # EKS Cluster Version - vpc_id = "" # Enter VPC ID - private_subnet_ids = ["", "", ""] # Enter Private Subnet IDs - - # List of map_roles - map_roles = [ - { - rolearn = "arn:aws:iam:::role/" # The ARN of the IAM role - username = "ops-role" # The user name within Kubernetes to map to the IAM role - groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings - } - ] - - # List of map_users - map_users = [ - { - userarn = "arn:aws:iam:::user/" # The ARN of the IAM user to add. - username = "opsuser" # The user name within Kubernetes to map to the IAM role - groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings - } - ] - - map_accounts = ["123456789", "9876543321"] # List of AWS account ids -} -``` - -## Managed Node Groups - -The below example demonstrates the minimum configuration required to deploy a managed node group. - -```hcl - # EKS MANAGED NODE GROUPS - managed_node_groups = { - mng = { - node_group_name = "mng-ondemand" - instance_types = ["m5.large"] - subnet_ids = [] # Mandatory Public or Private Subnet IDs - disk_size = 100 # disk_size will be ignored when using Launch Templates - } - } -``` - -The below example demonstrates advanced configuration options for a managed node group with launch templates. - -```hcl - managed_node_groups = { - # Managed Node groups with Launch templates using AMI TYPE - mng_lt = { - # Node Group configuration - node_group_name = "mng-lt" - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or windows or bottlerocket - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ; - enable_monitoring = true - create_iam_role = false # default is true; set to false to bring your own IAM Role with iam_role_arn option - iam_role_arn = "" # Node groups creates a new IAM role if `iam_role_arn` is not specified - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - # Node Group scaling configuration - desired_size = 3 - max_size = 3 - min_size = 3 - - # Node Group update configuration. Set the maximum number or percentage of unavailable nodes to be tolerated during the node group version update. - update_config = [{ - max_unavailable_percentage = 30 - }] - - # Node Group compute configuration - ami_type = "AL2_x86_64" # Amazon Linux 2(AL2_x86_64), AL2_x86_64_GPU, AL2_ARM_64, BOTTLEROCKET_x86_64, BOTTLEROCKET_ARM_64 - release_version = "" # Enter AMI release version to deploy the latest AMI released by AWS. Used only when you specify ami_type - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m5.large"] # List of instances to get capacity from multipe pools - - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 100 - }, - { - device_name = "/dev/xvdf" # mount point to /local1 (it could be local2, depending upon the disks are attached during boot) - volume_type = "gp3" # The volume type. Can be standard, gp2, gp3, io1, io2, sc1 or st1 (Default: gp3). 
- volume_size = 100 - delete_on_termination = true - encrypted = true - kms_key_id = "" # Custom KMS Key can be used to encrypt the disk - iops = 3000 - throughput = 125 - } - ] - - # Node Group network configuration - subnet_ids = [] # Mandatory - # Define private/public subnets list with comma separated ["subnet1","subnet2","subnet3"] - - additional_iam_policies = [] # Attach additional IAM policies to the IAM role attached to this worker group - # SSH ACCESS Optional - Recommended to use SSM Session manager - remote_access = false - ec2_ssh_key = "" - ssh_security_group_id = "" - - # Taints can be applied through EKS API or through Bootstrap script using kubelet_extra_args - # e.g., k8s_taints = [{key= "spot", value="true", "effect"="NO_SCHEDULE"}] - k8s_taints = [{key= "purpose", value="execution", effect="NO_SCHEDULE"}] - - # Node Labels can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m4-on-demand" - Name = "m4-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } - } -``` - -The below example demonstrates advanced configuration options using GPU instances/ARM instances/Bottlerocket and custom AMIs managed node groups. - -```hcl - #---------------------------------------------------------# - # GPU instance type Worker Group - #---------------------------------------------------------# - gpu = { - # 1> Node Group configuration - Part1 - node_group_name = "gpu-mg5" # Max 40 characters for node group name - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ; - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - # 2> Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - max_unavailable = 1 # or percentage = 20 - - # 3> Node Group compute configuration - ami_type = "AL2_x86_64_GPU" # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m5.large"] # List of instances to get capacity from multipe pools - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 100 - } - ] - - # 4> Node Group network configuration - subnet_ids = [] # Defaults to private subnet-ids used by EKS Controle plane. 
Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [] - - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } - - #---------------------------------------------------------# - # ARM instance type Worker Group - #---------------------------------------------------------# - arm = { - # 1> Node Group configuration - Part1 - node_group_name = "arm-m6g-2vcpu-8gb" # Max 40 characters for node group name - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ; - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - # 2> Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - max_unavailable = 1 # or percentage = 20 - - # 3> Node Group compute configuration - ami_type = "AL2_ARM_64" # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM, BOTTLEROCKET_ARM_64, BOTTLEROCKET_x86_64 - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m6g.large"] # List of instances to get capacity from multipe pools - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 100 - } - ] - # 4> Node Group network configuration - subnet_ids = [] # Defaults to private subnet-ids used by EKS Controle plane. Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [] - - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m6g-on-demand" - Name = "m6g-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } - - #---------------------------------------------------------# - # Bottlerocket ARM instance type Worker Group - #---------------------------------------------------------# - # Checkout this doc https://github.com/bottlerocket-os/bottlerocket for configuring userdata for Launch Templates - bottlerocket_arm = { - # 1> Node Group configuration - node_group_name = "btl-arm" # Max 40 characters for node group name - create_launch_template = true # false will use the default launch template - launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ; - # 2> Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - max_unavailable = 1 # or percentage = 20 - - # 3> Node Group compute configuration - ami_type = "BOTTLEROCKET_ARM_64" # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM, BOTTLEROCKET_ARM_64, BOTTLEROCKET_x86_64 - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m6g.large"] # List of instances to get capacity from multipe pools - disk_size = 50 - - # 4> Node Group network configuration - subnet_ids = [] # Defaults to private subnet-ids used by EKS Controle plane. 
Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [] - - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m6g-on-demand" - Name = "m6g-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } - - #---------------------------------------------------------# - # Bottlerocket instance type Worker Group - #---------------------------------------------------------# - # Checkout this doc https://github.com/bottlerocket-os/bottlerocket for configuring userdata for Launch Templates - bottlerocket_x86 = { - # 1> Node Group configuration - Part1 - node_group_name = "btl-x86" # Max 40 characters for node group name - create_launch_template = true # false will use the default launch template - launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ; - # 2> Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - max_unavailable = 1 # or percentage = 20 - - # 3> Node Group compute configuration - ami_type = "BOTTLEROCKET_x86_64" # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM, BOTTLEROCKET_ARM_64, BOTTLEROCKET_x86_64 - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m5.large"] # List of instances to get capacity from multipe pools - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 100 - } - ] - - # 4> Node Group network configuration - subnet_ids = [] # Defaults to private subnet-ids used by EKS Controle plane. Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [] - - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } - - #---------------------------------------------------------# - # Managed Node groups with Launch templates using CUSTOM AMI with ContainerD runtime - #---------------------------------------------------------# - mng_custom_ami = { - # Node Group configuration - node_group_name = "mng_custom_ami" # Max 40 characters for node group name - - # custom_ami_id is optional when you provide ami_type. Enter the Custom AMI id if you want to use your own custom AMI - custom_ami_id = data.aws_ami.amazonlinux2eks.id - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["m5.large"] # List of instances to get capacity from multipe pools - - # Launch template configuration - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - - # pre_userdata will be applied by using custom_ami_id or ami_type - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - - # post_userdata will be applied only by using custom_ami_id - post_userdata = <<-EOT - echo "Bootstrap successfully completed! 
You can further apply config or install to run after bootstrap if needed" - EOT - - # kubelet_extra_args used only when you pass custom_ami_id; - # --node-labels is used to apply Kubernetes Labels to Nodes - # --register-with-taints used to apply taints to Nodes - # e.g., kubelet_extra_args='--node-labels=WorkerType=ON_DEMAND,noderole=spark --register-with-taints=ON_DEMAND=true:NoSchedule --max-pods=58', - kubelet_extra_args = "--node-labels=WorkerType=ON_DEMAND,noderole=spark --register-with-taints=test=true:NoSchedule --max-pods=20" - - # bootstrap_extra_args used only when you pass custom_ami_id. Allows you to change the Container Runtime for Nodes - # e.g., bootstrap_extra_args="--use-max-pods false --container-runtime containerd" - bootstrap_extra_args = "--use-max-pods false --container-runtime containerd" - - # Taints can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_taints = [] - - # Node Labels can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_labels = { - Environment = "preprod" - Zone = "dev" - Runtime = "containerd" - } - - enable_monitoring = true - eni_delete = true - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates - - # Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - max_unavailable = 1 # or percentage = 20 - - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 150 - } - ] - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - additional_iam_policies = [] # Attach additional IAM policies to the IAM role attached to this worker group - - # SSH ACCESS Optional - Recommended to use SSM Session manager - remote_access = false - ec2_ssh_key = "" - ssh_security_group_id = "" - - additional_tags = { - ExtraTag = "mng-custom-ami" - Name = "mng-custom-ami" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - } -``` - -### Managed Node Groups with EC2 Spot Instances - -We recommend you to use managed-node groups (MNG) when using EC2 Spot instances. MNG creates the ASG for you following the Spot best practices: - -* Configure the [capacity_rebalance](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-capacity-rebalancing.html) feature to `true` -* Manage the rebalance notification notice by launching a new instance proactively when there's an instance with a high-risk of being interrupted. This is instance is [cordoned](https://jamesdefabia.github.io/docs/user-guide/kubectl/kubectl_cordon/) automatically so no new pods are scheduled there. -* Use [capacity-optimized](https://aws.amazon.com/about-aws/whats-new/2019/08/new-capacity-optimized-allocation-strategy-for-provisioning-amazon-ec2-spot-instances/) allocation strategy to launch an instance from the [pool](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html#spot-features) with more spare capacity -* Manage the instance interruption notice by draining the pods automatically to other nodes in the cluster. 
- -The below example demonstrates the minimum configuration required to deploy a managed node group using EC2 Spot instances. Notice how we're including more than one instance type for diversification purposes. Diversification is key, is how you'll get access to more spare capacity in EC2. You can use the [Amazon EC2 Instance Selector CLI](https://github.com/aws/amazon-ec2-instance-selector) to get a list of instances that match your workload. - -```hcl - # EKS MANAGED NODE GROUPS WITH SPOT INSTANCES - spot_2vcpu_8mem = { - node_group_name = "mng-spot-2vcpu-8mem" - capacity_type = "SPOT" - instance_types = ["m5.large", "m4.large", "m6a.large", "m5a.large", "m5d.large"] // Instances with same specs for memory and CPU so Cluster Autoscaler scales efficiently - subnet_ids = [] # Mandatory Public or Private Subnet IDs - disk_size = 100 # disk_size will be ignored when using Launch Templates - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] // Avoid scheduling stateful workloads in SPOT nodes - } -``` - -The below example demonstrates advanced configuration options for a managed node group with a custom launch templates. This is important if you decide to add the ability to scale-down to zero nodes. Cluster autoscaler needs to be able to identify which nodes to scale-down, and you do it by adding custom tags. - -```hcl - # EKS MANAGED NODE GROUPS WITH SPOT INSTANCES - spot_2vcpu_8mem = { - node_group_name = "mng-spot-2vcpu-8mem" - capacity_type = "SPOT" - instance_types = ["m5.large", "m4.large", "m6a.large", "m5a.large", "m5d.large"] // Instances with same specs for memory and CPU - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] // Avoid scheduling stateful workloads in SPOT nodes - - min_size = 0 // Scale-down to zero nodes when no workloads are running, useful for pre-production environments - - # Launch template configuration - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - - # This is so cluster autoscaler can identify which node (using ASGs tags) to scale-down to zero nodes - additional_tags = { - "k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType" = "SPOT" - "k8s.io/cluster-autoscaler/node-template/label/eks/node_group_name" = "mng-spot-2vcpu-8mem" - } - } -``` - -Cluser autoscaler has the ability to set priorities on which node groups to scale by using the `priority` expander. To configure it, you need to add the following configuration in the `eks_blueprints_kubernetes_addons` block, like this: - -```hcl - enable_cluster_autoscaler = true - cluster_autoscaler_helm_config = { - set = [ - { - name = "extraArgs.expander" - value = "priority" - }, - { - name = "expanderPriorities" - value = <<-EOT - 100: - - .*-spot-2vcpu-8mem.* - 90: - - .*-spot-4vcpu-16mem.* - 10: - - .* - EOT - } - ] - } -``` - -_NOTE_: - -- You should not set to true both `create_launch_template` and `remote_access` or you'll end-up with new managed nodegroups that won't be able to join the cluster. 
- -## Self-managed Node Groups - -The below example demonstrates the minimum configuration required to deploy a Self-managed node group. - -```hcl - # EKS SELF MANAGED NODE GROUPS - self_managed_node_groups = { - self_mg_5 = { - node_group_name = "self-managed-ondemand" - launch_template_os = "amazonlinux2eks" - subnet_ids = module.vpc.private_subnets - } - } -``` - -The below example demonstrates advanced configuration options for a self-managed node group. - - `--node-labels` parameter is used to apply labels to Nodes for self-managed node groups. e.g., `kubelet_extra_args="--node-labels=WorkerType=SPOT,noderole=spark` - - `--register-with-taints` is used to apply taints to Nodes for self-managed node groups. e.g., `kubelet_extra_args='--register-with-taints=spot=true:NoSchedule --max-pods=58'`, - -```hcl - self_managed_node_groups = { - self_mg_5 = { - node_group_name = "self-managed-ondemand" - instance_type = "m5.large" - custom_ami_id = "ami-0dfaa019a300f219c" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. - capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot" - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - post_userdata = "" - - create_iam_role = false # Changing `create_iam_role=false` to bring your own IAM Role - iam_role_arn = "" # custom IAM role for aws-auth mapping; used when create_iam_role = false - iam_instance_profile_name = "" # IAM instance profile name for Launch templates; used when create_iam_role = false - - kubelet_extra_args = "--node-labels=WorkerType=ON_DEMAND,noderole=spark --register-with-taints=test=true:NoSchedule --max-pods=20" - bootstrap_extra_args = "" - block_device_mapping = [ - { - device_name = "/dev/xvda" # mount point to / - volume_type = "gp3" - volume_size = 20 - }, - { - device_name = "/dev/xvdf" # mount point to /local1 (it could be local2, depending upon the disks are attached during boot) - volume_type = "gp3" - volume_size = 50 - iops = 3000 - throughput = 125 - }, - { - device_name = "/dev/xvdg" # mount point to /local2 (it could be local1, depending upon the disks are attached during boot) - volume_type = "gp3" - volume_size = 100 - iops = 3000 - throughput = 125 - } - ] - enable_monitoring = false - public_ip = false # Enable only for public subnets - - # AUTOSCALING - max_size = 3 - min_size = 1 - subnet_ids = [] # Mandatory Public or Private Subnet IDs - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "private" - } - launch_template_tags = { - SomeAwsProviderDefaultTag1: "TRUE" - SomeAwsProviderDefaultTag2: "TRUE" - } - additional_iam_policies = [] - }, - } -``` - -With the previous described example at `block_device_mapping`, in case you choose an instance that has local NVMe storage, you will achieve the three specified EBS disks plus all local NVMe disks that instance brings. - -For example, for an `m5d.large` you will end up with the following mount points: `/` for device named `/dev/xvda`, `/local1` for device named `/dev/xvdf`, `/local2` for device named `/dev/xvdg`, and `/local3` for instance storage (in such case a disk with 70GB). - -Check the following references as you may desire: - -- [Amazon EBS and NVMe on Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html). 
-- [AWS NVMe drivers for Windows instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/aws-nvme-drivers.html) -- [EC2 Instance Update – M5 Instances with Local NVMe Storage (M5d)](https://aws.amazon.com/blogs/aws/ec2-instance-update-m5-instances-with-local-nvme-storage-m5d/) - -### Self-Managed Node Groups with EC2 Spot Instances - -We recommend you to use managed-node groups (MNG) when using EC2 Spot instances. However, if you need to use self-managed node groups, you need to configure the ASG with the following Spot best practices: - -* Configure the [capacity_rebalance](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-capacity-rebalancing.html) feature to `true` -* Use the [capacity-optimized](https://aws.amazon.com/about-aws/whats-new/2019/08/new-capacity-optimized-allocation-strategy-for-provisioning-amazon-ec2-spot-instances/) allocation strategy to launch an instance from the [pool](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html#spot-features) with more spare capacity -* Deploy the [Node Termination Handler (NTH)](https://github.com/aws/aws-node-termination-handler) to manage the rebalance recommendation and instance termination notice - -The below example demonstrates the minimum configuration required to deploy a self-managed node group. Notice how we're including more than one instance type for diversification purposes. Diversification is key, is how you'll get access to more spare capacity in EC2. You can use the [Amazon EC2 Instance Selector CLI](https://github.com/aws/amazon-ec2-instance-selector) to get a list of instances that match your workload. - -```hcl - spot_2vcpu_8mem = { - node_group_name = "smng-spot-2vcpu-8mem" - capacity_type = "spot" - capacity_rebalance = true - instance_types = ["m5.large", "m4.large", "m6a.large", "m5a.large", "m5d.large"] - min_size = 0 - subnet_ids = module.vpc.private_subnets - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] - } -``` - -You need to deploy the NTH as an add-on, so make sure you include the following within the `eks_blueprints_kubernetes_addons` block: - -```hcl - auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups - enable_aws_node_termination_handler = true -``` - -Cluser autoscaler has the ability to set priorities on which node groups to scale by using the `priority` expander. To configure it, you need to add the following configuration in the `eks_blueprints_kubernetes_addons` block, like this: - -```hcl - enable_cluster_autoscaler = true - cluster_autoscaler_helm_config = { - set = [ - { - name = "extraArgs.expander" - value = "priority" - }, - { - name = "expanderPriorities" - value = <<-EOT - 100: - - .*-spot-2vcpu-8mem.* - 90: - - .*-spot-4vcpu-16mem.* - 10: - - .* - EOT - } - ] - } -``` - -### Fargate Profile - -The example below demonstrates how you can customize a Fargate profile for your cluster. 
- -```hcl - fargate_profiles = { - default = { - fargate_profile_name = "default" - fargate_profile_namespaces = [{ - namespace = "default" - k8s_labels = { - Environment = "preprod" - Zone = "dev" - env = "fargate" - } - }] - - subnet_ids = [] # Provide list of private subnets - - additional_tags = { - ExtraTag = "Fargate" - } - }, - multi = { - fargate_profile_name = "multi-namespaces" - create_iam_role = false # Changing `create_iam_role=false` to bring your own IAM Role - iam_role_arn = "" # custom IAM role for aws-auth mapping; used when `create_iam_role = false` - additional_iam_policies = [] # additional IAM policies - fargate_profile_namespaces = [{ - namespace = "default" - k8s_labels = { - Environment = "preprod" - Zone = "dev" - OS = "Fargate" - WorkerType = "FARGATE" - Namespace = "default" - } - }, - { - namespace = "sales" - k8s_labels = { - Environment = "preprod" - Zone = "dev" - OS = "Fargate" - WorkerType = "FARGATE" - Namespace = "default" - } - }] - - subnet_ids = [] # Provide list of private subnets - - additional_tags = { - ExtraTag = "Fargate" - } - }, - } -``` - -### Windows Self-Managed Node Groups - -The example below demonstrates the minimum configuration required to deploy a Self-managed node group of Windows nodes. Refer to the [AWS EKS user guide](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for more information about Windows support in EKS. - -```hcl - # SELF-MANAGED NODE GROUP with Windows support - enable_windows_support = true - - self_managed_node_groups = { - ng_od_windows = { - node_group_name = "ng-od-windows" - launch_template_os = "windows" - instance_type = "m5n.large" - subnet_ids = module.vpc.private_subnets - min_size = 2 - } - } -``` - -In clusters where Windows support is enabled, workloads should have explicit node assignments configured using `nodeSelector` or `affinity`, as described in the Kubernetes document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). -For example, if you are enabling the `metrics-server` Kubernetes add-on (Helm chart), use the following configuration to ensure its pods are assigned to Linux nodes. See the [EKS Cluster with Windows Support example](../examples/node-groups/windows-node-groups/) for full Terraform configuration and workload deployment samples. 
- -```hcl - enable_metrics_server = true - metrics_server_helm_config = { - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - } - ] - } -``` diff --git a/examples/ci-cd/gitlab-ci-cd/.gitlab-ci.yml b/examples/ci-cd/gitlab-ci-cd/.gitlab-ci.yml deleted file mode 100644 index b28d26d665..0000000000 --- a/examples/ci-cd/gitlab-ci-cd/.gitlab-ci.yml +++ /dev/null @@ -1,110 +0,0 @@ -# The purpose of this file is to demonstrate how to use GitLab CICD template to automate build and deployent of Terraform IaC -# -# This is a opinionated template which also demonstrates the best practice to implement gitops with Terraform IaC -image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest -variables: - TF_ROOT: ${CI_PROJECT_DIR} - TF_HTTP_PASSWORD: ${CI_JOB_TOKEN} - TF_HTTP_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}-tfstate - TF_HTTP_LOCK_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}-tfstate/lock - TF_HTTP_UNLOCK_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}-tfstate/lock - TF_HTTP_LOCK_METHOD : POST - TF_HTTP_UNLOCK_METHOD: DELETE - TF_HTTP_RETRY_WAIT_MIN: 1 - - -cache: - key: "${TF_ROOT}" - paths: - - ${TF_ROOT}/.terraform/ - -before_script: - - cd ${TF_ROOT} - - apk add --no-cache python3 py-pip - - pip3 install awscli - -stages: - - tf-init - - tf-fmt - - tf-validate - - tf-plan - - tf-apply - - tf-destroy - -tf-init: - stage: tf-init - rules: - - exists: - - .destroy-cluster - when: never - - changes: - - "*.tf" - script: - - export DEBUG_OUTPUT=true # Optional use for debug purpose only - - gitlab-terraform init - -tf-fmt: - stage: tf-fmt - rules: - - exists: - - .destroy-cluster - when: never - - changes: - - "*.tf" - script: - - gitlab-terraform fmt -recursive - -tf-validate: - stage: tf-validate - rules: - - exists: - - .destroy-cluster - when: never - - changes: - - "*.tf" - script: - - gitlab-terraform validate - -# ${TF_ROOT}/plan.json pushes the plan into the Terraform backend of GitLab (CI) -tf-plan: - stage: tf-plan - artifacts: - name: plan - paths: - - ${TF_ROOT}/plan.cache - reports: - terraform: ${TF_ROOT}/plan.json - rules: - - exists: - - .destroy-cluster - when: never - - changes: - - "*.tf" - script: - - gitlab-terraform plan - - gitlab-terraform plan-json - -tf-apply: - stage: tf-apply - dependencies: - - tf-plan - rules: - - exists: - - .destroy-cluster - when: never - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - changes: - - "*.tf" - when: manual - allow_failure: true - script: - - gitlab-terraform apply - -tf-destroy: - stage: tf-destroy - dependencies: - - "tf-apply" - when: manual - allow_failure: true - script: - - gitlab-terraform destroy -auto-approve diff --git a/examples/ci-cd/gitlab-ci-cd/README.md b/examples/ci-cd/gitlab-ci-cd/README.md deleted file mode 100644 index 94374899cc..0000000000 --- a/examples/ci-cd/gitlab-ci-cd/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# GitLab CI/CD example - -This pattern demonstrates a GitOps approach with IaC using Gitlab CI/CD. -This shows an example of how to automate the build and deployment of an IaC code for provisioning Amazon EKS Cluster using GitLab CI/CD. 
-Using Gitlab for Terraform state management which allows multiple engineers to work together to develop the infrastructure -Validation checks for the code -Note : This pattern needs Gitlab version 14.5 or above - -### Step 1: Clone this repo - -``` -git@github.com:aws-ia/terraform-aws-eks-blueprints.git -``` - -## Step 2: Create a new git repo in your GitLab group and copy files from examples/advanced/gitlab-ci-cd folder to the root of your new GitLab repo - - cd examples/ci-cd/gitlab-ci-cd - cp . $YOUR_GITLAB_REPO_ROOT - -## Step 3: Update project settings-> CI/CD ->Variables - -- Login to the GitLab console, Open your repo and navigate to `Settings->CI-CD->Variables` -- Update the following variables as Key Value pairs before triggering the pipeline - - AWS_ACCESS_KEY_ID e.g., access key from devops admin iam role - AWS_SECRET_ACCESS_KEY e.g., secret key from devops admin iam role - AWS_REGION e.g., eu-west-1 - AWS_SESSION_TOKEN e.g,. session token from devops admin iam role. Note : This is required only if you are using temporary credentials - -## Step 4: Commit changes and push to verify the pipeline - -Manually trigger the `tf-apply` to provision the resources - -## Step 5: Verify whether the state file update happened in your project (Infrastructure->Terraform-states). - -## Step 6: (Optional) Manually Install, Configure and Run GitLab Agent for Kubernetes (“Agent”, for short) is your active in-cluster. - -This is for or connecting Kubernetes clusters to GitLab. Refer https://docs.gitlab.com/ee/user/clusters/agent/install/index.html - -## Step 7: Cleanup the deployed resources - -Manually trigger the `tf-destroy` stage in the GitLab Ci/CD pipeline to destroy your deployment. - -## Troubleshooting: - -- ### 400 Error when creating resource - - - If the error contains `{message: {environment_scope: [cannot add duplicated environment scope]}}`, it is likely that an existing Kubernetes integration with the same environment scope was not removed. Remove any Kubernetes clusters with the same environment scope from the GitLab group before redeploying. - -- ### What's gitlab-terraform? - - - `gitlab-terraform` is a thin wrapper around the `terraform` binary. as part of the [GitLab Terraform docker image](https://gitlab.com/gitlab-org/terraform-images) used in `.gitlab-ci.yml`. 
- -- ### In case your tf-apply stage is failed in between - - Correct the source code ,commit and push the code or ensure you manually trigger tf-destroy stage and cleanup the provisioned resources diff --git a/examples/ci-cd/gitlab-ci-cd/main.tf b/examples/ci-cd/gitlab-ci-cd/main.tf deleted file mode 100644 index 468ce747f3..0000000000 --- a/examples/ci-cd/gitlab-ci-cd/main.tf +++ /dev/null @@ -1,105 +0,0 @@ -provider "gitlab" { - # Configuration options - the GitLab token that this provider requires is pulled from the variables set in the CI/CD settings of the GitLab repository -} - -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -provider "helm" { - kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- - -module "eks_blueprints" { - source = "../../.." - - cluster_name = local.name - cluster_version = "1.23" - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - managed_node_groups = { - mg_5 = { - node_group_name = "managed-ondemand" - instance_types = ["m5.large"] - subnet_ids = module.vpc.private_subnets - max_size = 10 - } - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} diff --git a/examples/ci-cd/gitlab-ci-cd/outputs.tf b/examples/ci-cd/gitlab-ci-cd/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/ci-cd/gitlab-ci-cd/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile 
and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl -} diff --git a/examples/ci-cd/gitlab-ci-cd/variables.tf b/examples/ci-cd/gitlab-ci-cd/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/ci-cd/gitlab-ci-cd/versions.tf b/examples/ci-cd/gitlab-ci-cd/versions.tf deleted file mode 100644 index b2d0e4d8f5..0000000000 --- a/examples/ci-cd/gitlab-ci-cd/versions.tf +++ /dev/null @@ -1,26 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - gitlab = { - source = "gitlabhq/gitlab" - version = "3.7.0" - } - } - - # storing tfstate with GitLab-managed Terraform state, read more here: https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html - backend "http" { - } -} diff --git a/examples/ingress-controllers/nginx/README.md b/examples/ingress-controllers/nginx/README.md deleted file mode 100644 index 622daa340f..0000000000 --- a/examples/ingress-controllers/nginx/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# EKS Cluster Deployment with the nginx add-on enabled - -This example deploys the following Basic EKS Cluster with VPC. In AWS we use a Network load balancer (NLB) to expose the NGINX Ingress controller behind a Service of _Type=LoadBalancer_ leveraging AWS Load Balancer Controller (LBC). - -- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets -- Creates EKS Cluster Control plane with managed nodes -- Creates the nginx controller resources; such as an internet facing AWS Network Load Balancer, AWS IAM role and policy - for the nginx service account, etc. - - Nginx controller service is using the LBC annotations to manage the NLB. - -## How to Deploy - -### Prerequisites - -Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply - -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - -### Deployment Steps - -#### Step 1: Clone the repo using the command below - -```sh -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -#### Step 2: Run Terraform INIT - -Initialize a working directory with configuration files - -```sh -cd examples/ingress-controllers/nginx -terraform init -``` - -#### Step 3: Run Terraform PLAN - -Verify the resources created by this execution - -```sh -export AWS_REGION= # Select your own region -terraform plan -``` - -#### Step 4: Finally, Terraform APPLY - -**Deploy the pattern** - -```sh -terraform apply -``` - -Enter `yes` to apply. - -### Configure `kubectl` and test cluster - -EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. -This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. 
- -#### Step 5: Run `update-kubeconfig` command - -`~/.kube/config` file gets updated with cluster details and certificate from the below command - -```sh - aws eks --region update-kubeconfig --name -``` - -#### Step 6: List all the worker nodes by running the command below - -```sh - kubectl get nodes -``` - -#### Step 7: List all the pods running in `nginx` namespace - -```sh - kubectl get pods -n nginx -``` - -## How to Destroy - -The following command destroys the resources created by `terraform apply` - -```sh -cd examples/ingress-controllers/nginx -terraform destroy -target="module.eks_blueprints_kubernetes_addons.module.ingress_nginx[0]" -auto-approve -terraform destroy -target="module.eks_blueprints_kubernetes_addons.module.aws_load_balancer_controller[0]" -auto-approve -terraform destroy -target="module.eks-blueprints-kubernetes-addons" -auto-approve -terraform destroy -target="module.eks-blueprints" -auto-approve -terraform destroy -auto-approve -``` - -## Learn more - -Read more about using NLB to expose the NGINX ingress controller using AWS Load Balancer Controller [here](https://kubernetes.github.io/ingress-nginx/deploy/#aws). diff --git a/examples/ingress-controllers/nginx/main.tf b/examples/ingress-controllers/nginx/main.tf deleted file mode 100644 index 0082ce25ff..0000000000 --- a/examples/ingress-controllers/nginx/main.tf +++ /dev/null @@ -1,127 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -provider "helm" { - kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- - -module "eks_blueprints" { - source = "../../.." 
- - cluster_name = local.name - cluster_version = "1.23" - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - managed_node_groups = { - mg_5 = { - node_group_name = "managed-ondemand" - instance_types = ["m5.large"] - min_size = 2 - subnet_ids = module.vpc.private_subnets - } - } - - tags = local.tags -} - -module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" - - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - - # EKS Managed Add-ons - enable_amazon_eks_coredns = true - enable_amazon_eks_kube_proxy = true - - # Add-ons - enable_metrics_server = true - enable_cluster_autoscaler = true - enable_aws_load_balancer_controller = true - - enable_ingress_nginx = true - ingress_nginx_helm_config = { - version = "4.0.17" - values = [templatefile("${path.module}/nginx_values.yaml", {})] - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} diff --git a/examples/ingress-controllers/nginx/nginx_values.yaml b/examples/ingress-controllers/nginx/nginx_values.yaml deleted file mode 100644 index d5be686648..0000000000 --- a/examples/ingress-controllers/nginx/nginx_values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -controller: - service: - externalTrafficPolicy: "Local" - annotations: - # AWS Load Balancer Controller Annotations - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp # or 'ssl' - service.beta.kubernetes.io/aws-load-balancer-attributes: load_balancing.cross_zone.enabled=true - service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '60' - service.beta.kubernetes.io/aws-load-balancer-type: 'external' - service.beta.kubernetes.io/aws-load-balancer-scheme: 'internet-facing' # or 'internal' - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: 'ip' diff --git a/examples/ingress-controllers/nginx/outputs.tf b/examples/ingress-controllers/nginx/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/ingress-controllers/nginx/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = 
module.eks_blueprints.configure_kubectl -} diff --git a/examples/ingress-controllers/nginx/variables.tf b/examples/ingress-controllers/nginx/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/ingress-controllers/nginx/versions.tf b/examples/ingress-controllers/nginx/versions.tf deleted file mode 100644 index 81f80777ee..0000000000 --- a/examples/ingress-controllers/nginx/versions.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/nginx/terraform.tfstate" - # } -} diff --git a/examples/node-groups/fargate-profiles/README.md b/examples/node-groups/fargate-profiles/README.md deleted file mode 100644 index 839d71f66c..0000000000 --- a/examples/node-groups/fargate-profiles/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# EKS Cluster with Fargate Profiles - -This example deploys a new EKS Cluster into a new VPC and crates a Fargate profile. - -- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets -- Creates an EKS Cluster Control plane with one Fargate profile - -## How to Deploy - -### Prerequisites: - -Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply - -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - -### Deployment Steps - -#### Step 1: Clone the repo using the command below - -```sh -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -#### Step 2: Run Terraform INIT - -Initialize a working directory with configuration files - -```sh -cd examples/node-groups/fargate-profiles/ -terraform init -``` - -#### Step 3: Run Terraform PLAN - -Verify the resources created by this execution - -```sh -export AWS_REGION= # Select your own region -terraform plan -``` - -#### Step 4: Finally, Terraform APPLY - -to create resources - -```sh -terraform apply -``` - -Enter `yes` to apply - -### Configure `kubectl` and test cluster - -EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. -This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. - -#### Step 5: Run `update-kubeconfig` command - -`~/.kube/config` file gets updated with cluster details and certificate from the below command - - $ aws eks --region update-kubeconfig --name - -#### Step 6: Create a simple pod with respective match labels. 
- - $ kubectl run test-pod --image=nginx --labels="Zone=dev,Environment=preprod,env=fargate" - -#### Step 7: List all the nodes by running the command below and verify the fargate nodes - - $ kubectl get nodes - -#### Step 8: List all the pods running in `kube-system` namespace - - $ kubectl get pods -n kube-system - -Note : CoreDNS requires [additional setup](https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html) if customers use only Fargate - -## How to Destroy - -The following command destroys the resources created by `terraform apply` - -```sh -cd examples/node-groups/fargate-profiles -terraform destroy --auto-approve -``` diff --git a/examples/node-groups/fargate-profiles/main.tf b/examples/node-groups/fargate-profiles/main.tf deleted file mode 100644 index d532108776..0000000000 --- a/examples/node-groups/fargate-profiles/main.tf +++ /dev/null @@ -1,102 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." 
- - cluster_name = local.name - cluster_version = "1.23" - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - fargate_profiles = { - default = { - fargate_profile_name = "default" - fargate_profile_namespaces = [{ - namespace = "default" - k8s_labels = { - Environment = "preprod" - Zone = "dev" - env = "fargate" - } - }] - - subnet_ids = module.vpc.private_subnets - - additional_tags = { - ExtraTag = "Fargate" - } - } - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} diff --git a/examples/node-groups/fargate-profiles/outputs.tf b/examples/node-groups/fargate-profiles/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/node-groups/fargate-profiles/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl -} diff --git a/examples/node-groups/fargate-profiles/variables.tf b/examples/node-groups/fargate-profiles/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/node-groups/fargate-profiles/versions.tf b/examples/node-groups/fargate-profiles/versions.tf deleted file mode 100644 index aa8cd774e8..0000000000 --- a/examples/node-groups/fargate-profiles/versions.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/fargate-profiles/terraform.tfstate" - # } -} diff --git a/examples/node-groups/managed-node-groups/README.md b/examples/node-groups/managed-node-groups/README.md deleted file mode 100644 index 6a0d5a74b0..0000000000 --- a/examples/node-groups/managed-node-groups/README.md +++ /dev/null @@ -1,124 +0,0 @@ -# EKS Cluster with Managed Node Group - -This example deploys a new EKS Cluster with a Managed node group into a new VPC. 
- -- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets -- Creates an EKS Cluster Control plane with Managed node groups - -## How to Deploy - -### Prerequisites: - -Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply - -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - -### Deployment Steps - -#### Step 1: Clone the repo using the command below - -```sh -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -#### Step 2: Run Terraform INIT - -Initialize a working directory with configuration files - -```sh -cd examples/node-groups/managed-node-groups/ -terraform init -``` - -#### Step 3: Run Terraform PLAN - -Verify the resources created by this execution - -```sh -export AWS_REGION= # Select your own region -terraform plan -``` - -#### Step 4: Finally, Terraform APPLY - -**Deploy the pattern** - -```sh -terraform apply -``` - -Enter `yes` to apply. - -### Configure `kubectl` and test cluster - -EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. -This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. - -#### Step 5: Run `update-kubeconfig` command - -`~/.kube/config` file gets updated with cluster details and certificate from the below command - - $ aws eks --region update-kubeconfig --name - -#### Step 6: List all the worker nodes by running the command below - - $ kubectl get nodes - -#### Step 7: List all the pods running in `kube-system` namespace - - $ kubectl get pods -n kube-system - -#### Step 8: Deploy a pod on the spots nodegroups (spot_2vcpu_8mem and spot_4vcpu_16mem) - -Remember: - -- we created the spot_2vcpu_8mem nodegroup with a desired of 1 a min of 1 and a max of 2. -- we created the spot_4vcpu_16mem nodegroup with a desired of 0 a min of 0 and a max of 3. -- cluster-autoscaler is configured with priority expander with a priority on spot_2vcpu_8mem and then on spot_4vcpu_16mem and then any matching nodegroup - -Create a deployment with kubernetes/nginx-spot.yaml, which request spot instance through it's node selector and tolerate them: - -```bash -kubectl apply -f kubernetes/nginx-spot.yaml -``` - -If we scale the deployment, it will fullfill first the 2 nodes in the nodegroup spot_2vcpu_8mem - -```bash -kubectl scale deployment/nginx-spot --replicas=10 -``` - -If we scale again, it will need more nodes and will scale the nodegroup spot_4vcpu_16mem from 0. - -```bash -kubectl scale deployment/nginx-spot --replicas=20 -``` - -## Cleanup - -To clean up your environment, first remove your workloads: - -```bash -kubectl delete -f kubernetes/nginx-spot.yaml -``` - -Node group spot_2vcpu_8mem will scale down to 1 and node group spot_2vcpu_16mem will scale down to 0. - -then destroy the Terraform modules in reverse order. 
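
If you want to watch the scale-down happen before destroying anything, the capacity-type and node-group labels that EKS applies to managed nodes make it easy to follow; an optional check might look like:

```sh
# Watch the SPOT nodes drain away as cluster-autoscaler returns the groups to their minimum sizes
watch -n 10 "kubectl get nodes -L eks.amazonaws.com/capacityType -L eks.amazonaws.com/nodegroup"
```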
- -Destroy the Kubernetes Add-ons, EKS cluster with Node groups and VPC - -```sh -terraform destroy -target="module.eks_blueprints_kubernetes_addons" -auto-approve -terraform destroy -target="module.eks_blueprints" -auto-approve -terraform destroy -target="module.vpc" -auto-approve -``` - -Finally, destroy any additional resources that are not in the above modules - -```sh -terraform destroy -auto-approve -``` diff --git a/examples/node-groups/managed-node-groups/main.tf b/examples/node-groups/managed-node-groups/main.tf deleted file mode 100644 index 955293ecae..0000000000 --- a/examples/node-groups/managed-node-groups/main.tf +++ /dev/null @@ -1,420 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -provider "helm" { - kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_ami" "amazonlinux2eks" { - most_recent = true - - filter { - name = "name" - values = ["amazon-eks-node-${local.cluster_version}-*"] - } - - owners = ["amazon"] -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - cluster_version = "1.23" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." - - cluster_name = local.name - cluster_version = local.cluster_version - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - node_security_group_additional_rules = { - # Extend node-to-node security group rules. Recommended and required for the Add-ons - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - # Recommended outbound traffic for Node groups - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane. - # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc. 
- # Change this according to your security requirements if needed - ingress_cluster_to_node_all_traffic = { - description = "Cluster API to Nodegroup all traffic" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - source_cluster_security_group = true - } - } - - managed_node_groups = { - # Managed Node groups with minimum config - mg5 = { - node_group_name = "mg5" - instance_types = ["m5.large"] - min_size = 2 - create_iam_role = false # Changing `create_iam_role=false` to bring your own IAM Role - iam_role_arn = aws_iam_role.managed_ng.arn - disk_size = 100 # Disk size is used only with Managed Node Groups without Launch Templates - update_config = [{ - max_unavailable_percentage = 30 - }] - }, - # Managed Node groups with Launch templates using AMI TYPE - mng_lt = { - # Node Group configuration - node_group_name = "mng_lt" # Max 40 characters for node group name - - ami_type = "AL2_x86_64" # Available options -> AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM - release_version = "" # Enter AMI release version to deploy the latest AMI released by AWS. Used only when you specify ami_type - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["r5d.large"] # List of instances used only for SPOT type - format_mount_nvme_disk = true # format and mount NVMe disks ; default to false - - # Launch template configuration - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - - enable_monitoring = true - eni_delete = true - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates - - http_endpoint = "enabled" - http_tokens = "optional" - http_put_response_hop_limit = 3 - - # pre_userdata can be used in both cases where you provide custom_ami_id or ami_type - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - - # Taints can be applied through EKS API or through Bootstrap script using kubelet_extra_args - # e.g., k8s_taints = [{key= "spot", value="true", "effect"="NO_SCHEDULE"}] - k8s_taints = [] - - # Node Labels can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_labels = { - Environment = "preprod" - Zone = "dev" - Runtime = "docker" - } - - # Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 100 - } - ] - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. 
Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - additional_iam_policies = [] # Attach additional IAM policies to the IAM role attached to this worker group - - # SSH ACCESS Optional - Recommended to use SSM Session manager - remote_access = false - ec2_ssh_key = "" - ssh_security_group_id = "" - - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "private" - } - } - # Managed Node groups with Launch templates using CUSTOM AMI with ContainerD runtime - mng_custom_ami = { - # Node Group configuration - node_group_name = "mng_custom_ami" # Max 40 characters for node group name - - # custom_ami_id is optional when you provide ami_type. Enter the Custom AMI id if you want to use your own custom AMI - custom_ami_id = data.aws_ami.amazonlinux2eks.id - capacity_type = "ON_DEMAND" # ON_DEMAND or SPOT - instance_types = ["r5d.large"] # List of instances used only for SPOT type - - # Launch template configuration - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - - # pre_userdata will be applied by using custom_ami_id or ami_type - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - - # post_userdata will be applied only by using custom_ami_id - post_userdata = <<-EOT - echo "Bootstrap successfully completed! You can further apply config or install to run after bootstrap if needed" - EOT - - # kubelet_extra_args used only when you pass custom_ami_id; - # --node-labels is used to apply Kubernetes Labels to Nodes - # --register-with-taints used to apply taints to Nodes - # e.g., kubelet_extra_args='--node-labels=WorkerType=SPOT,noderole=spark --register-with-taints=spot=true:NoSchedule --max-pods=58', - kubelet_extra_args = "--node-labels=WorkerType=SPOT,noderole=spark --register-with-taints=test=true:NoSchedule --max-pods=20" - - # bootstrap_extra_args used only when you pass custom_ami_id. Allows you to change the Container Runtime for Nodes - # e.g., bootstrap_extra_args="--use-max-pods false --container-runtime containerd" - bootstrap_extra_args = "--use-max-pods false --container-runtime containerd" - - # Taints can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_taints = [] - - # Node Labels can be applied through EKS API or through Bootstrap script using kubelet_extra_args - k8s_labels = { - Environment = "preprod" - Zone = "dev" - Runtime = "containerd" - } - - enable_monitoring = true - eni_delete = true - public_ip = false # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates - - # Node Group scaling configuration - desired_size = 2 - max_size = 2 - min_size = 2 - - block_device_mappings = [ - { - device_name = "/dev/xvda" - volume_type = "gp3" - volume_size = 150 - } - ] - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. 
Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - additional_iam_policies = [] # Attach additional IAM policies to the IAM role attached to this worker group - - # SSH ACCESS Optional - Recommended to use SSM Session manager - remote_access = false - ec2_ssh_key = "" - ssh_security_group_id = "" - - additional_tags = { - ExtraTag = "mng-custom-ami" - Name = "mng-custom-ami" - subnet_type = "private" - } - } - # Managed Node group with Launch templates using AMI TYPE and SPOT instances of 2 vCPUs and 8 Gib Memory - spot_2vcpu_8mem = { - node_group_name = "mng-spot-2vcpu-8mem" - capacity_type = "SPOT" - instance_types = ["m5.large", "m4.large", "m6a.large", "m5a.large", "m5d.large"] - max_size = 2 - desired_size = 1 - min_size = 1 - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] - } - - # Managed Node group with Launch templates using AMI TYPE and SPOT instances of 4 vCPUs and 16 Gib Memory - spot_4vcpu_16mem = { - node_group_name = "mng-spot-4vcpu-16mem" - capacity_type = "SPOT" - instance_types = ["m5.xlarge", "m4.xlarge", "m6a.xlarge", "m5a.xlarge", "m5d.xlarge"] - - # Node Group network configuration - subnet_type = "private" # public or private - Default uses the private subnets used in control plane if you don't pass the "subnet_ids" - subnet_ids = [] # Defaults to private subnet-ids used by EKS Control plane. 
Define your private/public subnets list with comma separated subnet_ids = ['subnet1','subnet2','subnet3'] - - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] - - # NOTE: If we want the node group to scale-down to zero nodes, - # we need to use a custom launch template and define some additional tags for the ASGs - min_size = 0 - - # Launch template configuration - create_launch_template = true # false will use the default launch template - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - - # This is so cluster autoscaler can identify which node (using ASGs tags) to scale-down to zero nodes - additional_tags = { - "k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType" = "SPOT" - "k8s.io/cluster-autoscaler/node-template/label/eks/node_group_name" = "mng-spot-2vcpu-8mem" - } - } - } - - tags = local.tags -} - -module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" - - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - - enable_metrics_server = true - enable_cluster_autoscaler = true - cluster_autoscaler_helm_config = { - set = [ - { - name = "extraArgs.expander" - value = "priority" - }, - { - name = "expanderPriorities" - value = <<-EOT - 100: - - .*-spot-2vcpu-8mem.* - 90: - - .*-spot-4vcpu-16mem.* - 10: - - .* - EOT - } - ] - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Custom IAM roles for Node Groups -#--------------------------------------------------------------- -data "aws_iam_policy_document" "managed_ng_assume_role_policy" { - statement { - sid = "EKSWorkerAssumeRole" - - actions = [ - "sts:AssumeRole", - ] - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -resource "aws_iam_role" "managed_ng" { - name = "managed-node-role" - description = "EKS Managed Node group IAM Role" - assume_role_policy = data.aws_iam_policy_document.managed_ng_assume_role_policy.json - path = "/" - force_detach_policies = true - managed_policy_arns = [ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - 
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", - "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - ] - - tags = local.tags -} - -resource "aws_iam_instance_profile" "managed_ng" { - name = "managed-node-instance-profile" - role = aws_iam_role.managed_ng.name - path = "/" - - lifecycle { - create_before_destroy = true - } - - tags = local.tags -} diff --git a/examples/node-groups/managed-node-groups/outputs.tf b/examples/node-groups/managed-node-groups/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/node-groups/managed-node-groups/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl -} diff --git a/examples/node-groups/managed-node-groups/variables.tf b/examples/node-groups/managed-node-groups/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/node-groups/managed-node-groups/versions.tf b/examples/node-groups/managed-node-groups/versions.tf deleted file mode 100644 index c30e922ddc..0000000000 --- a/examples/node-groups/managed-node-groups/versions.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/managed-node-groups/terraform.tfstate" - # } -} diff --git a/examples/node-groups/self-managed-node-groups/README.md b/examples/node-groups/self-managed-node-groups/README.md deleted file mode 100644 index ca8e67a11c..0000000000 --- a/examples/node-groups/self-managed-node-groups/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# EKS Cluster with Self-managed Node Group - -This example deploys a new EKS Cluster with a [self-managed node group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) into a new VPC. - -- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -- Creates an Internet gateway for the Public Subnets and a NAT Gateway for the Private Subnets -- Creates an EKS Cluster Control plane with a self-managed node group - -## How to Deploy - -### Prerequisites: - -Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run Terraform Plan and Apply - -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -3. 
[Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - -### Deployment Steps - -#### Step 1: Clone the repo using the command below - -```sh -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -#### Step 2: Run Terraform INIT - -Initialize a working directory with configuration files - -```sh -cd examples/node-groups/self-managed-node-groups/ -terraform init -``` - -#### Step 3: Run Terraform PLAN - -Verify the resources created by this execution - -```sh -export AWS_REGION= # Select your own region -terraform plan -``` - -#### Step 4: Finally, Terraform APPLY - -to create resources - -```sh -terraform apply -``` - -Enter `yes` to apply - -### Configure `kubectl` and test cluster - -EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. -This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. - -#### Step 5: Run `update-kubeconfig` command - -`~/.kube/config` file gets updated with cluster details and certificate from the below command - - $ aws eks --region update-kubeconfig --name - -#### Step 6: List all the worker nodes by running the command below - - $ kubectl get nodes - -#### Step 7: List all the pods running in `kube-system` namespace - - $ kubectl get pods -n kube-system - -#### Step 8: List the auto scaling group created for the self-managed node group - - $ aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names aws001-preprod-dev-eks-self-managed-ondemand - -## How to Destroy - -The following command destroys the resources created by `terraform apply` - -```sh -cd examples/node-groups/self-managed-node-groups -terraform destroy --auto-approve -``` diff --git a/examples/node-groups/self-managed-node-groups/main.tf b/examples/node-groups/self-managed-node-groups/main.tf deleted file mode 100644 index 0ff5dd5e23..0000000000 --- a/examples/node-groups/self-managed-node-groups/main.tf +++ /dev/null @@ -1,277 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -provider "helm" { - kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." 
- - cluster_name = local.name - cluster_version = "1.23" - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - self_managed_node_groups = { - self_mg4 = { - node_group_name = "self_mg4" - launch_template_os = "amazonlinux2eks" - subnet_ids = module.vpc.private_subnets - } - self_mg5 = { - node_group_name = "self_mg5" # Name is used to create a dedicated IAM role for each node group and adds to AWS-AUTH config map - - subnet_type = "private" - subnet_ids = module.vpc.private_subnets # Optional defaults to Private Subnet Ids used by EKS Control Plane - create_launch_template = true - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows - custom_ami_id = "" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. - - create_iam_role = false # Changing `create_iam_role=false` to bring your own IAM Role - iam_role_arn = aws_iam_role.self_managed_ng.arn # custom IAM role for aws-auth mapping; used when create_iam_role = false - iam_instance_profile_name = aws_iam_instance_profile.self_managed_ng.name # IAM instance profile name for Launch templates; used when create_iam_role = false - - format_mount_nvme_disk = true - public_ip = false - enable_monitoring = false - - enable_metadata_options = false - - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent - EOT - - post_userdata = "" # Optional config - - # --node-labels is used to apply Kubernetes Labels to Nodes - # --register-with-taints used to apply taints to Nodes - # e.g., kubelet_extra_args='--node-labels=WorkerType=SPOT,noderole=spark --register-with-taints=spot=true:NoSchedule --max-pods=58', - kubelet_extra_args = "--node-labels=WorkerType=SPOT,noderole=spark --register-with-taints=test=true:NoSchedule --max-pods=20" - - # bootstrap_extra_args used only when you pass custom_ami_id. 
Allows you to change the Container Runtime for Nodes - # e.g., bootstrap_extra_args="--use-max-pods false --container-runtime containerd" - bootstrap_extra_args = "--use-max-pods false" - - block_device_mappings = [ - { - device_name = "/dev/xvda" # mount point to / - volume_type = "gp3" - volume_size = 50 - }, - { - device_name = "/dev/xvdf" # mount point to /local1 (it could be local2, depending upon the disks are attached during boot) - volume_type = "gp3" - volume_size = 80 - iops = 3000 - throughput = 125 - }, - { - device_name = "/dev/xvdg" # mount point to /local2 (it could be local1, depending upon the disks are attached during boot) - volume_type = "gp3" - volume_size = 100 - iops = 3000 - throughput = 125 - } - ] - - instance_type = "m5.large" - desired_size = 2 - max_size = 10 - min_size = 2 - capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot" - - k8s_labels = { - Environment = "preprod" - Zone = "test" - WorkerType = "SELF_MANAGED_ON_DEMAND" - } - - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "private" - } - } - - spot_2vcpu_8mem = { - node_group_name = "smng-spot-2vcpu-8mem" - capacity_type = "spot" - capacity_rebalance = true - instance_types = ["m5.large", "m4.large", "m6a.large", "m5a.large", "m5d.large"] - min_size = 0 - subnet_ids = module.vpc.private_subnets - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] - } - - spot_4vcpu_16mem = { - node_group_name = "smng-spot-4vcpu-16mem" - capacity_type = "spot" - capacity_rebalance = true - instance_types = ["m5.xlarge", "m4.xlarge", "m6a.xlarge", "m5a.xlarge", "m5d.xlarge"] - min_size = 0 - subnet_ids = module.vpc.private_subnets - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket - k8s_taints = [{ key = "spotInstance", value = "true", effect = "NO_SCHEDULE" }] - } - } -} - -module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups - - # EKS Managed Add-ons - enable_amazon_eks_vpc_cni = true - enable_amazon_eks_coredns = true - enable_amazon_eks_kube_proxy = true - - #K8s Add-ons - enable_metrics_server = true - enable_aws_node_termination_handler = true - - enable_cluster_autoscaler = true - cluster_autoscaler_helm_config = { - set = [ - { - name = "extraArgs.expander" - value = "priority" - }, - { - name = "expanderPriorities" - value = <<-EOT - 100: - - .*-spot-2vcpu-8mem.* - 90: - - .*-spot-4vcpu-16mem.* - 10: - - .* - EOT - } - ] - } -} - -#--------------------------------------------------------------- -# Custom IAM role for Self Managed Node Group -#--------------------------------------------------------------- -data "aws_iam_policy_document" "self_managed_ng_assume_role_policy" { - statement { - sid = "EKSWorkerAssumeRole" - - actions = [ - "sts:AssumeRole", - ] - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -resource "aws_iam_role" "self_managed_ng" { - name = "self-managed-node-role" - description = "EKS Managed Node group IAM Role" - assume_role_policy = 
data.aws_iam_policy_document.self_managed_ng_assume_role_policy.json - path = "/" - force_detach_policies = true - managed_policy_arns = [ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", - "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - ] - - tags = local.tags -} - -resource "aws_iam_instance_profile" "self_managed_ng" { - name = "self-managed-node-instance-profile" - role = aws_iam_role.self_managed_ng.name - path = "/" - - lifecycle { - create_before_destroy = true - } - - tags = local.tags -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} diff --git a/examples/node-groups/self-managed-node-groups/outputs.tf b/examples/node-groups/self-managed-node-groups/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/node-groups/self-managed-node-groups/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl -} diff --git a/examples/node-groups/self-managed-node-groups/variables.tf b/examples/node-groups/self-managed-node-groups/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/node-groups/self-managed-node-groups/versions.tf b/examples/node-groups/self-managed-node-groups/versions.tf deleted file mode 100644 index daf48803c1..0000000000 --- a/examples/node-groups/self-managed-node-groups/versions.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/self-managed-node-groups/terraform.tfstate" - # } -} diff --git a/examples/node-groups/windows-node-groups/README.md b/examples/node-groups/windows-node-groups/README.md deleted file mode 100644 index 365cf41bff..0000000000 --- a/examples/node-groups/windows-node-groups/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# EKS Cluster with 
Windows support - -This example deploys the following AWS resources. - -- A new VPC, 3 AZs with private and public subnets -- Necessary VPC endpoints for node groups in private subnets -- An Internet gateway for the VPC and a NAT gateway in each public subnet -- An EKS cluster with an AWS-managed node group of spot Linux worker nodes and a self-managed node group of on-demand Windows worker nodes - -# How to deploy - -## Prerequisites: - -Ensure that you have installed the following tools in your Mac or Windows Laptop before start working with this module and run `terraform plan` and `terraform apply` - -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/) -3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - -## Deployment steps - -### Step 1: Clone the repo using the command below - -```sh -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -### Step 2: Run `terraform init` - -to initialize a working directory with configuration files - -```sh -cd examples/node-groups/windows-node-groups -terraform init -``` - -### Step 3: Run `terraform plan` - -to verify the resources created by this execution - -```sh -export AWS_REGION=us-west-2 # Select your own region -terraform plan -``` - -If you want to use a region other than `us-west-2`, update the `aws_region` name and `aws_availability_zones` filter in the data sources in [main.tf](./main.tf) accordingly. - -### Step 4: Run `terraform apply` - -**Deploy the pattern** - -```sh -terraform apply -``` - -Enter `yes` to apply. - -## Configure kubectl and test cluster - -EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of cluster. This following command used to update the `kubeconfig` in your local machine where you run kubectl commands to interact with your EKS Cluster. - -### Step 5: Run `update-kubeconfig` command. - -`~/.kube/config` file gets updated with EKS cluster context from the below command. Replace the region name and EKS cluster name with your cluster's name. - - $ aws eks --region us-west-2 update-kubeconfig --name - -### Step 6: (Optional) Deploy sample Windows and Linux workloads to verify support for both operating systems - -When Windows support is enabled in the cluster, it is necessary to use one of the ways to assign pods to specific nodes, such as `nodeSelector` or `affinity`. See the [K8s documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more info. This example uses `nodeSelector`s to select nodes with appropriate OS for pods. - -#### Sample Windows deployment - -```sh -cd examples/node-groups/windows-node-groups - -# Sample Windows deployment -kubectl apply -f ./k8s/windows-iis-aspnet.yaml - -# Wait for the Windows pod status to change to Running -# The following command will work on Linux -# On Mac, install the watch command using brew install watch -watch -n 1 "kubectl get po -n windows" - -# When the pod starts running, create a proxy to the K8s API -kubectl proxy -``` - -Now visit [http://127.0.0.1:8001/api/v1/namespaces/windows/services/aspnet/proxy/demo](http://127.0.0.1:8001/api/v1/namespaces/windows/services/aspnet/proxy/demo) in your browser. If everything went well, the page should display text "Hello, World!". Use Ctrl+C in your terminal to stop the `kubectl` proxy. 
- -Note: The `aspnet` service created by above example is a `LoadBalancer` service, so you can also visit the Network Load Balancer (NLB) endpoint in your browser instead of using `kubectl proxy` as mentioned above. To be able to access the NLB endpoint, update the security group attached to the Windows node where the `aspnet` pod is running to allow inbound access to port 80 from your IP address. You can grab the NLB endpoint from the service using the following command: - -``` -kubectl get svc -n windows -o jsonpath="{.items[0].status.loadBalancer.ingress[0].hostname}" -``` - -#### Sample Linux deployment - -```sh -# Sample Linux deployment -kubectl apply -f ./k8s/linux-nginx.yaml -``` - -## Cleanup - -```sh -cd examples/node-groups/windows-node-groups - -# If you deployed sample Windows & Linux workloads from Step 6 -kubectl delete svc,deploy -n windows --all -kubectl delete svc,deploy -n linux --all -``` - -```sh -terraform destroy -target="module.eks_blueprints_kubernetes_addons" -auto-approve -terraform destroy -target="module.eks_blueprints" -auto-approve -terraform destroy -target="module.vpc" -auto-approve -``` - -Finally, destroy any additional resources that are not in the above modules - -```sh -terraform destroy -auto-approve -``` - -## See also - -- [EKS Windows support considerations](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) diff --git a/examples/node-groups/windows-node-groups/k8s/linux-nginx.yaml b/examples/node-groups/windows-node-groups/k8s/linux-nginx.yaml deleted file mode 100644 index 03268d68b3..0000000000 --- a/examples/node-groups/windows-node-groups/k8s/linux-nginx.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - app.kubernetes.io/name: linux - name: linux ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx - namespace: linux -spec: - selector: - matchLabels: - run: nginx - replicas: 1 - template: - metadata: - labels: - run: nginx - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 - resources: - limits: - cpu: 100m - memory: 128Mi - nodeSelector: - kubernetes.io/os: linux ---- -apiVersion: v1 -kind: Service -metadata: - name: nginx - namespace: linux -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - run: nginx - sessionAffinity: None - type: ClusterIP diff --git a/examples/node-groups/windows-node-groups/k8s/windows-iis-aspnet.yaml b/examples/node-groups/windows-node-groups/k8s/windows-iis-aspnet.yaml deleted file mode 100644 index 917c36b4b0..0000000000 --- a/examples/node-groups/windows-node-groups/k8s/windows-iis-aspnet.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - app.kubernetes.io/name: windows - name: windows ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: aspnet - namespace: windows -spec: - selector: - matchLabels: - app: aspnet - replicas: 1 - template: - metadata: - labels: - app: aspnet - spec: - containers: - - name: aspnet - image: mcr.microsoft.com/dotnet/framework/samples:aspnetapp - env: - - name: DEMO_NAME - value: "World" - command: - - powershell.exe - - -command - - > - echo '
Hello, <%= Environment.GetEnvironmentVariable("DEMO_NAME") %>!
' > C:\\inetpub\\wwwroot\\demo.aspx; - C:\\ServiceMonitor.exe "w3svc"; - resources: {} - nodeSelector: - kubernetes.io/os: windows ---- -apiVersion: v1 -kind: Service -metadata: - name: aspnet - namespace: windows - annotations: - service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" - service.beta.kubernetes.io/aws-load-balancer-type: "external" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: aspnet - sessionAffinity: None - type: LoadBalancer diff --git a/examples/node-groups/windows-node-groups/main.tf b/examples/node-groups/windows-node-groups/main.tf deleted file mode 100644 index 285b084835..0000000000 --- a/examples/node-groups/windows-node-groups/main.tf +++ /dev/null @@ -1,159 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token -} - -provider "helm" { - kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} - -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - -data "aws_availability_zones" "available" {} - -locals { - name = basename(path.cwd) - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Blueprint = local.name - GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" - } -} - -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." 
- - cluster_name = local.name - cluster_version = "1.23" - - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - managed_node_groups = { - mng_spot_medium = { - node_group_name = "mng-spot-med" - capacity_type = "SPOT" - instance_types = ["t3.large", "t3.xlarge"] - subnet_ids = module.vpc.private_subnets - desired_size = 2 - disk_size = 30 - } - } - - enable_windows_support = true - self_managed_node_groups = { - ng_od_windows = { - node_group_name = "ng-od-windows" - launch_template_os = "windows" - instance_type = "m5.large" - subnet_ids = module.vpc.private_subnets - min_size = 2 - } - } - - tags = local.tags -} - -module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" - - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - - # EKS Managed Add-ons - enable_amazon_eks_coredns = true - enable_amazon_eks_kube_proxy = true - - # Add-ons - enable_aws_load_balancer_controller = true - aws_load_balancer_controller_helm_config = { - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - } - ] - } - - enable_metrics_server = true - metrics_server_helm_config = { - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - } - ] - } - - enable_cluster_autoscaler = true - cluster_autoscaler_helm_config = { - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - } - ] - } - - tags = local.tags - -} - -#--------------------------------------------------------------- -# Supporting Resources -#--------------------------------------------------------------- -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # Manage so we can name - manage_default_network_acl = true - default_network_acl_tags = { Name = "${local.name}-default" } - manage_default_route_table = true - default_route_table_tags = { Name = "${local.name}-default" } - manage_default_security_group = true - default_security_group_tags = { Name = "${local.name}-default" } - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} diff --git a/examples/node-groups/windows-node-groups/outputs.tf b/examples/node-groups/windows-node-groups/outputs.tf deleted file mode 100644 index 55552d3138..0000000000 --- a/examples/node-groups/windows-node-groups/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl -} diff --git a/examples/node-groups/windows-node-groups/variables.tf b/examples/node-groups/windows-node-groups/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/node-groups/windows-node-groups/versions.tf 
b/examples/node-groups/windows-node-groups/versions.tf deleted file mode 100644 index 4f3abb9816..0000000000 --- a/examples/node-groups/windows-node-groups/versions.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4.1" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/windows-node-groups/terraform.tfstate" - # } -} diff --git a/examples/portworx/README.md b/examples/portworx/README.md deleted file mode 100644 index 3b34f9732c..0000000000 --- a/examples/portworx/README.md +++ /dev/null @@ -1,206 +0,0 @@ -# Portworx add-on for EKS Blueprint - -This guide helps you install portworx on EKS environment using EKS Blueprints and its kubernetes add-on module. In this guide, we create a custom IAM policy and attach it to the node groups in EKS cluster to provide Portworx the required access. - - -The following list provides an overview of the components generated by this module: - -- 1x VPC with private and public subnets, internet gateway, route table, NAT gateway, network interface and network ACL. -- 1x EKS cluster -- 1x EKS multi-nodes managed node groups -- Installation of Portworx via Helm on the EKS cluster -- Portworx supports native integration with AWS APIs for drive creation and lifecycle management. -- User can provide drive specification to the Portworx add-on using the below format or instruct Portworx to use previously attached volumes - -## Installation - -### Prerequisites - -Ensure that the following components are installed on your local system: - -- [aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -- [kubectl](https://kubernetes.io/docs/tasks/tools/) -- [terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) - - -### Deployment steps - -#### Step 1. Clone the repository: - -```shell -git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -``` - -#### Step 2. Initialize the Terraform module: - -```shell -cd examples/portworx -terraform init -``` - -#### Step 3. Make any necessary adjustments to the `main.tf` file - -Customise the values of variables like name, region, managed_node_groups configurations to set up the cluster according to your requirements. -To customise Portworx, refer to the [configuration](#portworx-configuration) section below. - -#### Step 4. Apply the deployment with Terraform. -Terraform's target functionality is leveraged to deploy a VPC, an EKS Cluster, and Kubernetes add-ons in separate steps. - -Deploy the VPC. This step will take roughly 3 minutes to complete. - -``` -terraform apply -target="module.vpc" -``` -Deploy custom IAM policy. -``` -terraform apply -target="aws_iam_policy.portworx_eksblueprint_volume_access" -``` -Deploy the EKS cluster. This step will take roughly 14 minutes to complete. -``` -terraform apply -target="module.eks_blueprints" -``` -Deploy the add-ons. This step will take rough 5 minutes to complete. -``` -terraform apply -target="module.eks_blueprints_kubernetes_addons" -``` - - -#### Step 5 Use the AWS CLI to provision a kubeconfig profile for the cluster: - -EKS Cluster details can be extracted from "terraform output" command or from AWS Console to get the name of cluster. 
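
For example, the fully formed command (region and cluster name already substituted) is exposed by this example's `configure_kubectl` Terraform output, so you can simply print it:

```shell
# Print the ready-to-run kubeconfig command from the Terraform outputs
terraform output configure_kubectl
```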
-
-```shell
-aws eks --region <region> update-kubeconfig --name <cluster-name>
-```
-
-#### Step 6. Check that the nodes have been created and that Portworx is running:
-
-```shell
-kubectl get nodes
-```
-
-You should see 3 nodes in the list.
-
-```shell
-kubectl get stc -n kube-system
-```
-
-Result: a storage cluster with the configured name becomes `Online`, which indicates that the Portworx cluster is up.
-
-```
-NAMESPACE     NAME            CLUSTER UUID          STATUS   VERSION   AGE
-kube-system   portworx-ir7e   79608dd3-e6b2-4a0a    Online   2.11.0    3m17s
-```
-
-## Portworx Configuration
-
-The following table lists the configurable parameters of the Portworx chart and their default values; a sketch of how to pass them through `portworx_helm_config` follows the table.
-
-| Parameter | Description | Default |
-|-----------|-------------|---------|
-| `imageVersion` | The image tag to pull | "2.11.0" |
-| `useAWSMarketplace` | Set this variable to true if you wish to use an AWS Marketplace license for Portworx | "false" |
-| `clusterName` | Portworx cluster name | portworx-\ |
-| `drives` | Semi-colon separated list of drives to be used for storage. (example: "/dev/sda;/dev/sdb" or "type=gp2,size=200;type=gp3,size=500") | "type=gp2,size=200" |
-| `useInternalKVDB` | Boolean variable to set internal KVDB on/off | true |
-| `kvdbDevice` | Specify a separate device to store KVDB data; only used when internalKVDB is set to true | type=gp2,size=150 |
-| `envVars` | Semi-colon separated list of environment variables that will be exported to Portworx. (example: MYENV1=val1;MYENV2=val2) | "" |
-| `maxStorageNodesPerZone` | The maximum number of storage nodes desired per zone | 3 |
-| `useOpenshiftInstall` | Boolean variable to install Portworx on OpenShift | false |
-| `etcdEndPoint` | The etcd endpoint. Should be in the format etcd:http://(your-etcd-endpoint):2379. If there are multiple etcd endpoints they need to be ";" separated. | "" |
-| `dataInterface` | Name of the data interface | none |
-| `managementInterface` | Name of the management interface | none |
-| `useStork` | [Storage Orchestration for Hyperconvergence](https://github.com/libopenstorage/stork) | true |
-| `storkVersion` | Optional version of Stork (e.g. 2.11.0); when empty, the Portworx operator picks a version matching the Portworx version | "2.11.0" |
-| `customRegistryURL` | URL from which to pull the Portworx image | "" |
-| `registrySecret` | Image registry credentials to pull Portworx images from a secure registry | "" |
-| `licenseSecret` | Kubernetes secret name that has Portworx licensing information | "" |
-| `monitoring` | Enable monitoring on the Portworx cluster | false |
-| `enableCSI` | Enable CSI | false |
-| `enableAutopilot` | Enable Autopilot | false |
-| `KVDBauthSecretName` | Refer to https://docs.portworx.com/reference/etcd/securing-with-certificates-in-kubernetes to create a KVDB secret and specify the name of the secret here | none |
-| `deleteType` | Specify which strategy to use while uninstalling Portworx. "Uninstall" only removes Portworx, while "UninstallAndWipe" also permanently wipes all data from your disks, including the Portworx metadata | UninstallAndWipe |
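These parameters are passed to the add-on through `portworx_helm_config`, using the same `set` list pattern that appears in this example's `main.tf`. As a rough sketch only (the names and values below are placeholders, not recommendations):

```hcl
# Sketch only: placeholder values, adjust for your environment.
enable_portworx = true

portworx_helm_config = {
  set = [
    {
      name  = "clusterName"
      value = "portworx-demo" # hypothetical cluster name
    },
    {
      name  = "drives"
      value = "type=gp2,size=200" # format described in the table above
    },
    {
      name  = "maxStorageNodesPerZone"
      value = "3"
    }
  ]
}
```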
-## Uninstalling Portworx
-
-This section describes how to uninstall Portworx and remove its Kubernetes specs. When uninstalling, you may choose either to keep the data on your drives or to wipe them completely.
-
-1. Start by choosing one of the two `deleteType` options and updating the Terraform configuration:
-
-```
-portworx_helm_config = {
-  set = [
-    {
-      name  = "deleteType"
-      value = "UninstallAndWipe" # Valid values: "Uninstall" and "UninstallAndWipe"
-    }
-  ]
-}
-```
-
-`Uninstall` only removes Portworx, while `UninstallAndWipe` also permanently removes all data from your disks, including the Portworx metadata. Use caution when applying the delete strategy. The default value is `UninstallAndWipe`.
-
-2. Run `terraform apply` to apply the change:
-
-```
-terraform apply -target="module.eks_blueprints_kubernetes_addons"
-```
-
-3. Run `terraform destroy` using its target functionality to destroy the resources in layers, which prevents missed resources or errors.
-
-#### Destroy the add-ons.
-
-```sh
-terraform destroy -target="module.eks_blueprints_kubernetes_addons.module.portworx[0].module.helm_addon"
-terraform destroy -target="module.eks_blueprints_kubernetes_addons"
-```
-
-#### Destroy the EKS cluster.
-
-```sh
-terraform destroy -target="module.eks_blueprints"
-```
-
-#### Destroy the IAM policy.
-
-```sh
-terraform destroy -target="aws_iam_policy.portworx_eksblueprint_volume_access"
-```
-
-#### Destroy the VPC.
-
-```sh
-terraform destroy -target="module.vpc"
-```
-
-4. You may also want to log in via the AWS Console or CLI and manually delete any remaining EBS snapshots and volumes; they are not deleted as part of the destroy process.
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks\_blueprints](#module\_eks\_blueprints) | github.com/aws-ia/terraform-aws-eks-blueprints | n/a |
-| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | Name of cluster - used by Terratest for e2e test automation | `string` | `""` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig |
-| [eks\_cluster\_id](#output\_eks\_cluster\_id) | EKS cluster ID |
-| [region](#output\_region) | AWS region |
-| [vpc\_cidr](#output\_vpc\_cidr) | VPC CIDR |
diff --git a/examples/portworx/main.tf b/examples/portworx/main.tf
deleted file mode 100644
index 05e0b652d0..0000000000
--- a/examples/portworx/main.tf
+++ /dev/null
@@ -1,164 +0,0 @@
-provider "aws" {
-  region = local.region
-}
-
-locals {
-  name         = "portworx-enterprise-1"
-  cluster_name = coalesce(var.cluster_name, local.name)
-  region       = "us-east-1"
-
-  vpc_cidr = "10.0.0.0/16"
-  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
-
-  tags = {
-    Blueprint  = local.name
-    GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
-  }
-}
-
-provider "kubernetes" {
-  host                   = module.eks_blueprints.eks_cluster_endpoint
-  cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
-  token                  = data.aws_eks_cluster_auth.this.token
-}
-
-provider "helm" {
-  kubernetes {
-    host                   = module.eks_blueprints.eks_cluster_endpoint
-    cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
-    token                  = data.aws_eks_cluster_auth.this.token
-  }
-}
-
-data "aws_eks_cluster_auth" "this" {
-  name = module.eks_blueprints.eks_cluster_id
-}
-
-data "aws_availability_zones" "available" {}
-
-#---------------------------------------------------------------
-# Supporting Resources
-#---------------------------------------------------------------
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 3.0"
-
-  name = local.name
-  cidr = local.vpc_cidr
-
-  azs             = local.azs
-  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
-  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]
-
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  # Manage so we can name
-  manage_default_network_acl    = true
-  default_network_acl_tags      = { Name = "${local.name}-default" }
-  manage_default_route_table    = true
-  default_route_table_tags      = { Name = "${local.name}-default" }
-  manage_default_security_group = true
-  default_security_group_tags   = { Name = "${local.name}-default" }
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = 1
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/internal-elb"             = 1
-  }
-
-  tags = local.tags
-}
-
-#---------------------------------------------------------------
-# Custom IAM roles for Node Groups
-#---------------------------------------------------------------
-
-resource "aws_iam_policy" "portworx_eksblueprint_volume_access" {
-  name = "portworx_eksblueprint_volume_access"
-
-  policy = jsonencode({
-    Version = "2012-10-17"
-    Statement = [
-      {
-        Action = [
-          "ec2:AttachVolume",
-          "ec2:ModifyVolume",
-          "ec2:DetachVolume",
-          "ec2:CreateTags",
-          "ec2:CreateVolume",
-          "ec2:DeleteTags",
-          "ec2:DeleteVolume",
-          "ec2:DescribeTags",
-          "ec2:DescribeVolumeAttribute",
-          "ec2:DescribeVolumesModifications",
-          "ec2:DescribeVolumeStatus",
-          "ec2:DescribeVolumes",
-          "ec2:DescribeInstances",
-          "autoscaling:DescribeAutoScalingGroups"
-        ]
-        Effect   = "Allow"
-        Resource = "*"
-      },
-    ]
-  })
-}
-
-#---------------------------------------------------------------
-# EKS Blueprints
-#---------------------------------------------------------------
-
-module "eks_blueprints" {
-  source = "../.."
-
-  cluster_name    = local.cluster_name
-  cluster_version = "1.22"
-
-  vpc_id             = module.vpc.vpc_id
-  private_subnet_ids = module.vpc.private_subnets
-
-  managed_node_groups = {
-    eksblueprint_nodegroup_med = {
-      node_group_name         = "eksblueprint_nodegroup_med"
-      instance_types          = ["t2.medium"]
-      min_size                = 3
-      desired_size            = 3
-      max_size                = 3
-      subnet_ids              = module.vpc.private_subnets
-      additional_iam_policies = [aws_iam_policy.portworx_eksblueprint_volume_access.arn]
-    }
-  }
-  tags = local.tags
-
-  depends_on = [
-    aws_iam_policy.portworx_eksblueprint_volume_access
-  ]
-}
-
-module "eks_blueprints_kubernetes_addons" {
-  source               = "../../modules/kubernetes-addons"
-  eks_cluster_id       = module.eks_blueprints.eks_cluster_id
-  eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
-  eks_oidc_provider    = module.eks_blueprints.oidc_provider
-  eks_cluster_version  = module.eks_blueprints.eks_cluster_version
-
-  enable_portworx = true
-
-  # Custom values for parameters can be passed as shown below
-  portworx_helm_config = {
-    set = [
-      {
-        name  = "imageVersion"
-        value = "2.11.2"
-      }
-    ]
-  }
-  tags = local.tags
-}
diff --git a/examples/portworx/outputs.tf b/examples/portworx/outputs.tf
deleted file mode 100644
index 34a167464c..0000000000
--- a/examples/portworx/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "eks_cluster_id" {
-  description = "EKS cluster ID"
-  value       = module.eks_blueprints.eks_cluster_id
-}
-output "configure_kubectl" {
-  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value       = module.eks_blueprints.configure_kubectl
-}
-
-# Region used for Terratest
-output "region" {
-  value       = local.region
-  description = "AWS region"
-}
diff --git a/examples/portworx/variables.tf b/examples/portworx/variables.tf
deleted file mode 100644
index 2800300253..0000000000
--- a/examples/portworx/variables.tf
+++ /dev/null
@@ -1,6 +0,0 @@
-# tflint-ignore: terraform_unused_declarations
-variable "cluster_name" {
-  description = "Name of cluster - used by Terratest for e2e test automation"
-  type        = string
-  default     = ""
-}
diff --git a/examples/portworx/versions.tf b/examples/portworx/versions.tf
deleted file mode 100644
index a967321a96..0000000000
--- a/examples/portworx/versions.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-terraform {
-  required_version = ">= 1.0.0"
-
-  required_providers {
-    aws = {
-      source  = "hashicorp/aws"
-      version = ">= 3.72"
-    }
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = ">= 2.10"
-    }
-    helm = {
-      source  = "hashicorp/helm"
-      version = ">= 2.4.1"
-    }
-  }
-
-  # ## Used for end-to-end testing on project; update to suit your needs
-  # backend "s3" {
-  #   bucket = "terraform-ssp-github-actions-state"
-  #   region = "us-west-2"
-  #   key    = "e2e/eks-cluster-with-new-vpc/terraform.tfstate"
-  # }
-}
diff --git a/examples/upstream-with-k8s-addons/README.md b/examples/upstream-with-k8s-addons/README.md
deleted file mode 100644
index 18444c3931..0000000000
--- a/examples/upstream-with-k8s-addons/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# EKS upstream module with the Blueprints kubernetes-addons module
-
-Customers often ask whether they can use the [upstream terraform-aws-eks module](https://github.com/terraform-aws-modules/terraform-aws-eks) for the cluster and EKS Blueprints only for its `kubernetes-addons` module; this example shows how to achieve that. The wiring is sketched below and shown in full in `main.tf`.
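As a rough sketch of that wiring, condensed from the `main.tf` that appears later in this diff (the cluster name below is an illustrative placeholder):

```hcl
# Condensed sketch of the pattern used in this example's main.tf.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  cluster_name    = "upstream-demo" # placeholder name
  cluster_version = "1.23"
  vpc_id          = module.vpc.vpc_id
  subnet_ids      = module.vpc.private_subnets
}

module "eks_blueprints_kubernetes_addons" {
  source = "../../modules/kubernetes-addons"

  # Outputs from the upstream module map directly onto the add-ons module inputs
  eks_cluster_id       = module.eks.cluster_id
  eks_cluster_endpoint = module.eks.cluster_endpoint
  eks_oidc_provider    = module.eks.oidc_provider
  eks_cluster_version  = module.eks.cluster_version

  enable_metrics_server     = true
  enable_cluster_autoscaler = true
}
```

The add-ons module only needs the cluster ID, endpoint, OIDC provider, and cluster version from the upstream module.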
-
-## How to Deploy
-
-### Prerequisites
-
-Ensure that the following tools are installed on your local machine before working with this module and running Terraform plan and apply:
-
-1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
-2. [Kubectl](https://Kubernetes.io/docs/tasks/tools/)
-3. [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli)
-
-### Deployment Steps
-
-#### Step 1: Clone the repo using the command below
-
-```sh
-git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git
-```
-
-#### Step 2: Run Terraform INIT
-
-Initialize a working directory with configuration files
-
-```sh
-cd examples/upstream-with-k8s-addons
-terraform init
-```
-
-#### Step 3: Run Terraform PLAN
-
-Verify the resources created by this execution
-
-```sh
-terraform plan
-```
-
-Note: you can change the region (default: us-west-2) in the `locals` block of `main.tf`.
-
-#### Step 4: Finally, Terraform APPLY
-
-**Deploy the pattern**
-
-```sh
-terraform apply
-```
-
-Enter `yes` to apply.
-
-### Configure `kubectl` and test cluster
-
-EKS cluster details, including the cluster name, can be obtained from the Terraform output or from the AWS Console.
-The following command updates the `kubeconfig` on the machine where you run `kubectl` commands to interact with your EKS cluster.
-
-#### Step 5: Run `update-kubeconfig` command
-
-The `~/.kube/config` file is updated with the cluster details and certificate by the command below:
-
-    aws eks --region <region> update-kubeconfig --name <cluster-name>
-
-#### Step 6: List all the worker nodes by running the command below
-
-    kubectl get nodes
-
-#### Step 7: List all the pods running in `kube-system` namespace
-
-    kubectl get pods -n kube-system
-
-## Cleanup
-
-To clean up your environment, destroy the Terraform modules in reverse order.
-
-Destroy the Kubernetes add-ons, the EKS cluster with its node groups, and the VPC:
-
-```sh
-terraform destroy -target="module.eks_blueprints_kubernetes_addons" -auto-approve
-terraform destroy -target="module.eks" -auto-approve
-terraform destroy -target="module.vpc" -auto-approve
-```
-
-Finally, destroy any additional resources that are not in the above modules:
-
-```sh
-terraform destroy -auto-approve
-```
diff --git a/examples/upstream-with-k8s-addons/main.tf b/examples/upstream-with-k8s-addons/main.tf
deleted file mode 100644
index 641969e501..0000000000
--- a/examples/upstream-with-k8s-addons/main.tf
+++ /dev/null
@@ -1,139 +0,0 @@
-provider "aws" {
-  region = local.region
-}
-
-provider "kubernetes" {
-  host                   = module.eks.cluster_endpoint
-  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
-  token                  = data.aws_eks_cluster_auth.this.token
-}
-
-provider "helm" {
-  kubernetes {
-    host                   = module.eks.cluster_endpoint
-    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
-    token                  = data.aws_eks_cluster_auth.this.token
-  }
-}
-
-data "aws_eks_cluster_auth" "this" {
-  name = module.eks.cluster_id
-}
-
-data "aws_availability_zones" "available" {}
-
-locals {
-  name   = basename(path.cwd)
-  region = "us-west-2"
-
-  vpc_cidr = "10.0.0.0/16"
-  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
-
-  tags = {
-    Blueprint  = local.name
-    GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
-  }
-}
-
-#---------------------------------------------------------------
-# EKS Cluster with terraform-aws-eks module
-#---------------------------------------------------------------
-
-module "eks" {
-  source  = "terraform-aws-modules/eks/aws"
-  version = "~> 18.0"
-
-  cluster_name                    = local.name
-  cluster_version                 = "1.23"
-  cluster_endpoint_private_access = true
-
-  vpc_id     = module.vpc.vpc_id
-  subnet_ids = module.vpc.private_subnets
-
-  cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
-
-  eks_managed_node_group_defaults = {
-    instance_types        = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
-    create_security_group = false
-  }
-
-  eks_managed_node_groups = {
-    bottlerocket = {
-      ami_type = "BOTTLEROCKET_x86_64"
-      platform = "bottlerocket"
-
-      min_size     = 1
-      max_size     = 7
-      desired_size = 1
-
-      update_config = {
-        max_unavailable_percentage = 33
-      }
-    }
-  }
-}
-
-#---------------------------------------------------------------
-# Kubernetes Addons using Blueprints kubernetes-addons module
-#---------------------------------------------------------------
-
-module "eks_blueprints_kubernetes_addons" {
-  source = "../../modules/kubernetes-addons"
-
-  eks_cluster_id       = module.eks.cluster_id
-  eks_cluster_endpoint = module.eks.cluster_endpoint
-  eks_oidc_provider    = module.eks.oidc_provider
-  eks_cluster_version  = module.eks.cluster_version
-
-  # EKS Managed Add-ons
-  enable_amazon_eks_vpc_cni    = true
-  enable_amazon_eks_coredns    = true
-  enable_amazon_eks_kube_proxy = true
-
-  # Add-ons
-  enable_metrics_server         = true
-  enable_cluster_autoscaler     = true
-  enable_aws_cloudwatch_metrics = true
-
-  tags = local.tags
-}
-
-#---------------------------------------------------------------
-# Supporting Resources
-#---------------------------------------------------------------
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 3.0"
-
-  name = local.name
-  cidr = local.vpc_cidr
-
-  azs             = local.azs
-  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
-  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]
-
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  # Manage so we can name
-  manage_default_network_acl    = true
-  default_network_acl_tags      = { Name = "${local.name}-default" }
-  manage_default_route_table    = true
-  default_route_table_tags      = { Name = "${local.name}-default" }
-  manage_default_security_group = true
-  default_security_group_tags   = { Name = "${local.name}-default" }
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/elb"              = 1
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/internal-elb"     = 1
-  }
-
-  tags = local.tags
-}
diff --git a/examples/upstream-with-k8s-addons/outputs.tf b/examples/upstream-with-k8s-addons/outputs.tf
deleted file mode 100644
index b7decade8e..0000000000
--- a/examples/upstream-with-k8s-addons/outputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-output "configure_kubectl" {
-  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
-}
diff --git a/examples/upstream-with-k8s-addons/variables.tf b/examples/upstream-with-k8s-addons/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/upstream-with-k8s-addons/versions.tf b/examples/upstream-with-k8s-addons/versions.tf
deleted file mode 100644
index 267c688ae5..0000000000
--- a/examples/upstream-with-k8s-addons/versions.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-terraform {
-  required_version = ">= 1.0.0"
-
-  required_providers {
-    aws = {
-      source  = "hashicorp/aws"
-      version = ">= 3.72"
-    }
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = ">= 2.10"
-    }
-    helm = {
-      source  = "hashicorp/helm"
-      version = ">= 2.4.1"
-    }
-  }
-
-  # ## Used for end-to-end testing on project; update to suit your needs
-  # backend "s3" {
-  #   bucket = "terraform-ssp-github-actions-state"
-  #   region = "us-west-2"
-  #   key    = "e2e/upstream-with-k8s-addons/terraform.tfstate"
-  # }
-}
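Each of the deleted `versions.tf` files keeps its S3 backend commented out for end-to-end testing. As a reference sketch only, enabling a remote state backend of that shape would look roughly like the following; the bucket name and key are placeholders to replace with your own values:

```hcl
terraform {
  # Sketch: hypothetical backend configuration, replace the placeholder values.
  backend "s3" {
    bucket = "my-terraform-state-bucket" # placeholder bucket name
    region = "us-west-2"
    key    = "e2e/upstream-with-k8s-addons/terraform.tfstate"
  }
}
```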