diff --git a/README.md b/README.md index 5aaf0f9..7f330ff 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,9 @@ Use of `sshuttle` with private key: | Name | Version | |------|---------| -| [aws](#provider\_aws) | 5.9.0 | +| [aws](#provider\_aws) | 5.12.0 | | [kubectl](#provider\_kubectl) | 1.14.0 | -| [kubernetes](#provider\_kubernetes) | 2.22.0 | +| [kubernetes](#provider\_kubernetes) | 2.23.0 | | [random](#provider\_random) | 3.5.1 | ## Modules @@ -46,8 +46,10 @@ Use of `sshuttle` with private key: | Name | Source | Version | |------|--------|---------| | [aws\_eks](#module\_aws\_eks) | git::https://github.com/terraform-aws-modules/terraform-aws-eks.git | v19.15.3 | +| [calico](#module\_calico) | git::https://github.com/aws-ia/terraform-aws-eks-blueprints-addon.git | v1.1.0 | +| [ebs\_csi\_driver\_irsa](#module\_ebs\_csi\_driver\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.20 | | [efs](#module\_efs) | terraform-aws-modules/efs/aws | ~> 1.0 | -| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | git::https://github.com/aws-ia/terraform-aws-eks-blueprints.git//modules/kubernetes-addons | v4.32.1 | +| [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | git::https://github.com/aws-ia/terraform-aws-eks-blueprints-addons.git | v1.5.1 | ## Resources @@ -67,20 +69,20 @@ Use of `sshuttle` with private key: | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [amazon\_eks\_aws\_ebs\_csi\_driver\_config](#input\_amazon\_eks\_aws\_ebs\_csi\_driver\_config) | configMap for AWS EBS CSI Driver add-on | `any` | `{}` | no | | [aws\_admin\_usernames](#input\_aws\_admin\_usernames) | A list of one or more AWS usernames with authorized access to KMS and EKS resources, will automatically add the user running the terraform as an admin | `list(string)` | `[]` | no | +| [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no | | [aws\_auth\_users](#input\_aws\_auth\_users) | List of map of users to add to aws-auth configmap |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | -| [aws\_node\_termination\_handler\_helm\_config](#input\_aws\_node\_termination\_handler\_helm\_config) | AWS Node Termination Handler Helm Chart config | `any` | `{}` | no | +| [aws\_efs\_csi\_driver](#input\_aws\_efs\_csi\_driver) | AWS EFS CSI Driver helm chart config | `any` | `{}` | no | +| [aws\_node\_termination\_handler](#input\_aws\_node\_termination\_handler) | AWS Node Termination Handler config for aws-ia/eks-blueprints-addon/aws | `any` | `{}` | no | | [aws\_region](#input\_aws\_region) | n/a | `string` | `""` | no | -| [bastion\_role\_arn](#input\_bastion\_role\_arn) | ARN of role authorized kubectl access | `string` | `""` | no | -| [bastion\_role\_name](#input\_bastion\_role\_name) | Name of role authorized kubectl access | `string` | `""` | no | -| [calico\_helm\_config](#input\_calico\_helm\_config) | Calico Helm Chart config | `any` | `{}` | no | +| [calico](#input\_calico) | Calico Helm Chart config | `any` | `{}` | no | | [cidr\_blocks](#input\_cidr\_blocks) | n/a | `list(string)` | n/a | yes | | [cluster\_addons](#input\_cluster\_addons) | Nested map of EKS native add-ons and their associated parameters.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_add-on for supported values.
See https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/examples/complete/main.tf#L44-L60 for upstream example.

to see the EKS marketplace addons available for your cluster's version, run:
aws eks describe-addon-versions --kubernetes-version $k8s\_cluster\_version --query 'addons[].{MarketplaceProductUrl: marketplaceInformation.productUrl, Name: addonName, Owner: owner, Publisher: publisher, Type: type}' --output table | `any` | `{}` | no | -| [cluster\_autoscaler\_helm\_config](#input\_cluster\_autoscaler\_helm\_config) | Cluster Autoscaler Helm Chart config | `any` |
{
"set": [
{
"name": "extraArgs.expander",
"value": "priority"
},
{
"name": "expanderPriorities",
"value": "100:\n - .*-spot-2vcpu-8mem.*\n90:\n - .*-spot-4vcpu-16mem.*\n10:\n - .*\n"
}
]
}
| no | +| [cluster\_autoscaler](#input\_cluster\_autoscaler) | Cluster Autoscaler Helm Chart config | `any` |
{
"set": [
{
"name": "extraArgs.expander",
"value": "priority"
},
{
"name": "expanderPriorities",
"value": "100:\n - .*-spot-2vcpu-8mem.*\n90:\n - .*-spot-4vcpu-16mem.*\n10:\n - .*\n"
}
]
}
| no | | [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Enable private access to the cluster endpoint | `bool` | `true` | no | | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Enable public access to the cluster endpoint | `bool` | `false` | no | -| [cluster\_name](#input\_cluster\_name) | Name of cluster - used by Terratest for e2e test automation | `string` | `""` | no | +| [cluster\_name](#input\_cluster\_name) | Name of cluster | `string` | `""` | no | +| [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for EKS cluster | `string` | `"1.27"` | no | | [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | Subnet IDs for control plane | `list(string)` | `[]` | no | | [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | @@ -88,23 +90,22 @@ Use of `sshuttle` with private key: | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS-managed node group default configurations | `any` | `{}` | no | | [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Managed node groups configuration | `any` | `{}` | no | | [eks\_use\_mfa](#input\_eks\_use\_mfa) | Use MFA for auth\_eks\_role | `bool` | `false` | no | -| [enable\_amazon\_eks\_aws\_ebs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_ebs\_csi\_driver) | Enable EKS Managed AWS EBS CSI Driver add-on; enable\_amazon\_eks\_aws\_ebs\_csi\_driver and enable\_self\_managed\_aws\_ebs\_csi\_driver are mutually exclusive | `bool` | `false` | no | +| [enable\_amazon\_eks\_aws\_ebs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_ebs\_csi\_driver) | Enable EKS Managed AWS EBS CSI Driver add-on | `bool` | `false` | no | +| [enable\_amazon\_eks\_aws\_efs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_efs\_csi\_driver) | Enable EFS CSI Driver add-on | `bool` | `false` | no | | [enable\_aws\_node\_termination\_handler](#input\_enable\_aws\_node\_termination\_handler) | Enable AWS Node Termination Handler add-on | `bool` | `false` | no | -| [enable\_calico](#input\_enable\_calico) | Enable Calico add-on | `bool` | `false` | no | +| [enable\_calico](#input\_enable\_calico) | Enable Calico add-on | `bool` | `true` | no | | [enable\_cluster\_autoscaler](#input\_enable\_cluster\_autoscaler) | Enable Cluster autoscaler add-on | `bool` | `false` | no | -| [enable\_efs](#input\_enable\_efs) | Enable EFS CSI Driver add-on | `bool` | `false` | no | | [enable\_gp3\_default\_storage\_class](#input\_enable\_gp3\_default\_storage\_class) | Enable gp3 as default storage class | `bool` | `false` | no | | [enable\_metrics\_server](#input\_enable\_metrics\_server) | Enable metrics server add-on | `bool` | `false` | no | | [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | | 
[kms\_key\_administrators](#input\_kms\_key\_administrators) | List of ARNs of additional administrator users to add to KMS key policy | `list(string)` | `[]` | no | | [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | -| [metrics\_server\_helm\_config](#input\_metrics\_server\_helm\_config) | Metrics Server Helm Chart config | `any` | `{}` | no | +| [metrics\_server](#input\_metrics\_server) | Metrics Server config for aws-ia/eks-blueprints-addon/aws | `any` | `{}` | no | | [name](#input\_name) | n/a | `string` | `""` | no | | [private\_subnet\_ids](#input\_private\_subnet\_ids) | Private subnet IDs | `list(string)` | `[]` | no | | [reclaim\_policy](#input\_reclaim\_policy) | Reclaim policy for EFS storage class, valid options are Delete and Retain | `string` | `"Delete"` | no | | [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no | | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Self-managed node groups configuration | `any` | `{}` | no | -| [source\_security\_group\_id](#input\_source\_security\_group\_id) | List of additional rules to add to cluster security group | `string` | `""` | no | | [storageclass\_reclaim\_policy](#input\_storageclass\_reclaim\_policy) | Reclaim policy for gp3 storage class, valid options are Delete and Retain | `string` | `"Delete"` | no | | [tags](#input\_tags) | A map of tags to apply to all resources | `map(string)` | `{}` | no | | [vpc\_cni\_custom\_subnet](#input\_vpc\_cni\_custom\_subnet) | Subnet to put pod ENIs in | `list(string)` | `[]` | no | @@ -118,8 +119,9 @@ Use of `sshuttle` with private key: | [cluster\_endpoint](#output\_cluster\_endpoint) | EKS cluster endpoint | | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | EKS cluster IAM role ARN | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | EKS cluster security group ID | | [cluster\_status](#output\_cluster\_status) | status of the EKS cluster | -| [efs\_storageclass\_name](#output\_efs\_storageclass\_name) | The name of the EFS storageclass that was created (if var.enable\_efs was set to true) | +| [efs\_storageclass\_name](#output\_efs\_storageclass\_name) | The name of the EFS storageclass that was created (if var.enable\_amazon\_eks\_aws\_efs\_csi\_driver was set to true) | | [managed\_nodegroups](#output\_managed\_nodegroups) | EKS managed node groups | | [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | | [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | EKS OIDC provider ARN | diff --git a/eks-addons.tf b/eks-addons.tf index a883a6b..470b3f7 100644 --- a/eks-addons.tf +++ b/eks-addons.tf @@ -1,59 +1,195 @@ -#--------------------------------------------------------------- -# EKS Add-Ons -#--------------------------------------------------------------- +################################################################################ +# EKS Blueprints Add-Ons +################################################################################ locals { - self_managed_node_group_names = [for key, value in module.aws_eks.self_managed_node_groups : lookup(value, "autoscaling_group_name", "")] + node_group_arns = [for key, value in module.aws_eks.self_managed_node_groups : 
lookup(value, "autoscaling_group_arn", "")] } module "eks_blueprints_kubernetes_addons" { - source = "git::https://github.com/aws-ia/terraform-aws-eks-blueprints.git//modules/kubernetes-addons?ref=v4.32.1" + source = "git::https://github.com/aws-ia/terraform-aws-eks-blueprints-addons.git?ref=v1.5.1" - eks_cluster_id = module.aws_eks.cluster_name - eks_cluster_endpoint = module.aws_eks.cluster_endpoint - eks_oidc_provider = module.aws_eks.oidc_provider - eks_cluster_version = module.aws_eks.cluster_version - irsa_iam_permissions_boundary = var.iam_role_permissions_boundary - - # only used for aws_node_termination_handler, if this list is empty, then enable_aws_node_termination_handler should also be false. - auto_scaling_group_names = local.self_managed_node_group_names + cluster_name = module.aws_eks.cluster_name + cluster_endpoint = module.aws_eks.cluster_endpoint + oidc_provider_arn = module.aws_eks.oidc_provider_arn + cluster_version = module.aws_eks.cluster_version - # blueprints addons + # time_sleep w/ trigger for nodes to be deployed + create_delay_dependencies = local.node_group_arns - # EKS EBS CSI Driver - enable_amazon_eks_aws_ebs_csi_driver = var.enable_amazon_eks_aws_ebs_csi_driver - amazon_eks_aws_ebs_csi_driver_config = var.amazon_eks_aws_ebs_csi_driver_config + # only used for aws_node_termination_handler, if this list is empty, then enable_aws_node_termination_handler should also be false. + # you don't need to tag eks managed node group ASGs for NTH - https://github.com/aws/aws-node-termination-handler/blob/main/README.md?plain=1#L41 + aws_node_termination_handler_asg_arns = local.node_group_arns # EKS EFS CSI Driver - enable_aws_efs_csi_driver = var.enable_efs + enable_aws_efs_csi_driver = var.enable_amazon_eks_aws_efs_csi_driver + aws_efs_csi_driver = var.aws_efs_csi_driver # K8s Add-ons # EKS Metrics Server - enable_metrics_server = var.enable_metrics_server - metrics_server_helm_config = var.metrics_server_helm_config + enable_metrics_server = var.enable_metrics_server + metrics_server = var.metrics_server # EKS AWS node termination handler - enable_aws_node_termination_handler = var.enable_aws_node_termination_handler - aws_node_termination_handler_helm_config = var.aws_node_termination_handler_helm_config + enable_aws_node_termination_handler = var.enable_aws_node_termination_handler + aws_node_termination_handler = var.aws_node_termination_handler # EKS Cluster Autoscaler - enable_cluster_autoscaler = var.enable_cluster_autoscaler - cluster_autoscaler_helm_config = var.cluster_autoscaler_helm_config + enable_cluster_autoscaler = var.enable_cluster_autoscaler + cluster_autoscaler = var.cluster_autoscaler + + # Arbitrary helm charts can be fed into a helm_release var in blueprints. 
Note that the standard "create" var doesn't work with these + # see https://github.com/aws-ia/terraform-aws-eks-blueprints-addons/blob/main/docs/helm-release.md + + tags = var.tags +} - # Calico - enable_calico = var.enable_calico - calico_helm_config = var.calico_helm_config +################################################################################ +# Custom Addons +################################################################################ + +# Calico +module "calico" { + source = "git::https://github.com/aws-ia/terraform-aws-eks-blueprints-addon.git?ref=v1.1.0" + + create = var.enable_calico + + # https://github.com/projectcalico/calico/blob/master/charts/tigera-operator/Chart.yaml + name = try(var.calico.name, "calico") + description = try(var.calico.description, "calico helm Chart deployment configuration") + namespace = try(var.calico.namespace, "tigera-operator") + create_namespace = try(var.calico.create_namespace, true) + chart = try(var.calico.chart, "tigera-operator") + chart_version = try(var.calico.chart_version, "v3.26.1") + repository = try(var.calico.repository, "https://docs.projectcalico.org/charts") + values = try(var.calico.values, [ + <<-EOT + installation: + kubernetesProvider: "EKS" + EOT + ]) + + timeout = try(var.calico.timeout, null) + repository_key_file = try(var.calico.repository_key_file, null) + repository_cert_file = try(var.calico.repository_cert_file, null) + repository_ca_file = try(var.calico.repository_ca_file, null) + repository_username = try(var.calico.repository_username, null) + repository_password = try(var.calico.repository_password, null) + devel = try(var.calico.devel, null) + verify = try(var.calico.verify, null) + keyring = try(var.calico.keyring, null) + disable_webhooks = try(var.calico.disable_webhooks, null) + reuse_values = try(var.calico.reuse_values, null) + reset_values = try(var.calico.reset_values, null) + force_update = try(var.calico.force_update, null) + recreate_pods = try(var.calico.recreate_pods, null) + cleanup_on_fail = try(var.calico.cleanup_on_fail, null) + max_history = try(var.calico.max_history, null) + atomic = try(var.calico.atomic, null) + skip_crds = try(var.calico.skip_crds, null) + render_subchart_notes = try(var.calico.render_subchart_notes, null) + disable_openapi_validation = try(var.calico.disable_openapi_validation, null) + wait = try(var.calico.wait, false) + wait_for_jobs = try(var.calico.wait_for_jobs, null) + dependency_update = try(var.calico.dependency_update, null) + replace = try(var.calico.replace, null) + lint = try(var.calico.lint, null) + + postrender = try(var.calico.postrender, []) + set = [] + set_sensitive = try(var.calico.set_sensitive, []) + + tags = var.tags +} + +################################################################################ +# EBS CSI Driver Configurations +################################################################################ + +module "ebs_csi_driver_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.20" + + count = var.enable_amazon_eks_aws_ebs_csi_driver ? 1 : 0
+ +  role_name_prefix = "${module.aws_eks.cluster_name}-ebs-csi-driver-" + +  attach_ebs_csi_policy = true + +  oidc_providers = { +    main = { +      provider_arn               = module.aws_eks.oidc_provider_arn +      namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] +    } +  } + +  tags = var.tags +} + + +################################################################################ +# EFS CSI Driver Configurations +################################################################################ + +resource "random_id" "efs_name" { +  byte_length = 2 +  prefix      = "EFS-" +} + +resource "kubernetes_storage_class_v1" "efs" { +  count = var.enable_amazon_eks_aws_efs_csi_driver ? 1 : 0 +  metadata { +    name = lower(random_id.efs_name.hex) +  } + +  storage_provisioner = "efs.csi.aws.com" +  reclaim_policy      = var.reclaim_policy +  parameters = { +    provisioningMode = "efs-ap" # Dynamic provisioning +    fileSystemId     = module.efs[0].id +    directoryPerms   = "700" +  } +  mount_options = [ +    "iam" +  ] + +  depends_on = [ +    module.eks_blueprints_kubernetes_addons +  ] +} + +module "efs" { +  source  = "terraform-aws-modules/efs/aws" +  version = "~> 1.0" + +  count = var.enable_amazon_eks_aws_efs_csi_driver ? 1 : 0 + +  name = lower(random_id.efs_name.hex) +  # Mount targets / security group +  mount_targets = { +    for k, v in zipmap(local.availability_zone_name, var.private_subnet_ids) : k => { subnet_id = v } +  } + +  security_group_description = "${local.cluster_name} EFS security group" +  security_group_vpc_id      = var.vpc_id +  security_group_rules = { +    vpc = { +      # relying on the defaults provided for EFS/NFS (2049/TCP + ingress) +      description = "NFS ingress from VPC private subnets" +      cidr_blocks = var.cidr_blocks +    } +  }    tags = var.tags }  ################################################################################ -# Storage Classes +# Storage Class config ################################################################################  resource "kubernetes_annotations" "gp2" { -  count = var.enable_gp3_default_storage_class ? 1 : 0 +  count = var.enable_gp3_default_storage_class && var.enable_amazon_eks_aws_ebs_csi_driver ? 1 : 0    api_version = "storage.k8s.io/v1"   kind        = "StorageClass" @@ -74,7 +210,7 @@ resource "kubernetes_annotations" "gp2" { }  resource "kubernetes_storage_class_v1" "gp3" { -  count = var.enable_gp3_default_storage_class ? 1 : 0 +  count = var.enable_gp3_default_storage_class && var.enable_amazon_eks_aws_ebs_csi_driver ? 1 : 0
metadata {     name = "gp3" diff --git a/examples/complete/README.md index 86bc598..5067e1f 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -146,26 +146,27 @@ kubectl get nodes  | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [access\_log\_expire\_days](#input\_access\_log\_expire\_days) | Number of days to wait before deleting access logs | `number` | `30` | no | -| [amazon\_eks\_aws\_ebs\_csi\_driver\_config](#input\_amazon\_eks\_aws\_ebs\_csi\_driver\_config) | configMap for AWS EBS CSI Driver add-on | `any` | `{}` | no | | [aws\_admin\_usernames](#input\_aws\_admin\_usernames) | A list of one or more AWS usernames with authorized access to KMS and EKS resources, will automatically add the user running the terraform as an admin | `list(string)` | `[]` | no | -| [aws\_node\_termination\_handler\_helm\_config](#input\_aws\_node\_termination\_handler\_helm\_config) | AWS Node Termination Handler Helm Chart config | `any` | `{}` | no | +| [aws\_efs\_csi\_driver](#input\_aws\_efs\_csi\_driver) | AWS EFS CSI Driver helm chart config | `any` | `{}` | no | +| [aws\_node\_termination\_handler](#input\_aws\_node\_termination\_handler) | AWS Node Termination Handler config for aws-ia/eks-blueprints-addon/aws | `any` | `{}` | no | | [bastion\_instance\_type](#input\_bastion\_instance\_type) | value for the instance type of the bastion | `string` | `"m5.xlarge"` | no | | [bastion\_ssh\_password](#input\_bastion\_ssh\_password) | The SSH password to use for the bastion if SSM authentication is used | `string` | `"my-password"` | no | | [bastion\_ssh\_user](#input\_bastion\_ssh\_user) | The SSH user to use for the bastion | `string` | `"ec2-user"` | no | | [bastion\_tenancy](#input\_bastion\_tenancy) | The tenancy of the bastion | `string` | `"default"` | no | -| [calico\_helm\_config](#input\_calico\_helm\_config) | Calico Helm Chart config | `any` | `{}` | no | +| [calico](#input\_calico) | Calico Helm Chart config | `any` | `{}` | no | | [cluster\_addons](#input\_cluster\_addons) | Nested map of EKS native add-ons and their associated parameters.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_add-on for supported values.
See https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/examples/complete/main.tf#L44-L60 for upstream example.

to see the EKS marketplace addons available for your cluster's version, run:
aws eks describe-addon-versions --kubernetes-version $k8s\_cluster\_version --query 'addons[].{MarketplaceProductUrl: marketplaceInformation.productUrl, Name: addonName, Owner: owner, Publisher: publisher, Type: type}' --output table | `any` | `{}` | no | -| [cluster\_autoscaler\_helm\_config](#input\_cluster\_autoscaler\_helm\_config) | Cluster Autoscaler Helm Chart config | `any` | `{}` | no | +| [cluster\_autoscaler](#input\_cluster\_autoscaler) | Cluster Autoscaler Helm Chart config | `any` | `{}` | no | | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Whether to enable public access to the EKS cluster | `bool` | `false` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for EKS cluster | `string` | `"1.26"` | no | | [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | | [eks\_use\_mfa](#input\_eks\_use\_mfa) | Use MFA for auth\_eks\_role | `bool` | n/a | yes | | [eks\_worker\_tenancy](#input\_eks\_worker\_tenancy) | The tenancy of the EKS worker nodes | `string` | `"default"` | no | -| [enable\_amazon\_eks\_aws\_ebs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_ebs\_csi\_driver) | Enable EKS Managed AWS EBS CSI Driver add-on; enable\_amazon\_eks\_aws\_ebs\_csi\_driver and enable\_self\_managed\_aws\_ebs\_csi\_driver are mutually exclusive | `bool` | `false` | no | +| [enable\_amazon\_eks\_aws\_ebs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_ebs\_csi\_driver) | Enable EKS Managed AWS EBS CSI Driver add-on | `bool` | `false` | no | +| [enable\_amazon\_eks\_aws\_efs\_csi\_driver](#input\_enable\_amazon\_eks\_aws\_efs\_csi\_driver) | Enable EFS CSI add-on | `bool` | `false` | no | | [enable\_aws\_node\_termination\_handler](#input\_enable\_aws\_node\_termination\_handler) | Enable AWS Node Termination Handler add-on | `bool` | `false` | no | +| [enable\_bastion](#input\_enable\_bastion) | If true, a bastion will be created | `bool` | `true` | no | | [enable\_calico](#input\_enable\_calico) | Enable Calico add-on | `bool` | `true` | no | | [enable\_cluster\_autoscaler](#input\_enable\_cluster\_autoscaler) | Enable Cluster autoscaler add-on | `bool` | `false` | no | -| [enable\_efs](#input\_enable\_efs) | Enable EFS CSI add-on | `bool` | `false` | no | | [enable\_eks\_managed\_nodegroups](#input\_enable\_eks\_managed\_nodegroups) | Enable managed node groups | `bool` | n/a | yes | | [enable\_gp3\_default\_storage\_class](#input\_enable\_gp3\_default\_storage\_class) | Enable gp3 as default storage class | `bool` | `false` | no | | [enable\_metrics\_server](#input\_enable\_metrics\_server) | Enable metrics server add-on | `bool` | `false` | no | @@ -174,11 +175,10 @@ kubectl get nodes | [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for IAM roles | `string` | `null` | no | | [kms\_key\_deletion\_window](#input\_kms\_key\_deletion\_window) | Waiting period for scheduled KMS Key deletion. Can be 7-30 days. 
| `number` | `7` | no | | [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | -| [metrics\_server\_helm\_config](#input\_metrics\_server\_helm\_config) | Metrics Server Helm Chart config | `any` | `{}` | no | +| [metrics\_server](#input\_metrics\_server) | Metrics Server config for aws-ia/eks-blueprints-addon/aws | `any` | `{}` | no | | [name\_prefix](#input\_name\_prefix) | The prefix to use when naming all resources | `string` | `"ex-complete"` | no | | [reclaim\_policy](#input\_reclaim\_policy) | Reclaim policy for EFS storage class, valid options are Delete and Retain | `string` | `"Delete"` | no | | [region](#input\_region) | The AWS region to deploy into | `string` | n/a | yes | -| [region2](#input\_region2) | The AWS region to deploy into | `string` | n/a | yes | | [secondary\_cidr\_blocks](#input\_secondary\_cidr\_blocks) | A list of secondary CIDR blocks for the VPC | `list(string)` | `[]` | no | | [storageclass\_reclaim\_policy](#input\_storageclass\_reclaim\_policy) | Reclaim policy for gp3 storage class, valid options are Delete and Retain | `string` | `"Delete"` | no | | [tags](#input\_tags) | A map of tags to apply to all resources | `map(string)` | `{}` | no | @@ -192,7 +192,7 @@ kubectl get nodes | [bastion\_instance\_id](#output\_bastion\_instance\_id) | The ID of the bastion host | | [bastion\_private\_dns](#output\_bastion\_private\_dns) | The private DNS address of the bastion host | | [bastion\_region](#output\_bastion\_region) | The region that the bastion host was deployed to | -| [efs\_storageclass\_name](#output\_efs\_storageclass\_name) | The name of the EFS storageclass that was created (if var.enable\_efs was set to true) | +| [efs\_storageclass\_name](#output\_efs\_storageclass\_name) | The name of the EFS storageclass that was created (if var.enable\_amazon\_eks\_aws\_efs\_csi\_driver was set to true) | | [eks\_cluster\_name](#output\_eks\_cluster\_name) | The name of the EKS cluster | | [vpc\_cidr](#output\_vpc\_cidr) | The CIDR block of the VPC | \ No newline at end of file diff --git a/examples/complete/efs-validation.tf b/examples/complete/efs-validation.tf index b7867d0..394b8a2 100644 --- a/examples/complete/efs-validation.tf +++ b/examples/complete/efs-validation.tf @@ -1,7 +1,7 @@ # This code is used to validate that PVCs backed by EFS are working as expected. None of it is needed in production. resource "kubernetes_persistent_volume_claim_v1" "test_claim" { - count = var.enable_efs ? 1 : 0 + count = var.enable_amazon_eks_aws_efs_csi_driver ? 1 : 0 metadata { name = "test-claim" namespace = "default" @@ -20,7 +20,7 @@ resource "kubernetes_persistent_volume_claim_v1" "test_claim" { resource "kubernetes_job_v1" "test_write" { # checkov:skip=CKV_K8S_21: "The default namespace should not be used" -- This is test code, not production - count = var.enable_efs ? 1 : 0 + count = var.enable_amazon_eks_aws_efs_csi_driver ? 
1 : 0 metadata { name = "test-write" namespace = "default" diff --git a/examples/complete/fixtures.common.tfvars b/examples/complete/fixtures.common.tfvars index 169b398..15c3f29 100644 --- a/examples/complete/fixtures.common.tfvars +++ b/examples/complete/fixtures.common.tfvars @@ -61,31 +61,33 @@ cluster_addons = { kube-proxy = { most_recent = true } + aws-ebs-csi-driver = { + most_recent = true + } } - #################### Blueprints addons ################### #wait false for all addons, as it times out on teardown in the test pipeline -enable_efs = true +enable_amazon_eks_aws_efs_csi_driver = true +aws_efs_csi_driver = { + wait = false + chart_version = "2.4.8" +} enable_amazon_eks_aws_ebs_csi_driver = true -amazon_eks_aws_ebs_csi_driver_config = { - wait = false - most_recent = true -} -enable_gp3_default_storage_class = true +enable_gp3_default_storage_class = true enable_aws_node_termination_handler = true -aws_node_termination_handler_helm_config = { - wait = false - version = "v0.21.0" +aws_node_termination_handler = { + wait = false + chart_version = "v0.21.0" } enable_cluster_autoscaler = true -cluster_autoscaler_helm_config = { - wait = false - version = "v9.29.1" +cluster_autoscaler = { + wait = false + chart_version = "v9.29.1" # set = [ # { # name = "extraArgs.expander" @@ -99,13 +101,13 @@ cluster_autoscaler_helm_config = { } enable_metrics_server = true -metrics_server_helm_config = { - wait = false - version = "v3.10.0" +metrics_server = { + wait = false + chart_version = "v3.10.0" } enable_calico = true -calico_helm_config = { - wait = false - version = "v3.26.1" +calico = { + wait = false + chart_version = "v3.26.1" } diff --git a/examples/complete/fixtures.insecure.tfvars b/examples/complete/fixtures.insecure.tfvars index 223e569..2316388 100644 --- a/examples/complete/fixtures.insecure.tfvars +++ b/examples/complete/fixtures.insecure.tfvars @@ -1,5 +1,5 @@ region = "us-east-2" -region2 = "us-east-2" +enable_bastion = false enable_eks_managed_nodegroups = true enable_self_managed_nodegroups = true bastion_tenancy = "default" diff --git a/examples/complete/fixtures.secure.tfvars b/examples/complete/fixtures.secure.tfvars index c0a11db..245293e 100644 --- a/examples/complete/fixtures.secure.tfvars +++ b/examples/complete/fixtures.secure.tfvars @@ -1,5 +1,4 @@ region = "us-east-2" -region2 = "us-east-1" enable_eks_managed_nodegroups = false enable_self_managed_nodegroups = true bastion_tenancy = "dedicated" diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 988072b..ead99e7 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -13,7 +13,129 @@ locals { access_logging_name_prefix = "${var.name_prefix}-accesslog-${lower(random_id.default.hex)}" kms_key_alias_name_prefix = "alias/${var.name_prefix}-${lower(random_id.default.hex)}" access_log_sqs_queue_name = "${var.name_prefix}-accesslog-access-${lower(random_id.default.hex)}" + tags = merge( + var.tags, + { + RootTFModule = replace(basename(path.cwd), "_", "-") # tag names based on the directory name + GithubRepo = "github.com/defenseunicorns/terraform-aws-uds-eks" + } + ) +} + +################################################################################ +# VPC +################################################################################ + +module "vpc" { + source = "git::https://github.com/defenseunicorns/terraform-aws-uds-vpc.git?ref=tags/v0.0.1-alpha" + + name = local.vpc_name + vpc_cidr = var.vpc_cidr + secondary_cidr_blocks = var.secondary_cidr_blocks + azs = 
["${var.region}a", "${var.region}b", "${var.region}c"] + public_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k)] + private_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k + 4)] + database_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k + 8)] + intra_subnets = [for k, v in module.vpc.azs : cidrsubnet(element(module.vpc.vpc_secondary_cidr_blocks, 0), 5, k)] + single_nat_gateway = true + enable_nat_gateway = true + + private_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + create_database_subnet_group = true + + instance_tenancy = "default" + vpc_flow_log_permissions_boundary = var.iam_role_permissions_boundary + + tags = local.tags +} + +################################################################################ +# Bastion instance +################################################################################ +locals { + bastion_role_arn = try(module.bastion[0].bastion_role_arn, "") + bastion_role_name = try(module.bastion[0].bastion_role_name, "") + + enable_bastion_access = length(local.bastion_role_arn) > 0 && length(local.bastion_role_name) > 0 + + ingress_bastion_to_cluster = { + description = "Bastion SG to Cluster" + security_group_id = module.eks.cluster_security_group_id + from_port = 443 + to_port = 443 + protocol = "tcp" + type = "ingress" + source_security_group_id = try(module.bastion[0].security_group_ids[0], null) + } + + # if bastion role vars are defined, add bastion role to aws_auth_roles list + bastion_aws_auth_entry = local.enable_bastion_access ? [ + { + rolearn = local.bastion_role_arn + username = local.bastion_role_name + groups = ["system:masters"] + }] : [] +} + +data "aws_ami" "amazonlinux2" { + count = var.enable_bastion ? 1 : 0 + + most_recent = true + + filter { + name = "name" + values = ["amzn2-ami-hvm*x86_64-gp2"] + } + + owners = ["amazon"] +} + +module "bastion" { + source = "git::https://github.com/defenseunicorns/terraform-aws-uds-bastion.git?ref=tags/v0.0.1-alpha" + + count = var.enable_bastion ? 1 : 0 + + enable_bastion_terraform_permissions = true + + ami_id = data.aws_ami.amazonlinux2[0].id + instance_type = var.bastion_instance_type + root_volume_config = { + volume_type = "gp3" + volume_size = "20" + encrypted = true + } + name = local.bastion_name + vpc_id = module.vpc.vpc_id + subnet_id = module.vpc.private_subnets[0] + region = var.region + access_logs_bucket_name = aws_s3_bucket.access_log_bucket.id + session_log_bucket_name_prefix = "${local.bastion_name}-sessionlogs" + kms_key_arn = aws_kms_key.default.arn + ssh_user = var.bastion_ssh_user + ssh_password = var.bastion_ssh_password + assign_public_ip = false + enable_log_to_s3 = true + enable_log_to_cloudwatch = true + tenancy = var.bastion_tenancy + zarf_version = var.zarf_version + permissions_boundary = var.iam_role_permissions_boundary + tags = merge( + local.tags, + { Function = "bastion-ssm" }) +} +################################################################################ +# EKS Cluster +################################################################################ + +locals { + cluster_security_group_additional_rules = merge( + var.enable_bastion ? 
{ ingress_bastion_to_cluster = local.ingress_bastion_to_cluster } : {}, + #other rules here + ) eks_managed_node_group_defaults = { # https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/node_groups.tf iam_role_permissions_boundary = var.iam_role_permissions_boundary @@ -41,31 +163,19 @@ locals { self_managed_node_group_defaults = { iam_role_permissions_boundary = var.iam_role_permissions_boundary - instance_type = "m5a.large" # should be compatible with dedicated tenancy in GovCloud region https://aws.amazon.com/ec2/pricing/dedicated-instances/#Dedicated_On-Demand_instances + instance_type = null update_launch_template_default_version = true use_mixed_instances_policy = true - mixed_instances_policy = { - instances_distribution = { - on_demand_base_capacity = 2 - on_demand_percentage_above_base_capacity = 20 - spot_allocation_strategy = "capacity-optimized" + instance_requirements = { + allowed_instance_types = ["m7i.4xlarge", "m6a.4xlarge", "m5a.4xlarge"] #this should be adjusted to the appropriate instance family if reserved instances are being utilized + memory_mib = { + min = 64000 + } + vcpu_count = { + min = 16 } - - override = [ - { - instance_requirements = { - allowed_instance_types = ["m5a.large", "m5.large", "m6i.large"] #this should be adjusted to the appropriate instance family if reserved instances are being utilized - memory_mib = { - min = 8192 - } - vcpu_count = { - min = 2 - } - } - } - ] } placement = { @@ -92,7 +202,7 @@ locals { # enable discovery of autoscaling groups by cluster-autoscaler autoscaling_group_tags = merge( - var.tags, + local.tags, { "k8s.io/cluster-autoscaler/enabled" : true, "k8s.io/cluster-autoscaler/${local.cluster_name}" : "owned" @@ -133,102 +243,24 @@ locals { self_managed_node_groups = var.enable_self_managed_nodegroups ? 
local.mission_app_self_mg_node_group : {} } -########################################################### -####################### VPC ############################### - -module "vpc" { - source = "git::https://github.com/defenseunicorns/terraform-aws-uds-vpc.git?ref=tags/v0.0.1-alpha" - - name = local.vpc_name - vpc_cidr = var.vpc_cidr - secondary_cidr_blocks = var.secondary_cidr_blocks - azs = ["${var.region}a", "${var.region}b", "${var.region}c"] - public_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k)] - private_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k + 4)] - database_subnets = [for k, v in module.vpc.azs : cidrsubnet(module.vpc.vpc_cidr_block, 5, k + 8)] - intra_subnets = [for k, v in module.vpc.azs : cidrsubnet(element(module.vpc.vpc_secondary_cidr_blocks, 0), 5, k)] - single_nat_gateway = true - enable_nat_gateway = true - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - create_database_subnet_group = true - - instance_tenancy = "default" - vpc_flow_log_permissions_boundary = var.iam_role_permissions_boundary - - tags = var.tags -} - -########################################################### -##################### Bastion ############################# - -data "aws_ami" "amazonlinux2" { - most_recent = true - - filter { - name = "name" - values = ["amzn2-ami-hvm*x86_64-gp2"] - } - - owners = ["amazon"] -} - -module "bastion" { - source = "git::https://github.com/defenseunicorns/terraform-aws-uds-bastion.git?ref=tags/v0.0.1-alpha" - - enable_bastion_terraform_permissions = true - - ami_id = data.aws_ami.amazonlinux2.id - instance_type = var.bastion_instance_type - root_volume_config = { - volume_type = "gp3" - volume_size = "20" - encrypted = true - } - name = local.bastion_name - vpc_id = module.vpc.vpc_id - subnet_id = module.vpc.private_subnets[0] - region = var.region - access_logs_bucket_name = aws_s3_bucket.access_log_bucket.id - session_log_bucket_name_prefix = "${local.bastion_name}-sessionlogs" - kms_key_arn = aws_kms_key.default.arn - ssh_user = var.bastion_ssh_user - ssh_password = var.bastion_ssh_password - assign_public_ip = false - enable_log_to_s3 = true - enable_log_to_cloudwatch = true - tenancy = var.bastion_tenancy - zarf_version = var.zarf_version - permissions_boundary = var.iam_role_permissions_boundary - tags = merge( - var.tags, - { Function = "bastion-ssm" }) -} - -########################################################### -################### EKS Cluster ########################### module "eks" { source = "../.." 
- name = local.cluster_name - aws_region = var.region - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - control_plane_subnet_ids = module.vpc.private_subnets - iam_role_permissions_boundary = var.iam_role_permissions_boundary - source_security_group_id = module.bastion.security_group_ids[0] - cluster_endpoint_public_access = var.cluster_endpoint_public_access - cluster_endpoint_private_access = true - vpc_cni_custom_subnet = module.vpc.intra_subnets - aws_admin_usernames = var.aws_admin_usernames - cluster_version = var.cluster_version - bastion_role_arn = module.bastion.bastion_role_arn - bastion_role_name = module.bastion.bastion_role_name - cidr_blocks = module.vpc.private_subnets_cidr_blocks - eks_use_mfa = var.eks_use_mfa + name = local.cluster_name + aws_region = var.region + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.private_subnets + iam_role_permissions_boundary = var.iam_role_permissions_boundary + cluster_security_group_additional_rules = local.cluster_security_group_additional_rules + cluster_endpoint_public_access = var.cluster_endpoint_public_access + cluster_endpoint_private_access = true + vpc_cni_custom_subnet = module.vpc.intra_subnets + aws_admin_usernames = var.aws_admin_usernames + cluster_version = var.cluster_version + cidr_blocks = module.vpc.private_subnets_cidr_blocks + eks_use_mfa = var.eks_use_mfa + aws_auth_roles = local.bastion_aws_auth_entry # If using EKS Managed Node Groups, the aws-auth ConfigMap is created by eks itself and terraform can not create it create_aws_auth_configmap = var.create_aws_auth_configmap @@ -242,7 +274,7 @@ module "eks" { self_managed_node_group_defaults = local.self_managed_node_group_defaults self_managed_node_groups = local.self_managed_node_groups - tags = var.tags + tags = local.tags @@ -253,32 +285,37 @@ module "eks" { cluster_addons = var.cluster_addons #--------------------------------------------------------------- - # EKS Blueprints - EKS Add-Ons + # EKS Blueprints - blueprints curated helm charts #--------------------------------------------------------------- # AWS EKS EBS CSI Driver enable_amazon_eks_aws_ebs_csi_driver = var.enable_amazon_eks_aws_ebs_csi_driver - amazon_eks_aws_ebs_csi_driver_config = var.amazon_eks_aws_ebs_csi_driver_config enable_gp3_default_storage_class = var.enable_gp3_default_storage_class storageclass_reclaim_policy = var.storageclass_reclaim_policy # AWS EKS EFS CSI Driver - enable_efs = var.enable_efs + enable_amazon_eks_aws_efs_csi_driver = var.enable_amazon_eks_aws_efs_csi_driver + aws_efs_csi_driver = var.aws_efs_csi_driver + reclaim_policy = var.reclaim_policy # AWS EKS node termination handler - enable_aws_node_termination_handler = var.enable_aws_node_termination_handler - aws_node_termination_handler_helm_config = var.aws_node_termination_handler_helm_config + enable_aws_node_termination_handler = var.enable_aws_node_termination_handler + aws_node_termination_handler = var.aws_node_termination_handler # k8s Metrics Server - enable_metrics_server = var.enable_metrics_server - metrics_server_helm_config = var.metrics_server_helm_config + enable_metrics_server = var.enable_metrics_server + metrics_server = var.metrics_server # k8s Cluster Autoscaler - enable_cluster_autoscaler = var.enable_cluster_autoscaler - cluster_autoscaler_helm_config = var.cluster_autoscaler_helm_config + enable_cluster_autoscaler = var.enable_cluster_autoscaler + cluster_autoscaler = var.cluster_autoscaler + + 
#---------------------------------------------------------------- + # custom helm charts + #---------------------------------------------------------------- #Calico - enable_calico = var.enable_calico - calico_helm_config = var.calico_helm_config + enable_calico = var.enable_calico + calico = var.calico } diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index 6ea937a..0199c71 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -3,19 +3,19 @@ output "bastion_instance_id" { description = "The ID of the bastion host" - value = module.bastion.instance_id + value = try(module.bastion[0].instance_id, null) sensitive = true } output "bastion_region" { description = "The region that the bastion host was deployed to" - value = module.bastion.region + value = try(module.bastion[0].region, null) sensitive = true } output "bastion_private_dns" { description = "The private DNS address of the bastion host" - value = module.bastion.private_dns + value = try(module.bastion[0].private_dns, null) sensitive = true } @@ -32,6 +32,6 @@ output "eks_cluster_name" { } output "efs_storageclass_name" { - description = "The name of the EFS storageclass that was created (if var.enable_efs was set to true)" + description = "The name of the EFS storageclass that was created (if var.enable_amazon_eks_aws_efs_csi_driver was set to true)" value = try(module.eks.efs_storageclass_name, null) } diff --git a/examples/complete/providers.tf b/examples/complete/providers.tf index 1e1e3d8..d5c372c 100644 --- a/examples/complete/providers.tf +++ b/examples/complete/providers.tf @@ -55,14 +55,6 @@ provider "aws" { # } } -provider "aws" { - alias = "region2" - region = var.region2 - # default_tags { - # tags = var.tags #bug https://github.com/hashicorp/terraform-provider-aws/issues/19583#issuecomment-855773246 - # } -} - provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf index 4fade98..c84d172 100644 --- a/examples/complete/variables.tf +++ b/examples/complete/variables.tf @@ -6,11 +6,6 @@ variable "region" { type = string } -variable "region2" { - description = "The AWS region to deploy into" - type = string -} - variable "name_prefix" { description = "The prefix to use when naming all resources" type = string @@ -139,17 +134,11 @@ EOD #----------------AWS EBS CSI Driver------------------------- variable "enable_amazon_eks_aws_ebs_csi_driver" { - description = "Enable EKS Managed AWS EBS CSI Driver add-on; enable_amazon_eks_aws_ebs_csi_driver and enable_self_managed_aws_ebs_csi_driver are mutually exclusive" + description = "Enable EKS Managed AWS EBS CSI Driver add-on" type = bool default = false } -variable "amazon_eks_aws_ebs_csi_driver_config" { - description = "configMap for AWS EBS CSI Driver add-on" - type = any - default = {} -} - variable "enable_gp3_default_storage_class" { description = "Enable gp3 as default storage class" type = bool @@ -169,8 +158,8 @@ variable "enable_metrics_server" { default = false } -variable "metrics_server_helm_config" { - description = "Metrics Server Helm Chart config" +variable "metrics_server" { + description = "Metrics Server config for aws-ia/eks-blueprints-addon/aws" type = any default = {} } @@ -182,8 +171,8 @@ variable "enable_aws_node_termination_handler" { default = false } -variable "aws_node_termination_handler_helm_config" { - description = "AWS Node Termination 
Handler Helm Chart config" +variable "aws_node_termination_handler" { + description = "AWS Node Termination Handler config for aws-ia/eks-blueprints-addon/aws" type = any default = {} } @@ -195,19 +184,25 @@ variable "enable_cluster_autoscaler" { default = false } -variable "cluster_autoscaler_helm_config" { +variable "cluster_autoscaler" { description = "Cluster Autoscaler Helm Chart config" type = any default = {} } #----------------Enable_EFS_CSI------------------------- -variable "enable_efs" { +variable "enable_amazon_eks_aws_efs_csi_driver" { description = "Enable EFS CSI add-on" type = bool default = false } +variable "aws_efs_csi_driver" { + description = "AWS EFS CSI Driver helm chart config" + type = any + default = {} +} + variable "reclaim_policy" { description = "Reclaim policy for EFS storage class, valid options are Delete and Retain" type = string @@ -221,7 +216,7 @@ variable "enable_calico" { default = true } -variable "calico_helm_config" { +variable "calico" { description = "Calico Helm Chart config" type = any default = {} @@ -259,3 +254,9 @@ variable "zarf_version" { type = string default = "" } + +variable "enable_bastion" { + description = "If true, a bastion will be created" + type = bool + default = true +} diff --git a/locals.tf b/locals.tf index 8518353..8d80083 100644 --- a/locals.tf +++ b/locals.tf @@ -2,50 +2,5 @@ locals { availability_zone_name = slice(data.aws_availability_zones.available.names, 0, 3) azs = slice(data.aws_availability_zones.available.names, 0, 3) - # var.cluster_name is for Terratest - cluster_name = coalesce(var.cluster_name, var.name) - admin_arns = distinct(concat( - [for admin_user in var.aws_admin_usernames : "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${admin_user}"], - [data.aws_caller_identity.current.arn] - )) - aws_auth_users = [for admin_user in var.aws_admin_usernames : { - userarn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${admin_user}" - username = admin_user - groups = ["system:masters"] - }] - - eks_admin_arns = length(local.admin_arns) == 0 ? [] : local.admin_arns - - # Used to resolve non-MFA policy. See https://docs.fugue.co/FG_R00255.html - auth_eks_role_policy = var.eks_use_mfa ? 
jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Action = "sts:AssumeRole" - Principal = { - AWS = local.eks_admin_arns - }, - Effect = "Allow" - Sid = "" - Condition = { - Bool = { - "aws:MultiFactorAuthPresent" = "true" - } - } - } - ] - }) : jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Action = "sts:AssumeRole" - Principal = { - AWS = local.eks_admin_arns - }, - Effect = "Allow" - Sid = "" - } - ] - }) } diff --git a/main.tf b/main.tf index 83d5e91..1959ad8 100644 --- a/main.tf +++ b/main.tf @@ -1,6 +1,73 @@ -#--------------------------------------------------------------- -# EKS Blueprints -#--------------------------------------------------------------- +############################################################### +# EKS Cluster +############################################################### +locals { + admin_arns = distinct(concat( + [for admin_user in var.aws_admin_usernames : "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${admin_user}"], + [data.aws_caller_identity.current.arn] + )) + aws_auth_users = [for admin_user in var.aws_admin_usernames : { + userarn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${admin_user}" + username = admin_user + groups = ["system:masters"] + }] + + eks_admin_arns = length(local.admin_arns) == 0 ? [] : local.admin_arns + + # Used to resolve non-MFA policy. See https://docs.fugue.co/FG_R00255.html + auth_eks_role_policy = var.eks_use_mfa ? jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole" + Principal = { + AWS = local.eks_admin_arns + }, + Effect = "Allow" + Sid = "" + Condition = { + Bool = { + "aws:MultiFactorAuthPresent" = "true" + } + } + } + ] + }) : jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole" + Principal = { + AWS = local.eks_admin_arns + }, + Effect = "Allow" + Sid = "" + } + ] + }) + + aws_eks_auth_roles = concat( + var.aws_auth_roles, + [ + { + rolearn = aws_iam_role.auth_eks_role.arn + username = aws_iam_role.auth_eks_role.name + groups = ["system:masters"] + } + ]) + + #merge in irsa role arn + ebs_csi_driver_addon_extra_config = var.enable_amazon_eks_aws_ebs_csi_driver ? { + aws-ebs-csi-driver = { + service_account_role_arn = module.ebs_csi_driver_irsa[0].iam_role_arn + } + } : {} + + cluster_addons = merge( + var.cluster_addons, + local.ebs_csi_driver_addon_extra_config + ) +} module "aws_eks" { source = "git::https://github.com/terraform-aws-modules/terraform-aws-eks.git?ref=v19.15.3" @@ -22,7 +89,7 @@ module "aws_eks" { dataplane_wait_duration = var.dataplane_wait_duration - cluster_addons = var.cluster_addons + cluster_addons = local.cluster_addons #----------------------------------------------------------------------------------------------------------# # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). @@ -30,42 +97,19 @@ module "aws_eks" { # So, by default the security groups are restrictive. 
Users needs to enable rules for specific ports required for App requirement or Add-ons # See the notes below for each rule used in these examples #----------------------------------------------------------------------------------------------------------# - cluster_security_group_additional_rules = { - ingress_bastion_to_cluster = { - # name = "allow bastion ingress to cluster" - description = "Bastion SG to Cluster" - security_group_id = module.aws_eks.cluster_security_group_id - from_port = 443 - to_port = 443 - protocol = "tcp" - type = "ingress" - source_security_group_id = var.source_security_group_id - } - } + cluster_security_group_additional_rules = var.cluster_security_group_additional_rules + create_aws_auth_configmap = var.create_aws_auth_configmap manage_aws_auth_configmap = var.manage_aws_auth_configmap kms_key_administrators = distinct(concat(local.admin_arns, var.kms_key_administrators)) aws_auth_users = distinct(concat(local.aws_auth_users, var.aws_auth_users)) - aws_auth_roles = [ - { - rolearn = aws_iam_role.auth_eks_role.arn - username = aws_iam_role.auth_eks_role.name - groups = ["system:masters"] - }, - { - rolearn = var.bastion_role_arn - username = var.bastion_role_name - groups = ["system:masters"] - } - ] + aws_auth_roles = local.aws_eks_auth_roles tags = var.tags } - - resource "aws_iam_role" "auth_eks_role" { name = "${var.name}-auth-eks-role" description = "EKS AuthConfig Role" @@ -73,59 +117,3 @@ resource "aws_iam_role" "auth_eks_role" { assume_role_policy = local.auth_eks_role_policy # max_session_duration = var.eks_iam_role_max_session } - -#--------------------------------------------------------------- -# EFS Configurations -#--------------------------------------------------------------- - -resource "random_id" "efs_name" { - byte_length = 2 - prefix = "EFS-" -} - -resource "kubernetes_storage_class_v1" "efs" { - count = var.enable_efs ? 1 : 0 - metadata { - name = lower(random_id.efs_name.hex) - } - - storage_provisioner = "efs.csi.aws.com" - reclaim_policy = var.reclaim_policy - parameters = { - provisioningMode = "efs-ap" # Dynamic provisioning - fileSystemId = module.efs[0].id - directoryPerms = "700" - } - mount_options = [ - "iam" - ] - - depends_on = [ - module.eks_blueprints_kubernetes_addons - ] -} - -module "efs" { - source = "terraform-aws-modules/efs/aws" - version = "~> 1.0" - - count = var.enable_efs ? 
1 : 0 - - name = lower(random_id.efs_name.hex) - # Mount targets / security group - mount_targets = { - for k, v in zipmap(local.availability_zone_name, var.private_subnet_ids) : k => { subnet_id = v } - } - - security_group_description = "${local.cluster_name} EFS security group" - security_group_vpc_id = var.vpc_id - security_group_rules = { - vpc = { - # relying on the defaults provdied for EFS/NFS (2049/TCP + ingress) - description = "NFS ingress from VPC private subnets" - cidr_blocks = var.cidr_blocks - } - } - - tags = var.tags -} diff --git a/outputs.tf b/outputs.tf index f8f8f74..ef40db3 100644 --- a/outputs.tf +++ b/outputs.tf @@ -42,7 +42,7 @@ output "cluster_certificate_authority_data" { } output "efs_storageclass_name" { - description = "The name of the EFS storageclass that was created (if var.enable_efs was set to true)" + description = "The name of the EFS storageclass that was created (if var.enable_amazon_eks_aws_efs_csi_driver was set to true)" value = try(kubernetes_storage_class_v1.efs[0].id, null) } @@ -50,3 +50,8 @@ output "cluster_iam_role_arn" { description = "EKS cluster IAM role ARN" value = module.aws_eks.cluster_iam_role_arn } + +output "cluster_security_group_id" { + description = "EKS cluster security group ID" + value = module.aws_eks.cluster_security_group_id +} diff --git a/variables.tf b/variables.tf index 5d8343a..27e31f0 100644 --- a/variables.tf +++ b/variables.tf @@ -1,6 +1,6 @@ # tflint-ignore: terraform_unused_declarations variable "cluster_name" { - description = "Name of cluster - used by Terratest for e2e test automation" + description = "Name of cluster" type = string default = "" } @@ -85,6 +85,12 @@ variable "aws_admin_usernames" { default = [] } +variable "aws_auth_roles" { + description = "List of role maps to add to the aws-auth configmap" + type = list(any) + default = [] +} + variable "create_aws_auth_configmap" { description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap`" type = bool @@ -121,22 +127,10 @@ variable "vpc_cni_custom_subnet" { default = [] } -variable "source_security_group_id" { - description = "List of additional rules to add to cluster security group" - type = string - default = "" -} - -variable "bastion_role_arn" { - description = "ARN of role authorized kubectl access" - type = string - default = "" -} - -variable "bastion_role_name" { - description = "Name of role authorized kubectl access" - type = string - default = "" +variable "cluster_security_group_additional_rules" { + description = "List of additional security group rules to add to the cluster security group created. 
Set `source_node_security_group = true` inside rules to set the `node_security_group` as source" + type = any + default = {} } variable "dataplane_wait_duration" { @@ -191,17 +185,11 @@ EOD #----------------AWS EBS CSI Driver------------------------- variable "enable_amazon_eks_aws_ebs_csi_driver" { - description = "Enable EKS Managed AWS EBS CSI Driver add-on; enable_amazon_eks_aws_ebs_csi_driver and enable_self_managed_aws_ebs_csi_driver are mutually exclusive" + description = "Enable EKS Managed AWS EBS CSI Driver add-on" type = bool default = false } -variable "amazon_eks_aws_ebs_csi_driver_config" { - description = "configMap for AWS EBS CSI Driver add-on" - type = any - default = {} -} - variable "enable_gp3_default_storage_class" { description = "Enable gp3 as default storage class" type = bool @@ -215,11 +203,16 @@ variable "storageclass_reclaim_policy" { } #----------------AWS EFS CSI Driver------------------------- -variable "enable_efs" { +variable "enable_amazon_eks_aws_efs_csi_driver" { description = "Enable EFS CSI Driver add-on" type = bool default = false +} +variable "aws_efs_csi_driver" { + description = "AWS EFS CSI Driver helm chart config" + type = any + default = {} } variable "reclaim_policy" { @@ -239,8 +232,8 @@ variable "enable_metrics_server" { default = false } -variable "metrics_server_helm_config" { - description = "Metrics Server Helm Chart config" +variable "metrics_server" { + description = "Metrics Server config for aws-ia/eks-blueprints-addon/aws" type = any default = {} } @@ -252,12 +245,11 @@ variable "enable_aws_node_termination_handler" { default = false } -variable "aws_node_termination_handler_helm_config" { - description = "AWS Node Termination Handler Helm Chart config" +variable "aws_node_termination_handler" { + description = "AWS Node Termination Handler config for aws-ia/eks-blueprints-addon/aws" type = any default = {} } - #----------------Cluster Autoscaler------------------------- variable "enable_cluster_autoscaler" { description = "Enable Cluster autoscaler add-on" @@ -265,7 +257,7 @@ variable "enable_cluster_autoscaler" { default = false } -variable "cluster_autoscaler_helm_config" { +variable "cluster_autoscaler" { description = "Cluster Autoscaler Helm Chart config" type = any default = { @@ -294,10 +286,10 @@ variable "cluster_autoscaler_helm_config" { variable "enable_calico" { description = "Enable Calico add-on" type = bool - default = false + default = true } -variable "calico_helm_config" { +variable "calico" { description = "Calico Helm Chart config" type = any default = {}
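# ------------------------------------------------------------------------------
# Example usage (illustrative sketch only): how a consuming root module might
# wire the inputs introduced in this diff. Names such as module.bastion are
# assumptions borrowed from examples/complete, not part of this module's API.
#
# module "eks" {
#   source = "../.."
#
#   # replaces the removed bastion_role_arn / bastion_role_name inputs
#   aws_auth_roles = [
#     {
#       rolearn  = module.bastion[0].bastion_role_arn
#       username = module.bastion[0].bastion_role_name
#       groups   = ["system:masters"]
#     }
#   ]
#
#   # replaces the removed source_security_group_id input
#   cluster_security_group_additional_rules = {
#     ingress_bastion_to_cluster = {
#       description              = "Bastion SG to Cluster"
#       from_port                = 443
#       to_port                  = 443
#       protocol                 = "tcp"
#       type                     = "ingress"
#       source_security_group_id = module.bastion[0].security_group_ids[0]
#     }
#   }
#
#   # helm-style overrides for the custom Calico add-on
#   calico = {
#     chart_version = "v3.26.1"
#     wait          = false
#   }
# }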