Skip to content

Commit

Permalink
Merge pull request #353 from vmware/feature/eks_wait_for_cluster_healthy
Browse files Browse the repository at this point in the history
EKS wait for management cluster and pinniped to be healthy and set kubeconfig when wait_for_kubeconfig is set to true
  • Loading branch information
sreenivasmrpivot authored Jan 19, 2024
2 parents 4dfb7a3 + c947580 commit 88ddf24
Show file tree
Hide file tree
Showing 7 changed files with 161 additions and 14 deletions.
2 changes: 2 additions & 0 deletions docs/data-sources/ekscluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,10 +48,12 @@ data "tanzu-mission-control_ekscluster" "tf_eks_cluster" {
- `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta))
- `ready_wait_timeout` (String) Wait timeout duration until cluster resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero
- `spec` (Block List, Max: 1) Spec for the cluster (see [below for nested schema](#nestedblock--spec))
- `wait_for_kubeconfig` (Boolean) Wait until pinniped extension is ready to provide kubeconfig

### Read-Only

- `id` (String) The ID of this resource.
- `kubeconfig` (String) Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig.
- `status` (Map of String) Status of the cluster

<a id="nestedblock--meta"></a>
Expand Down
21 changes: 10 additions & 11 deletions docs/resources/ekscluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -80,22 +80,19 @@ resource "tanzu-mission-control_ekscluster" "tf_eks_cluster" {
]
}
addons_config {
addons_config { // this whole section is optional
vpc_cni_config {
eni_config {
id = "subnet-0a680171b6330619f" // Required, should belong to the same VPC as the cluster
security_groups = [
id = "subnet-0a680171b6330619f" // Required, need not belong to the same VPC as the cluster, subnets provided in vpc_cni_config are expected to be in different AZs
security_groups = [ //optional, if not provided, the cluster security group will be used
"sg-00c96ad9d02a22522",
]
}
eni_config {
id = "subnet-06feb0bb0451cda78" // Required, should belong to the same VPC as the cluster
security_groups = [
"sg-00c96ad9d02a22522",
]
id = "subnet-06feb0bb0451cda79" // Required, need not belong to the same VPC as the cluster, subnets provided in vpc_cni_config are expected to be in different AZs
}
}
}
}
}
nodepool {
Expand Down Expand Up @@ -213,10 +210,12 @@ resource "tanzu-mission-control_ekscluster" "tf_eks_cluster" {
- `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta))
- `ready_wait_timeout` (String) Wait timeout duration until cluster resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero
- `spec` (Block List, Max: 1) Spec for the cluster (see [below for nested schema](#nestedblock--spec))
- `wait_for_kubeconfig` (Boolean) Wait until pinniped extension is ready to provide kubeconfig

### Read-Only

- `id` (String) The ID of this resource.
- `kubeconfig` (String) Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig.
- `status` (Map of String) Status of the cluster

<a id="nestedblock--meta"></a>
Expand Down Expand Up @@ -283,14 +282,14 @@ Optional:

Optional:

- `vpc_cni_config` (Block List, Max: 1) VPC CNI addon config contains the configuration for the VPC CNI addon of the cluster. (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config))
- `vpc_cni_config` (Block List, Max: 1) VPC CNI addon config contains the configuration for the VPC CNI addon of the cluster (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config))

<a id="nestedblock--spec--config--addons_config--vpc_cni_config"></a>
### Nested Schema for `spec.config.addons_config.vpc_cni_config`

Optional:

- `eni_config` (Block List) ENI config is the VPC CNI Elastic Network Interface config for providing the configuration of subnet and security groups for pods in each AZ. Subnets need not be in the same VPC as the cluster. The subnets provided across eniConfigs should be in different availability zones. Nodepool subnets need to be in the same AZ as the AZs used in ENIConfig. (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config--eni_config))
- `eni_config` (Block List) ENI config is the VPC CNI Elastic Network Interface config for providing the configuration of subnet and security groups for pods in each AZ. Subnets need not be in the same VPC as the cluster. The subnets provided across eniConfigs should be in different availability zones. Nodepool subnets need to be in the same AZ as the AZs used in ENIConfig. (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config--eni_config))

<a id="nestedblock--spec--config--addons_config--vpc_cni_config--eni_config"></a>
### Nested Schema for `spec.config.addons_config.vpc_cni_config.eni_config`
Expand All @@ -301,7 +300,7 @@ Required:

Optional:

- `security_groups` (Set of String) List of security group is optional and if not provided default security group created by EKS will be used.
- `security_groups` (Set of String) The list of security groups is optional; if not provided, the default security group created by EKS will be used.



Expand Down
6 changes: 6 additions & 0 deletions internal/models/ekscluster/spec.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions internal/resources/ekscluster/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,4 +69,6 @@ const (
releaseVersionKey = "release_version"
readyCondition = "Ready"
errorSeverity = "ERROR"
waitForKubeconfig = "wait_for_kubeconfig"
kubeconfigKey = "kubeconfig"
)
62 changes: 62 additions & 0 deletions internal/resources/ekscluster/data_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ import (
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper"
eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster"
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common"

clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster"
configModels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/kubeconfig"
)

func DataSourceTMCEKSCluster() *schema.Resource {
Expand Down Expand Up @@ -79,6 +82,49 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m
return true, nil
}

if isWaitForKubeconfig(d) {
clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{
Name: resp.EksCluster.Spec.AgentName,
OrgID: clusterFn.OrgID,
ManagementClusterName: "eks",
ProvisionerName: "eks",
}
clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName)
// nolint: wsl
if err != nil {
log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s, error : %s", clusterFn.Name, err.Error())
return true, err
}

clusterHealthy, err := isClusterHealthy(clusterResp)
if err != nil || !clusterHealthy {
log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name)
return true, err
}

fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{
ManagementClusterName: "eks",
ProvisionerName: "eks",
Name: resp.EksCluster.Spec.AgentName,
}
resp, err := config.TMCConnection.KubeConfigResourceService.KubeconfigServiceGet(fn)
// nolint: wsl
if err != nil {
log.Printf("Unable to get Tanzu Mission Control Kubeconfig entry, name : %s, error : %s", fn.Name, err.Error())
return true, err
}

if kubeConfigReady(resp) {
if err = d.Set(kubeconfigKey, resp.Kubeconfig); err != nil {
log.Printf("Failed to set Kubeconfig for cluster %s, error : %s", clusterFn.Name, err.Error())
return false, err
}
} else {
log.Printf("[DEBUG] waiting for cluster(%s)'s Kubeconfig to be in Ready status", clusterFn.Name)
return true, nil
}
}

return false, nil
}

Expand Down Expand Up @@ -124,6 +170,22 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m
return diags
}

// isClusterHealthy reports whether the given TMC cluster response carries a
// HEALTHY health status. It returns an error when the response or any nested
// field needed to read the health status is nil.
func isClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) (bool, error) {
	if cluster == nil || cluster.Cluster == nil || cluster.Cluster.Status == nil || cluster.Cluster.Status.Health == nil {
		return false, errors.New("cluster data is invalid or nil")
	}

	healthy := *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY

	return healthy, nil
}

// kubeConfigReady reports whether the kubeconfig returned by TMC is in the
// READY state. A nil response or nil status is treated as "not ready" instead
// of being dereferenced; the previous version panicked with a nil pointer
// dereference when the kubeconfig service returned an empty response.
func kubeConfigReady(resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool {
	if resp == nil || resp.Status == nil {
		return false
	}

	return *resp.Status == configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponseStatusREADY
}

func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster, remoteNodepools []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolNodepool) error {
status := map[string]interface{}{
// TODO: add condition
Expand Down
62 changes: 59 additions & 3 deletions internal/resources/ekscluster/data_source_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,3 @@
//go:build ekscluster
// +build ekscluster

/*
Copyright 2022 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: MPL-2.0
Expand All @@ -11,8 +8,10 @@ package ekscluster
import (
"testing"

"github.com/pkg/errors"
"github.com/stretchr/testify/require"

clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster"
eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster"
)

Expand Down Expand Up @@ -62,3 +61,60 @@ func TestNodepoolPosMap(t *testing.T) {
})
}
}

// TestIsManagemetClusterHealthy exercises isClusterHealthy for healthy,
// unhealthy, and invalid (nil status) cluster responses.
//
// The previous assertion logic only compared errors when an error was actually
// returned, so a case that EXPECTED an error but got none passed silently, and
// an unexpected error skipped the result comparison without failing. Each case
// now checks both the error and the boolean result explicitly.
func TestIsManagemetClusterHealthy(t *testing.T) {
	tests := []struct {
		name     string
		cluster  *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse
		response bool
		err      error
	}{
		{
			name: "Not healthy",
			cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{
				Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{
					Status: &clustermodel.VmwareTanzuManageV1alpha1ClusterStatus{
						Health: clustermodel.NewVmwareTanzuManageV1alpha1CommonClusterHealth(clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthUNHEALTHY),
					},
				},
			},
			response: false,
			err:      nil,
		},
		{
			name: "Healthy",
			cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{
				Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{
					Status: &clustermodel.VmwareTanzuManageV1alpha1ClusterStatus{
						Health: clustermodel.NewVmwareTanzuManageV1alpha1CommonClusterHealth(clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY),
					},
				},
			},
			response: true,
			err:      nil,
		},
		{
			name: "Error",
			cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{
				Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{
					Status: nil,
				},
			},
			response: false,
			err:      errors.New("cluster data is invalid or nil"),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := isClusterHealthy(test.cluster)

			if test.err != nil {
				// An error is expected: fail if none was returned or the
				// message differs.
				if err == nil || err.Error() != test.err.Error() {
					t.Errorf("expected error %q, got %v", test.err, err)
				}

				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}

			if result != test.response {
				t.Errorf("expected %v, got %v", test.response, result)
			}
		})
	}
}
20 changes: 20 additions & 0 deletions internal/resources/ekscluster/resource_ekscluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,17 @@ var clusterSchema = map[string]*schema.Schema{
return true
},
},
waitForKubeconfig: {
Type: schema.TypeBool,
Description: "Wait until pinniped extension is ready to provide kubeconfig",
Default: false,
Optional: true,
},
kubeconfigKey: {
Type: schema.TypeString,
Description: "Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig.",
Computed: true,
},
}

var clusterSpecSchema = &schema.Schema{
Expand Down Expand Up @@ -958,3 +969,12 @@ func flattenEniConfig(item *eksmodel.VmwareTanzuManageV1alpha1EksclusterEniConfi

return data
}

// isWaitForKubeconfig reports whether the wait_for_kubeconfig attribute is set
// to true in the resource data. It defaults to false when the attribute is
// absent.
func isWaitForKubeconfig(data *schema.ResourceData) bool {
	if v := data.Get(waitForKubeconfig); v != nil {
		return v.(bool)
	}

	return false
}

0 comments on commit 88ddf24

Please sign in to comment.