Skip to content

Commit

Permalink
Fix undefined panic when using .map (#1214)
Browse files Browse the repository at this point in the history
### Proposed changes

Adds an `undefined` guard before attempting to call `.map()`. This is
needed because MLC programs may send `undefined` instead of an empty
array.

This PR also updates the error handling to throw `pulumi.ResourceError`
instead of a generic Node.js error.

### Related issues (optional)


Fixes: #1202
  • Loading branch information
rquitales authored Jun 27, 2024
1 parent f6c92bb commit 0fa2f99
Show file tree
Hide file tree
Showing 3 changed files with 190 additions and 31 deletions.
115 changes: 115 additions & 0 deletions examples/cluster-py/step2/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
"""Pulumi EKS example program (step 2).

Provisions three EKS clusters with different configurations, installs the
CoreDNS managed addon on the third cluster, and then creates a managed node
group whose node role is never added to the cluster's instanceRoles -- the
deployment is expected to fail with a clear error rather than a panic
(regression check for https://github.com/pulumi/pulumi-eks/issues/1202).
"""
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

# Local component resource; this program reads its vpc_id, public_subnet_ids
# and private_subnet_ids outputs below.
from vpc import Vpc

project_name = pulumi.get_project()

# Create an EKS cluster with the default configuration.
cluster1 = eks.Cluster(f"{project_name}-1")

# Create an EKS cluster with a non-default configuration.
# TODO specify tags: { "Name": f"{project_name}-2" }
vpc = Vpc(f"{project_name}-2")

# Cluster with explicit networking, a fixed-size (2-node) node group, a
# default encrypted gp2 storage class, and control-plane logging enabled.
cluster2 = eks.Cluster('eks-cluster',
vpc_id=vpc.vpc_id,
public_subnet_ids=vpc.public_subnet_ids,
public_access_cidrs=['0.0.0.0/0'],
desired_capacity=2,
min_size=2,
max_size=2,
instance_type='t3.micro',
# set storage class.
storage_classes={"gp2": eks.StorageClassArgs(
type='gp2', allow_volume_expansion=True, default=True, encrypted=True,)},
enabled_cluster_log_types=[
"api",
"audit",
"authenticator",
],)

# IAM role assumable by EC2 instances. It is granted cluster access through
# an access entry on cluster3 below and reused as the managed node group's
# node role at the bottom of this program.
iam_role = aws.iam.Role(
    f"{project_name}-role",
    assume_role_policy=pulumi.Output.json_dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Principal": {
                "Service": "ec2.amazonaws.com"
            }
        }]
    })
)

# Cluster using API_AND_CONFIG_MAP authentication mode, with an access entry
# granting iam_role view-only access (AmazonEKSViewPolicy) scoped to the
# "default" and "application" namespaces.
cluster3 = eks.Cluster(f"{project_name}-3",
    vpc_id=vpc.vpc_id,
    public_subnet_ids=vpc.public_subnet_ids,
    node_group_options=eks.ClusterNodeGroupOptionsArgs(
        desired_capacity=1,
        min_size=1,
        max_size=1,
        instance_type="t3.small"
    ),
    authentication_mode=eks.AuthenticationMode.API_AND_CONFIG_MAP,
    access_entries={
        f'{project_name}-role': eks.AccessEntryArgs(
            principal_arn=iam_role.arn, kubernetes_groups=["test-group"], access_policies={
                'view': eks.AccessPolicyAssociationArgs(
                    policy_arn="arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy",
                    access_scope=aws.eks.AccessPolicyAssociationAccessScopeArgs(namespaces=["default", "application"], type="namespace")
                )
            }
        )
    }
)

##########################
###    EKS Addons      ###
##########################

# Install the CoreDNS managed addon on cluster3, pinning the addon version
# and overriding its replica count and pod resource requests/limits.
coredns = eks.Addon(
    f"{project_name}-cluster3-coredns",
    cluster=cluster3,
    addon_name="coredns",
    addon_version="v1.11.1-eksbuild.9",
    resolve_conflicts_on_update="PRESERVE",
    configuration_values={
        "replicaCount": 4,
        "resources": {
            "limits": {
                "cpu": "100m",
                "memory": "150Mi",
            },
            "requests": {
                "cpu": "100m",
                "memory": "150Mi",
            },
        },
    },
)


##############################
###    MNG Panic Test      ###
##############################

# Create a managed node group and attach it to a cluster using ConfigMap auth.
# Do not add the role to the cluster's aws-auth ConfigMap and we should expect this to fail.
managed_node_group_infra = eks.ManagedNodeGroup("mng-panic-test",
    cluster=cluster2,
    subnet_ids=vpc.private_subnet_ids,
    node_role=iam_role,
    scaling_config=aws.eks.NodeGroupScalingConfigArgs(
        desired_size=1,
        min_size=1,
        max_size=1,
    ),
)

# Export the clusters' kubeconfig.
pulumi.export("kubeconfig1", cluster1.kubeconfig)
pulumi.export("kubeconfig2", cluster2.kubeconfig)
pulumi.export("kubeconfig3", cluster3.kubeconfig)
20 changes: 20 additions & 0 deletions examples/examples_py_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,14 @@
package example

import (
"bytes"
"path"
"path/filepath"
"testing"

"github.com/pulumi/pulumi-eks/examples/utils"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/stretchr/testify/assert"
)

func TestAccAwsProfilePy(t *testing.T) {
Expand Down Expand Up @@ -63,6 +65,9 @@ func TestAccAwsProfileRolePy(t *testing.T) {
}

func TestAccClusterPy(t *testing.T) {
// Create io.Writer to capture stderr and stdout from Pulumi CLI.
var output bytes.Buffer

test := getPythonBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: filepath.Join(getCwd(t), "cluster-py"),
Expand All @@ -74,6 +79,21 @@ func TestAccClusterPy(t *testing.T) {
info.Outputs["kubeconfig3"],
)
},
EditDirs: []integration.EditDir{
{
Dir: path.Join(getCwd(t), "cluster-py", "step2"),
ExpectFailure: true,
Additive: true,
Stderr: &output,
Stdout: &output,
ExtraRuntimeValidation: func(t *testing.T, info integration.RuntimeValidationStackInfo) {
// Ensure that the panic from https://github.com/pulumi/pulumi-eks/issues/1202 does not occur.
combinedOutput := output.String()
assert.NotContains(t, combinedOutput, "Cannot read properties of undefined")
assert.Contains(t, combinedOutput, "A managed node group cannot be created without first setting its role in the cluster's instanceRoles")
},
},
},
})

integration.ProgramTest(t, &test)
Expand Down
Loading

0 comments on commit 0fa2f99

Please sign in to comment.