Merge branch 'main' into tallaxes/expectations
tallaxes authored Dec 2, 2024
2 parents cad3d60 + 44cddef commit 72d489a
Showing 3 changed files with 12 additions and 22 deletions.
6 changes: 5 additions & 1 deletion hack/deploy/configure-values.sh
@@ -16,6 +16,9 @@ AZURE_RESOURCE_GROUP=$2
KARPENTER_SERVICE_ACCOUNT_NAME=$3
AZURE_KARPENTER_USER_ASSIGNED_IDENTITY_NAME=$4

+# Optional values through env vars:
+LOG_LEVEL=${LOG_LEVEL:-"info"}
+
AKS_JSON=$(az aks show --name "$CLUSTER_NAME" --resource-group "$AZURE_RESOURCE_GROUP" -o json)
AZURE_LOCATION=$(jq -r ".location" <<< "$AKS_JSON")
AZURE_RESOURCE_GROUP_MC=$(jq -r ".nodeResourceGroup" <<< "$AKS_JSON")
@@ -47,7 +50,8 @@ NODE_IDENTITIES=$(jq -r ".identityProfile.kubeletidentity.resourceId" <<< "$AKS_
KARPENTER_USER_ASSIGNED_CLIENT_ID=$(az identity show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_KARPENTER_USER_ASSIGNED_IDENTITY_NAME}" --query 'clientId' -otsv)

export CLUSTER_NAME AZURE_LOCATION AZURE_RESOURCE_GROUP_MC KARPENTER_SERVICE_ACCOUNT_NAME \
-CLUSTER_ENDPOINT BOOTSTRAP_TOKEN SSH_PUBLIC_KEY VNET_SUBNET_ID KARPENTER_USER_ASSIGNED_CLIENT_ID NODE_IDENTITIES AZURE_SUBSCRIPTION_ID NETWORK_PLUGIN NETWORK_PLUGIN_MODE NETWORK_POLICY
+CLUSTER_ENDPOINT BOOTSTRAP_TOKEN SSH_PUBLIC_KEY VNET_SUBNET_ID KARPENTER_USER_ASSIGNED_CLIENT_ID NODE_IDENTITIES AZURE_SUBSCRIPTION_ID NETWORK_PLUGIN NETWORK_PLUGIN_MODE NETWORK_POLICY \
+LOG_LEVEL

# get karpenter-values-template.yaml, if not already present (e.g. outside of repo context)
if [ ! -f karpenter-values-template.yaml ]; then
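With this change, log verbosity can be tuned per invocation without editing the script. A minimal usage sketch (the argument values are placeholders, and $1 being the cluster name is inferred from the az aks show call above rather than stated in this diff):

$ LOG_LEVEL=debug ./hack/deploy/configure-values.sh my-cluster my-resource-group karpenter-sa karpentermsi

Leaving LOG_LEVEL unset falls back to the "info" default set above.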
1 change: 1 addition & 0 deletions karpenter-values-template.yaml
@@ -43,3 +43,4 @@ serviceAccount:
azure.workload.identity/client-id: ${KARPENTER_USER_ASSIGNED_CLIENT_ID}
podLabels:
azure.workload.identity/use: "true"
+logLevel: ${LOG_LEVEL}
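For illustration, assuming the exported variables are substituted into this template by an envsubst-style rendering step (the debug value and output filename below are placeholders, not from this commit):

$ export LOG_LEVEL=debug
$ envsubst < karpenter-values-template.yaml > karpenter-values.yaml
$ grep logLevel karpenter-values.yaml   # expected: logLevel: debug

Defaulting LOG_LEVEL to "info" in configure-values.sh keeps this placeholder from rendering empty when the variable is not set.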
27 changes: 6 additions & 21 deletions test/suites/integration/expiration_test.go
@@ -18,15 +18,13 @@ package integration_test

import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"

karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"

@@ -53,7 +51,8 @@ var _ = Describe("Expiration", func() {
selector = labels.SelectorFromSet(dep.Spec.Selector.MatchLabels)
})
It("should expire the node after the expiration is reached", func() {
nodePool.Spec.Template.Spec.ExpireAfter = karpv1.MustParseNillableDuration("2m")
// Set expire after large enough to make sure the new nodes are not expired before workloads are moved over.
nodePool.Spec.Template.Spec.ExpireAfter = karpv1.MustParseNillableDuration("3m")
env.ExpectCreated(nodeClass, nodePool, dep)

nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
@@ -62,15 +61,8 @@
env.Monitor.Reset() // Reset the monitor so that we can expect a single node to be spun up after expiration

// Eventually the node will be tainted, which means its actively being disrupted
-Eventually(func(g Gomega) {
-g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), node)).Should(Succeed())
-_, ok := lo.Find(node.Spec.Taints, func(t corev1.Taint) bool {
-return t.MatchTaint(&karpv1.DisruptedNoScheduleTaint)
-})
-g.Expect(ok).To(BeTrue())
-}).Should(Succeed())
-
-env.EventuallyExpectCreatedNodeCount("==", 2)
+env.EventuallyExpectTaintedNodeCount("==", 1)
+env.EventuallyExpectCreatedNodeCount("==", 1)
// Set the limit to 0 to make sure we don't continue to create nodeClaims.
// This is CRITICAL since it prevents leaking node resources into subsequent tests
nodePool.Spec.Limits = karpv1.Limits{
@@ -109,15 +101,8 @@
env.Monitor.Reset() // Reset the monitor so that we can expect a single node to be spun up after expiration
env.Monitor.Reset() // Reset the monitor so that we can expect a single node to be spun up after expiration

// Eventually the node will be tainted, which means its actively being disrupted
Eventually(func(g Gomega) {
g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), node)).Should(Succeed())
_, ok := lo.Find(node.Spec.Taints, func(t corev1.Taint) bool {
return t.MatchTaint(&karpv1.DisruptedNoScheduleTaint)
})
g.Expect(ok).To(BeTrue())
}).Should(Succeed())

env.EventuallyExpectCreatedNodeCount("==", 2)
env.EventuallyExpectTaintedNodeCount("==", 1)
env.EventuallyExpectCreatedNodeCount("==", 1)
// Set the limit to 0 to make sure we don't continue to create nodeClaims.
// This is CRITICAL since it prevents leaking node resources into subsequent tests
nodePool.Spec.Limits = karpv1.Limits{
Expand Down
