test(e2e): update drift suite #650

Open · wants to merge 15 commits into base: main
8 changes: 4 additions & 4 deletions Makefile-az.mk
@@ -47,7 +47,7 @@ az-mkacr: az-mkrg ## Create test ACR
az-acrimport: ## Imports an image to an acr registry
az acr import --name $(AZURE_ACR_NAME) --source "mcr.microsoft.com/oss/kubernetes/pause:3.6" --image "pause:3.6"

az-cleanenv: az-rmnodeclaims-fin ## Deletes a few common karpenter testing resources(pods, nodepools, nodeclaims, aksnodeclasses)
kubectl delete deployments -n default --all
kubectl delete pods -n default --all
kubectl delete nodeclaims --all
@@ -138,7 +138,7 @@ az-perm: ## Create role assignments to let Karpenter manage VMs and Network
az role assignment create --assignee $(KARPENTER_USER_ASSIGNED_CLIENT_ID) --scope /subscriptions/$(AZURE_SUBSCRIPTION_ID)/resourceGroups/$(AZURE_RESOURCE_GROUP) --role "Network Contributor" # in some case we create vnet here
@echo Consider "make az-configure-values"!

az-perm-sig: ## Create role assignments when testing with SIG images
$(eval KARPENTER_USER_ASSIGNED_CLIENT_ID=$(shell az identity show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_KARPENTER_USER_ASSIGNED_IDENTITY_NAME}" --query 'principalId' -otsv))
az role assignment create --assignee $(KARPENTER_USER_ASSIGNED_CLIENT_ID) --role "Reader" --scope /subscriptions/$(AZURE_SIG_SUBSCRIPTION_ID)/resourceGroups/AKS-Ubuntu/providers/Microsoft.Compute/galleries/AKSUbuntu
az role assignment create --assignee $(KARPENTER_USER_ASSIGNED_CLIENT_ID) --role "Reader" --scope /subscriptions/$(AZURE_SIG_SUBSCRIPTION_ID)/resourceGroups/AKS-AzureLinux/providers/Microsoft.Compute/galleries/AKSAzureLinux
@@ -208,7 +208,7 @@ az-debug-bootstrap: ## Debug bootstrap (target first privateIP of the first NIC
az-cleanup: ## Delete the deployment
skaffold delete || true

az-deploy-goldpinger: ## Deploy goldpinger for testing networking
kubectl apply -f https://gist.githubusercontent.com/paulgmiller/084bd4605f1661a329e5ab891a826ae0/raw/94a32d259e137bb300ac8af3ef71caa471463f23/goldpinger-daemon.yaml
kubectl apply -f https://gist.githubusercontent.com/paulgmiller/7bca68cd08cccb4e9bc72b0a08485edf/raw/d6a103fb79a65083f6555e4d822554ed64f510f8/goldpinger-deploy.yaml

@@ -261,7 +261,7 @@ az-taintsystemnodes: ## Taint all system nodepool nodes
az-taintnodes:
kubectl taint nodes CriticalAddonsOnly=true:NoSchedule --all --overwrite

-az-e2etests: ## Run e2etests
+az-e2etests: az-cleanenv ## Run e2etests
kubectl taint nodes CriticalAddonsOnly=true:NoSchedule --all --overwrite
TEST_SUITE=Utilization make e2etests
kubectl taint nodes CriticalAddonsOnly=true:NoSchedule- --all
2 changes: 1 addition & 1 deletion pkg/apis/v1alpha2/aksnodeclass.go
@@ -51,7 +51,7 @@ type AKSNodeClassSpec struct {
// Wherever possible, the types and names should reflect the upstream kubelet types.
// +kubebuilder:validation:XValidation:message="imageGCHighThresholdPercent must be greater than imageGCLowThresholdPercent",rule="has(self.imageGCHighThresholdPercent) && has(self.imageGCLowThresholdPercent) ? self.imageGCHighThresholdPercent > self.imageGCLowThresholdPercent : true"
// +optional
-Kubelet *KubeletConfiguration `json:"kubelet,omitempty" hash:"ignore"`
+Kubelet *KubeletConfiguration `json:"kubelet,omitempty"`
// MaxPods is an override for the maximum number of pods that can run on a worker node instance.
// +kubebuilder:validation:Minimum:=0
// +optional
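Dropping the `hash:"ignore"` struct tag means kubelet configuration now feeds into the AKSNodeClass hash that drift detection compares against NodeClaims, which is why the new hash test below gives the Kubelet entry its own expected hash. A minimal sketch of what the tag controls, assuming the mitchellh/hashstructure/v2 semantics the tag suggests (the exact options the provider passes to Hash() are not shown in this diff; the types below are illustrative stand-ins, not the real API structs):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"
)

// Illustrative stand-ins; the real spec lives in pkg/apis/v1alpha2.
type kubeletConfig struct{ CPUManagerPolicy string }

type specIgnoringKubelet struct {
	OSDiskSizeGB int32
	Kubelet      *kubeletConfig `hash:"ignore"` // old tag: kubelet edits never change the hash
}

type specHashingKubelet struct {
	OSDiskSizeGB int32
	Kubelet      *kubeletConfig // tag removed: kubelet edits change the hash, so NodeClaims drift
}

func main() {
	a, _ := hashstructure.Hash(specIgnoringKubelet{30, &kubeletConfig{"static"}}, hashstructure.FormatV2, nil)
	b, _ := hashstructure.Hash(specIgnoringKubelet{30, &kubeletConfig{"none"}}, hashstructure.FormatV2, nil)
	fmt.Println(a == b) // true: the ignored field is invisible to the hash

	c, _ := hashstructure.Hash(specHashingKubelet{30, &kubeletConfig{"static"}}, hashstructure.FormatV2, nil)
	d, _ := hashstructure.Hash(specHashingKubelet{30, &kubeletConfig{"none"}}, hashstructure.FormatV2, nil)
	fmt.Println(c == d) // false: kubelet config now participates in the drift hash
}
```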
106 changes: 106 additions & 0 deletions pkg/apis/v1alpha2/aksnodeclass_hash_test.go
@@ -0,0 +1,106 @@
/*
Portions Copyright (c) Microsoft Corporation.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2_test

import (
"time"

"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
"github.com/imdario/mergo"
"github.com/samber/lo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/karpenter/pkg/test"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = Describe("Hash", func() {
const staticHash = "3322684356700793451"
var nodeClass *v1alpha2.AKSNodeClass
BeforeEach(func() {
nodeClass = &v1alpha2.AKSNodeClass{
ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{}),
Spec: v1alpha2.AKSNodeClassSpec{
VNETSubnetID: lo.ToPtr("subnet-id"),
OSDiskSizeGB: lo.ToPtr(int32(30)),
ImageFamily: lo.ToPtr("Ubuntu2204"),
Tags: map[string]string{
"keyTag-1": "valueTag-1",
"keyTag-2": "valueTag-2",
},
Kubelet: &v1alpha2.KubeletConfiguration{
CPUManagerPolicy: "static",
CPUCFSQuota: lo.ToPtr(true),
CPUCFSQuotaPeriod: metav1.Duration{Duration: lo.Must(time.ParseDuration("100ms"))},
ImageGCHighThresholdPercent: lo.ToPtr(int32(85)),
ImageGCLowThresholdPercent: lo.ToPtr(int32(80)),
TopologyManagerPolicy: "none",
AllowedUnsafeSysctls: []string{"net.core.somaxconn"},
ContainerLogMaxSize: "10Mi",
ContainerLogMaxFiles: lo.ToPtr(int32(10)),
},
MaxPods: lo.ToPtr(int32(100)),
},
}
})
DescribeTable(
"should match static hash on field value change",
func(hash string, changes v1alpha2.AKSNodeClass) {
Expect(mergo.Merge(nodeClass, changes, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(Succeed())
Expect(nodeClass.Hash()).To(Equal(hash))
},
Entry("Base AKSNodeClass", staticHash, v1alpha2.AKSNodeClass{}),

// Static fields, expect changed hash from base
Entry("VNETSubnetID", "1592960122675270014", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{VNETSubnetID: lo.ToPtr("subnet-id-2")}}),
Entry("OSDiskSizeGB", "8480605960768312946", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{OSDiskSizeGB: lo.ToPtr(int32(40))}}),
Entry("ImageFamily", "1518600367124538723", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{ImageFamily: lo.ToPtr("AzureLinux")}}),
Entry("Tags", "13342466710512152270", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}),
Entry("Kubelet", "13352743309220827045", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{Kubelet: &v1alpha2.KubeletConfiguration{CPUManagerPolicy: "none"}}}),
Entry("MaxPods", "17237389697604178241", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{MaxPods: lo.ToPtr(int32(200))}}),
)
It("should match static hash when reordering tags", func() {
nodeClass.Spec.Tags = map[string]string{"keyTag-2": "valueTag-2", "keyTag-1": "valueTag-1"}
Expect(nodeClass.Hash()).To(Equal(staticHash))
})
DescribeTable("should change hash when static fields are updated", func(changes v1alpha2.AKSNodeClass) {
hash := nodeClass.Hash()
Expect(mergo.Merge(nodeClass, changes, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(Succeed())
updatedHash := nodeClass.Hash()
Expect(hash).ToNot(Equal(updatedHash))
},
Entry("VNETSubnetID", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{VNETSubnetID: lo.ToPtr("subnet-id-2")}}),
Entry("OSDiskSizeGB", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{OSDiskSizeGB: lo.ToPtr(int32(40))}}),
Entry("ImageFamily", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{ImageFamily: lo.ToPtr("AzureLinux")}}),
Entry("Tags", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}),
Entry("Kubelet", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{Kubelet: &v1alpha2.KubeletConfiguration{CPUManagerPolicy: "none"}}}),
Entry("MaxPods", v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{MaxPods: lo.ToPtr(int32(200))}}),
)
It("should not change hash when tags are re-ordered", func() {
hash := nodeClass.Hash()
nodeClass.Spec.Tags = map[string]string{"keyTag-2": "valueTag-2", "keyTag-1": "valueTag-1"}
updatedHash := nodeClass.Hash()
Expect(hash).To(Equal(updatedHash))
})
It("should expect two AKSNodeClasses with the same spec to have the same hash", func() {
otherNodeClass := &v1alpha2.AKSNodeClass{
Spec: nodeClass.Spec,
}
Expect(nodeClass.Hash()).To(Equal(otherNodeClass.Hash()))
})
})
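The DescribeTable entries above lean on mergo's override semantics: each entry sets only the field under test, and merging it over the base nodeClass leaves every other field intact. A standalone sketch of that behaviour, with illustrative types rather than the real API structs:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
	"github.com/samber/lo"
)

type spec struct {
	VNETSubnetID *string
	MaxPods      *int32
}

func main() {
	base := spec{VNETSubnetID: lo.ToPtr("subnet-id"), MaxPods: lo.ToPtr(int32(100))}
	change := spec{MaxPods: lo.ToPtr(int32(200))} // only the field under test is set

	// WithOverride lets non-empty fields in change win; nil fields keep the base value.
	if err := mergo.Merge(&base, change, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(lo.FromPtr(base.VNETSubnetID), lo.FromPtr(base.MaxPods)) // subnet-id 200
}
```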
6 changes: 3 additions & 3 deletions pkg/controllers/nodeclass/hash/controller.go
@@ -80,9 +80,9 @@ func (c *Controller) Register(_ context.Context, m manager.Manager) error {
Complete(reconcile.AsReconciler(m.GetClient(), c))
}

-// Updating `AKSNodeClass-hash-version` annotation inside the karpenter controller means a breaking change has been made to the hash calculation.
-// `AKSNodeClass-hash` annotation on the AKSNodeClass will be updated, due to the breaking change, making the `AKSNodeClass-hash` on the NodeClaim different from
-// AKSNodeClass. Since, we cannot rely on the `AKSNodeClass-hash` on the NodeClaims, due to the breaking change, we will need to re-calculate the hash and update the annotation.
+// Updating `aksnodeclass-hash-version` annotation inside the karpenter controller means a breaking change has been made to the hash calculation.
+// `aksnodeclass-hash` annotation on the AKSNodeClass will be updated, due to the breaking change, making the `aksnodeclass-hash` on the NodeClaim different from
+// AKSNodeClass. Since, we cannot rely on the `aksnodeclass-hash` on the NodeClaims, due to the breaking change, we will need to re-calculate the hash and update the annotation.
// For more information on the Drift Hash Versioning: https://github.com/kubernetes-sigs/karpenter/blob/main/designs/drift-hash-versioning.md
func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1alpha2.AKSNodeClass) error {
ncList := &karpv1.NodeClaimList{}
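Put differently, the comment above describes a two-step decision: a NodeClaim whose recorded hash version matches the controller's can be compared hash-to-hash, while one carrying an older hash version gets its hash recomputed from the current AKSNodeClass. A simplified, illustrative helper of that annotation bookkeeping (not the controller's actual method, which lists NodeClaims through the client and also skips NodeClaims already marked drifted, as the suite below verifies):

```go
package main

import (
	"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
	"github.com/samber/lo"
)

// reconcileNodeClaimHash sketches only the annotation decision described in the comment above.
func reconcileNodeClaimHash(nodeClass *v1alpha2.AKSNodeClass, nodeClaimAnnotations map[string]string) map[string]string {
	if nodeClaimAnnotations[v1alpha2.AnnotationAKSNodeClassHashVersion] == v1alpha2.AKSNodeClassHashVersion {
		// Same hash version: the stored aksnodeclass-hash is still comparable, so leave it alone
		// and let normal drift detection do the comparison.
		return nodeClaimAnnotations
	}
	// Older hash version: the stored hash came from a different calculation, so recompute it
	// from the current AKSNodeClass and stamp the new version alongside it.
	return lo.Assign(nodeClaimAnnotations, map[string]string{
		v1alpha2.AnnotationAKSNodeClassHash:        nodeClass.Hash(),
		v1alpha2.AnnotationAKSNodeClassHashVersion: v1alpha2.AKSNodeClassHashVersion,
	})
}

func main() {}
```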
18 changes: 9 additions & 9 deletions pkg/controllers/nodeclass/hash/suite_test.go
@@ -117,7 +117,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
Entry("OSDiskSizeGB Drift", &v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{OSDiskSizeGB: lo.ToPtr(int32(100))}}),
Entry("Tags Drift", &v1alpha2.AKSNodeClass{Spec: v1alpha2.AKSNodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}),
)
It("should update AKSNodeClass-hash-version annotation when the AKSNodeClass-hash-version on the NodeClass does not match with the controller hash version", func() {
It("should update aksnodeclass-hash-version annotation when the aksnodeclass-hash-version on the NodeClass does not match with the controller hash version", func() {
nodeClass.Annotations = map[string]string{
v1alpha2.AnnotationAKSNodeClassHash: "abceduefed",
v1alpha2.AnnotationAKSNodeClassHashVersion: "test",
@@ -128,11 +128,11 @@
nodeClass = ExpectExists(ctx, env.Client, nodeClass)

expectedHash := nodeClass.Hash()
-// Expect AKSNodeClass-hash on the NodeClass to be updated
+// Expect aksnodeclass-hash on the NodeClass to be updated
Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, expectedHash))
Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
})
It("should update AKSNodeClass-hash-versions on all NodeClaims when the AKSNodeClass-hash-version does not match with the controller hash version", func() {
It("should update aksnodeclass-hash-versions on all NodeClaims when the aksnodeclass-hash-version does not match with the controller hash version", func() {
nodeClass.Annotations = map[string]string{
v1alpha2.AnnotationAKSNodeClassHash: "abceduefed",
v1alpha2.AnnotationAKSNodeClassHashVersion: "test",
@@ -178,13 +178,13 @@ var _ = Describe("NodeClass Hash Controller", func() {
nodeClaimTwo = ExpectExists(ctx, env.Client, nodeClaimTwo)

expectedHash := nodeClass.Hash()
-// Expect AKSNodeClass-hash on the NodeClaims to be updated
+// Expect aksnodeclass-hash on the NodeClaims to be updated
Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, expectedHash))
Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, expectedHash))
Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
})
It("should not update AKSNodeClass-hash on all NodeClaims when the AKSNodeClass-hash-version matches the controller hash version", func() {
It("should not update aksnodeclass-hash on all NodeClaims when the aksnodeclass-hash-version matches the controller hash version", func() {
nodeClass.Annotations = map[string]string{
v1alpha2.AnnotationAKSNodeClassHash: "abceduefed",
v1alpha2.AnnotationAKSNodeClassHashVersion: "test-version",
@@ -213,14 +213,14 @@

expectedHash := nodeClass.Hash()

-// Expect AKSNodeClass-hash on the NodeClass to be updated
+// Expect aksnodeclass-hash on the NodeClass to be updated
Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, expectedHash))
Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
-// Expect AKSNodeClass-hash on the NodeClaims to stay the same
+// Expect aksnodeclass-hash on the NodeClaims to stay the same
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, "1234564654"))
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
})
It("should not update AKSNodeClass-hash on the NodeClaim if it's drifted and the AKSNodeClass-hash-version does not match the controller hash version", func() {
It("should not update aksnodeclass-hash on the NodeClaim if it's drifted and the aksnodeclass-hash-version does not match the controller hash version", func() {
nodeClass.Annotations = map[string]string{
v1alpha2.AnnotationAKSNodeClassHash: "abceduefed",
v1alpha2.AnnotationAKSNodeClassHashVersion: "test",
@@ -247,7 +247,7 @@
ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)

-// Expect AKSNodeClass-hash on the NodeClaims to stay the same
+// Expect aksnodeclass-hash on the NodeClaims to stay the same
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHash, "123456"))
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1alpha2.AnnotationAKSNodeClassHashVersion, v1alpha2.AKSNodeClassHashVersion))
})
16 changes: 9 additions & 7 deletions pkg/test/aksnodeclass.go → pkg/test/nodeclass.go
@@ -36,17 +36,19 @@ func AKSNodeClass(overrides ...v1alpha2.AKSNodeClass) *v1alpha2.AKSNodeClass {
panic(fmt.Sprintf("Failed to merge settings: %s", err))
}
}

nc := &v1alpha2.AKSNodeClass{
// In reality, these default values will be set via the defaulting done by the API server. The reason we provide them here is
// we sometimes reference a test.AKSNodeClass without applying it, and in that case we need to set the default values ourselves
if options.Spec.OSDiskSizeGB == nil {
options.Spec.OSDiskSizeGB = lo.ToPtr[int32](128)
}
if options.Spec.ImageFamily == nil {
options.Spec.ImageFamily = lo.ToPtr(v1alpha2.Ubuntu2204ImageFamily)
}
return &v1alpha2.AKSNodeClass{
ObjectMeta: test.ObjectMeta(options.ObjectMeta),
Spec: options.Spec,
Status: options.Status,
}
// In reality, these default values will be set via the defaulting done by the API server. The reason we provide them here is
// we sometimes reference a test.AKSNodeClass without applying it, and in that case we need to set the default values ourselves
nc.Spec.OSDiskSizeGB = lo.ToPtr[int32](128)
nc.Spec.ImageFamily = lo.ToPtr(v1alpha2.Ubuntu2204ImageFamily)
return nc
}

func AKSNodeClassFieldIndexer(ctx context.Context) func(cache.Cache) error {
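With the reworked helper, OSDiskSizeGB and ImageFamily behave as defaults rather than unconditional overwrites, so a suite can pin one of them and still get API-server-style defaulting for the other. A hypothetical usage snippet (values are illustrative, not taken from this PR):

```go
package example

import (
	"fmt"

	"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
	"github.com/Azure/karpenter-provider-azure/pkg/test"
	"github.com/samber/lo"
)

func exampleAKSNodeClassDefaults() {
	nodeClass := test.AKSNodeClass(v1alpha2.AKSNodeClass{
		Spec: v1alpha2.AKSNodeClassSpec{
			// Explicit override: the old helper clobbered this back to 128, the new one keeps it.
			OSDiskSizeGB: lo.ToPtr(int32(256)),
		},
	})
	// ImageFamily was left nil, so the helper still fills in the Ubuntu2204 default.
	fmt.Println(lo.FromPtr(nodeClass.Spec.OSDiskSizeGB), lo.FromPtr(nodeClass.Spec.ImageFamily))
}
```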
4 changes: 2 additions & 2 deletions test/pkg/environment/azure/environment.go
@@ -22,19 +22,19 @@ import (
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
coretest "sigs.k8s.io/karpenter/pkg/test"

"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
"github.com/Azure/karpenter-provider-azure/pkg/test"
"github.com/Azure/karpenter-provider-azure/test/pkg/environment/common"
)

func init() {
// TODO: should have core1beta1.NormalizedLabels too?
karpv1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.disk.csi.azure.com/zone": v1.LabelTopologyZone})
coretest.DefaultImage = "mcr.microsoft.com/oss/kubernetes/pause:3.6"
}

const (
WindowsDefaultImage = "mcr.microsoft.com/oss/kubernetes/pause:3.9"
CiliumAgentNotReadyTaint = "node.cilium.io/agent-not-ready"
)

11 changes: 7 additions & 4 deletions test/pkg/environment/common/setup.go
@@ -21,10 +21,6 @@ import (
"sync"
"time"

"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
"github.com/Azure/karpenter-provider-azure/test/pkg/debug"
. "github.com/onsi/ginkgo/v2" //nolint:revive,stylecheck
. "github.com/onsi/gomega" //nolint:revive,stylecheck
"github.com/samber/lo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -40,6 +36,12 @@ import (
karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/test"
"sigs.k8s.io/karpenter/pkg/utils/pod"

"github.com/Azure/karpenter-provider-azure/pkg/apis/v1alpha2"
"github.com/Azure/karpenter-provider-azure/test/pkg/debug"

. "github.com/onsi/ginkgo/v2" //nolint:revive,stylecheck
. "github.com/onsi/gomega" //nolint:revive,stylecheck
)

const TestingFinalizer = "testing/finalizer"
@@ -107,6 +109,7 @@ func (env *Environment) ExpectCleanCluster() {

func (env *Environment) Cleanup() {
env.CleanupObjects(CleanableObjects...)
env.EventuallyExpectNoLeakedKubeNodeLease()
env.eventuallyExpectScaleDown()
env.ExpectNoCrashes()
}
1 change: 0 additions & 1 deletion test/suites/chaos/suite_test.go
@@ -87,7 +87,6 @@ var _ = Describe("Chaos", func() {
dep := coretest.Deployment(coretest.DeploymentOptions{
Replicas: int32(numPods),
PodOptions: coretest.PodOptions{
Image: "mcr.microsoft.com/oss/kubernetes/pause:3.6",
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "my-app"},
},