Skip to content

Commit

Permalink
Merge pull request openshift-kni#937 from openshift-kni/e2e-ephemeral…
Browse files Browse the repository at this point in the history
…-storage-more-tests

e2e: hostlevel: cover all pod QoSes
  • Loading branch information
openshift-merge-bot[bot] authored May 29, 2024
2 parents 3df6093 + d88e010 commit bd7733c
Show file tree
Hide file tree
Showing 8 changed files with 375 additions and 48 deletions.
47 changes: 44 additions & 3 deletions internal/resourcelist/resourcelist.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func ToString(res corev1.ResourceList) string {
}

func FromReplicaSet(rs appsv1.ReplicaSet) corev1.ResourceList {
rl := FromContainers(rs.Spec.Template.Spec.Containers)
rl := FromContainerLimits(rs.Spec.Template.Spec.Containers)
replicas := rs.Spec.Replicas
for resName, resQty := range rl {
replicaResQty := resQty.DeepCopy()
Expand All @@ -58,10 +58,10 @@ func FromReplicaSet(rs appsv1.ReplicaSet) corev1.ResourceList {
}

// FromGuaranteedPod sums the container resource limits of the given pod
// into a single ResourceList. Intended for Guaranteed-QoS pods, where
// limits are authoritative (requests presumably equal limits — the
// function does not verify the pod's QoS class).
func FromGuaranteedPod(pod corev1.Pod) corev1.ResourceList {
	// Stale duplicate `return FromContainers(...)` line (leftover from the
	// rename to FromContainerLimits) removed: only one return is reachable.
	return FromContainerLimits(pod.Spec.Containers)
}

func FromContainers(containers []corev1.Container) corev1.ResourceList {
func FromContainerLimits(containers []corev1.Container) corev1.ResourceList {
res := make(corev1.ResourceList)
for idx := 0; idx < len(containers); idx++ {
cnt := &containers[idx] // shortcut
Expand All @@ -74,6 +74,19 @@ func FromContainers(containers []corev1.Container) corev1.ResourceList {
return res
}

// FromContainerRequests sums the resource requests of all the given
// containers into a single ResourceList, keyed by resource name.
func FromContainerRequests(containers []corev1.Container) corev1.ResourceList {
	totals := make(corev1.ResourceList)
	for i := range containers {
		// index (not value) range avoids copying each Container struct
		for name, qty := range containers[i].Resources.Requests {
			acc := totals[name]
			acc.Add(qty)
			totals[name] = acc
		}
	}
	return totals
}

func AddCoreResources(res, resToAdd corev1.ResourceList) {
for resName, resQty := range resToAdd {
qty := res[resName]
Expand All @@ -94,6 +107,34 @@ func SubCoreResources(res, resToSub corev1.ResourceList) error {
return nil
}

// Accumulate folds a slice of ResourceLists into one ResourceList,
// summing the quantities of each resource name across all operands.
// An empty or nil input yields an empty (non-nil) ResourceList.
func Accumulate(ress []corev1.ResourceList) corev1.ResourceList {
	total := corev1.ResourceList{}
	for _, resList := range ress {
		for name, qty := range resList {
			// Quantity.Add mutates the receiver, so work on the copy
			// fetched from the map, then store it back.
			sum := total[name]
			sum.Add(qty)
			total[name] = sum
		}
	}
	return total
}

// Equal reports whether ra and rb hold exactly the same resource names
// with semantically equal quantities (Quantity.Equal, so e.g. different
// but equivalent representations still compare equal).
func Equal(ra, rb corev1.ResourceList) bool {
	if len(ra) != len(rb) {
		return false
	}
	for name, qtyA := range ra {
		// same length + every key of ra present in rb => same key sets
		qtyB, found := rb[name]
		if !found || !qtyA.Equal(qtyB) {
			return false
		}
	}
	return true
}

func RoundUpCoreResources(cpu, mem resource.Quantity) (resource.Quantity, resource.Quantity) {
retCpu := *resource.NewQuantity(roundUp(cpu.Value(), 2), resource.DecimalSI)
retMem := mem.DeepCopy() // TODO: this is out of over caution
Expand Down
165 changes: 165 additions & 0 deletions internal/resourcelist/resourcelist_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -223,3 +223,168 @@ func TestRoundUpCoreResources(t *testing.T) {
})
}
}

// TestEqual covers Equal over empty, subset, disjoint-key and
// value-mismatch ResourceList pairs.
func TestEqual(t *testing.T) {
	testCases := []struct {
		name     string
		a, b     corev1.ResourceList
		expected bool
	}{
		{
			// both lists nil/empty
			name:     "empty",
			expected: true,
		},
		{
			name: "same size, different values",
			a: corev1.ResourceList{
				corev1.ResourceCPU: resource.MustParse("1"),
			},
			b: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
		{
			name: "subset A",
			a: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
		{
			name: "subset B",
			a: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
		{
			name: "equal",
			a: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: true,
		},
		{
			// 2Gi == 2048Mi, so 2064Mi must compare unequal
			name: "different value",
			a: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("2Gi"),
			},
			b: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("2064Mi"),
			},
		},
	}

	for _, tcase := range testCases {
		tcase := tcase
		t.Run(tcase.name, func(t *testing.T) {
			if got := Equal(tcase.a, tcase.b); got != tcase.expected {
				t.Errorf("expected %v got %v", tcase.expected, got)
			}
		})
	}
}

// TestAccumulate covers Accumulate over empty input, a single operand,
// fully/partially overlapping operands, and disjoint resource names.
func TestAccumulate(t *testing.T) {
	testCases := []struct {
		name     string
		operands []corev1.ResourceList
		expected corev1.ResourceList
	}{
		{
			// no operands -> empty result
			name: "empty",
		},
		{
			name: "single operand",
			operands: []corev1.ResourceList{
				{
					corev1.ResourceCPU:    resource.MustParse("4"),
					corev1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			expected: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("4"),
				corev1.ResourceMemory: resource.MustParse("8Gi"),
			},
		},
		{
			name: "overlapping operands",
			operands: []corev1.ResourceList{
				{
					corev1.ResourceCPU:    resource.MustParse("4"),
					corev1.ResourceMemory: resource.MustParse("8Gi"),
				},
				{
					corev1.ResourceCPU:    resource.MustParse("1"),
					corev1.ResourceMemory: resource.MustParse("3Gi"),
				},
			},
			expected: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("5"),
				corev1.ResourceMemory: resource.MustParse("11Gi"),
			},
		},
		{
			name: "partially overlapping operands",
			operands: []corev1.ResourceList{
				{
					corev1.ResourceCPU:    resource.MustParse("4"),
					corev1.ResourceMemory: resource.MustParse("8Gi"),
				},
				{
					corev1.ResourceMemory: resource.MustParse("4Gi"),
				},
			},
			expected: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("4"),
				corev1.ResourceMemory: resource.MustParse("12Gi"),
			},
		},
		{
			name: "disjoint operands",
			operands: []corev1.ResourceList{
				{
					corev1.ResourceCPU: resource.MustParse("4"),
				},
				{
					corev1.ResourceMemory: resource.MustParse("8Gi"),
				},
				{
					corev1.ResourceStorage: resource.MustParse("256Gi"),
				},
			},
			expected: corev1.ResourceList{
				corev1.ResourceCPU:     resource.MustParse("4"),
				corev1.ResourceMemory:  resource.MustParse("8Gi"),
				corev1.ResourceStorage: resource.MustParse("256Gi"),
			},
		},
	}

	for _, tcase := range testCases {
		tcase := tcase
		t.Run(tcase.name, func(t *testing.T) {
			if got := Accumulate(tcase.operands); !Equal(got, tcase.expected) {
				t.Errorf("expected %v got %v", tcase.expected, got)
			}
		})
	}
}
19 changes: 12 additions & 7 deletions test/e2e/serial/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,9 @@ package config

import (
"context"
"encoding/json"
"fmt"
"os"
"strings"

"k8s.io/klog/v2"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
Expand Down Expand Up @@ -157,12 +157,9 @@ func CheckNodesTopology(ctx context.Context) error {

errorMap := getTopologyConsistencyErrors(kconfigs, nrtList.Items)
if len(errorMap) != 0 {
klog.Infof("incoeherent NRT/KubeletConfig data: %v", errorMap)
prettyMap, err := json.MarshalIndent(errorMap, "", " ")
if err != nil {
return fmt.Errorf("Found some nodes with incoherent info in KubeletConfig/NRT data")
}
return fmt.Errorf("Following nodes have incoherent info in KubeletConfig/NRT data:\n%s\n", string(prettyMap))
errText := errorMapToString(errorMap)
klog.Infof("incoeherent NRT/KubeletConfig data: %v", errText)
return fmt.Errorf("Following nodes have incoherent info in KubeletConfig/NRT data:\n%#v\n", errText)
}

singleNUMANodeNRTs := e2enrt.FilterByTopologyManagerPolicy(nrtList.Items, intnrt.SingleNUMANode)
Expand All @@ -172,3 +169,11 @@ func CheckNodesTopology(ctx context.Context) error {

return nil
}

func errorMapToString(errs map[string]error) string {
var sb strings.Builder
for node, err := range errs {
fmt.Fprintf(&sb, "%s: %v\n", node, err)
}
return sb.String()
}
6 changes: 3 additions & 3 deletions test/e2e/serial/tests/non_regression.go
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
RestartPolicy: corev1.RestartPolicyAlways,
}

By(fmt.Sprintf("creating a deployment with a guaranteed pod with two containers requiring total %s", e2ereslist.ToString(e2ereslist.FromContainers(podSpec.Containers))))
By(fmt.Sprintf("creating a deployment with a guaranteed pod with two containers requiring total %s", e2ereslist.ToString(e2ereslist.FromContainerLimits(podSpec.Containers))))
dp := objects.NewTestDeploymentWithPodSpec(replicas, podLabels, nil, fxt.Namespace.Name, "testdp", *podSpec)

err = fxt.Client.Create(context.TODO(), dp)
Expand Down Expand Up @@ -274,7 +274,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
Expect(schedOK).To(BeFalse(), "pod %s/%s not assigned to a specific node without a scheduler %s", updatedPod.Namespace, updatedPod.Name, nonExistingSchedulerName)

rl := e2ereslist.FromGuaranteedPod(updatedPod)
klog.Infof("post-create pod resource list: spec=[%s] updated=[%s]", e2ereslist.ToString(e2ereslist.FromContainers(podSpec.Containers)), e2ereslist.ToString(rl))
klog.Infof("post-create pod resource list: spec=[%s] updated=[%s]", e2ereslist.ToString(e2ereslist.FromContainerLimits(podSpec.Containers)), e2ereslist.ToString(rl))

nrtInitial, err := e2enrt.FindFromList(nrtInitialList.Items, updatedPod.Spec.NodeName)
Expect(err).ToNot(HaveOccurred())
Expand Down Expand Up @@ -392,7 +392,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes

By(fmt.Sprintf("Scheduling the testing pod with resources that are not allocatable on any numa zone of the target node. requested resources of the test pod: %s", e2ereslist.ToString(e2ereslist.FromContainers(pod.Spec.Containers))))
By(fmt.Sprintf("Scheduling the testing pod with resources that are not allocatable on any numa zone of the target node. requested resources of the test pod: %s", e2ereslist.ToString(e2ereslist.FromContainerLimits(pod.Spec.Containers))))
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)

Expand Down
3 changes: 1 addition & 2 deletions test/e2e/serial/tests/resource_accounting.go
Original file line number Diff line number Diff line change
Expand Up @@ -678,7 +678,7 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
Expect(err).ToNot(HaveOccurred())

rl := e2ereslist.FromGuaranteedPod(*updatedPod2)
klog.Infof("post-create pod resource list: spec=[%s] updated=[%s]", e2ereslist.ToString(e2ereslist.FromContainers(podGuanranteed.Spec.Containers)), e2ereslist.ToString(rl))
klog.Infof("post-create pod resource list: spec=[%s] updated=[%s]", e2ereslist.ToString(e2ereslist.FromContainerLimits(podGuanranteed.Spec.Containers)), e2ereslist.ToString(rl))

scope, ok := attribute.Get(targetNrtInitial.Attributes, intnrt.TopologyManagerScopeAttribute)
Expect(ok).To(BeTrue(), fmt.Sprintf("Unable to find required attribute %q on NRT %q", intnrt.TopologyManagerScopeAttribute, targetNrtInitial.Name))
Expand Down Expand Up @@ -764,7 +764,6 @@ var _ = Describe("[serial][disruptive][scheduler][resacct] numaresources workloa
err = fxt.Client.Delete(context.TODO(), ds)
Expect(err).ToNot(HaveOccurred())
})

})
})

Expand Down
Loading

0 comments on commit bd7733c

Please sign in to comment.