Merge pull request #1229 from haircommander/display-dedup
crictl: deduplicate display boilerplate
k8s-ci-robot authored Aug 3, 2023
2 parents dd51f3b + 306ebdf commit 94d44a2
Showing 3 changed files with 100 additions and 113 deletions.
104 changes: 38 additions & 66 deletions cmd/crictl/container_stats.go
@@ -132,75 +132,36 @@ func (c containerStatsByID) Less(i, j int) bool {
return c[i].Attributes.Id < c[j].Attributes.Id
}

type containerStatsDisplayer struct {
opts statsOptions
request *pb.ListContainerStatsRequest
*display
}

// ContainerStats sends a ListContainerStatsRequest to the server, and
// parses the returned ListContainerStatsResponse.
func ContainerStats(client internalapi.RuntimeService, opts statsOptions) error {
filter := &pb.ContainerStatsFilter{}
if opts.id != "" {
filter.Id = opts.id
}
if opts.podID != "" {
filter.PodSandboxId = opts.podID
}
if opts.labels != nil {
filter.LabelSelector = opts.labels
}
request := &pb.ListContainerStatsRequest{
Filter: filter,
}

display := newTableDisplay(20, 1, 3, ' ', 0)
if !opts.watch {
if err := displayStats(context.TODO(), client, request, display, opts); err != nil {
return err
}
} else {
displayErrCh := make(chan error, 1)
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
watchCtx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
// Put the displayStats in another goroutine.
// because it might be time consuming with lots of containers.
// and we want to cancel it ASAP when user hit CtrlC
go func() {
for range ticker.C {
if err := displayStats(watchCtx, client, request, display, opts); err != nil {
displayErrCh <- err
break
}
}
}()
// listen for CtrlC or error
select {
case <-SetupInterruptSignalHandler():
cancelFn()
return nil
case err := <-displayErrCh:
return err
}
d := containerStatsDisplayer{
opts: opts,
request: &pb.ListContainerStatsRequest{
Filter: &pb.ContainerStatsFilter{
Id: opts.id,
PodSandboxId: opts.podID,
LabelSelector: opts.labels,
},
},
display: newTableDisplay(20, 1, 3, ' ', 0),
}

return nil
}

func getContainerStats(ctx context.Context, client internalapi.RuntimeService, request *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
logrus.Debugf("ListContainerStatsRequest: %v", request)
r, err := client.ListContainerStats(context.TODO(), request.Filter)
logrus.Debugf("ListContainerResponse: %v", r)
if err != nil {
return nil, err
}
sort.Sort(containerStatsByID(r))
return &pb.ListContainerStatsResponse{Stats: r}, nil
return handleDisplay(context.TODO(), client, opts.watch, d.displayStats)
}

func displayStats(ctx context.Context, client internalapi.RuntimeService, request *pb.ListContainerStatsRequest, display *display, opts statsOptions) error {
r, err := getContainerStats(ctx, client, request)
func (d containerStatsDisplayer) displayStats(ctx context.Context, client internalapi.RuntimeService) error {
r, err := getContainerStats(ctx, client, d.request)
if err != nil {
return err
}
switch opts.output {
switch d.opts.output {
case "json":
return outputProtobufObjAsJSON(r)
case "yaml":
@@ -214,14 +175,14 @@ func displayStats(ctx context.Context, client internalapi.RuntimeService, reques
oldStats[s.Attributes.Id] = s
}

time.Sleep(opts.sample)
time.Sleep(d.opts.sample)

r, err = getContainerStats(ctx, client, request)
r, err = getContainerStats(ctx, client, d.request)
if err != nil {
return err
}

display.AddRow([]string{columnContainer, columnName, columnCPU, columnMemory, columnDisk, columnInodes})
d.display.AddRow([]string{columnContainer, columnName, columnCPU, columnMemory, columnDisk, columnInodes})
for _, s := range r.GetStats() {
if ctx.Err() != nil {
return ctx.Err()
@@ -232,7 +193,7 @@ func displayStats(ctx context.Context, client internalapi.RuntimeService, reques
mem := s.GetMemory().GetWorkingSetBytes().GetValue()
disk := s.GetWritableLayer().GetUsedBytes().GetValue()
inodes := s.GetWritableLayer().GetInodesUsed().GetValue()
if !opts.all && cpu == 0 && mem == 0 {
if !d.opts.all && cpu == 0 && mem == 0 {
// Skip non-running container
continue
}
@@ -250,12 +211,23 @@ func displayStats(ctx context.Context, client internalapi.RuntimeService, reques
}
cpuPerc = float64(cpu-old.GetCpu().GetUsageCoreNanoSeconds().GetValue()) / float64(duration) * 100
}
display.AddRow([]string{id, name, fmt.Sprintf("%.2f", cpuPerc), units.HumanSize(float64(mem)),
d.display.AddRow([]string{id, name, fmt.Sprintf("%.2f", cpuPerc), units.HumanSize(float64(mem)),
units.HumanSize(float64(disk)), fmt.Sprintf("%d", inodes)})

}
display.ClearScreen()
display.Flush()
d.display.ClearScreen()
d.display.Flush()

return nil
}

func getContainerStats(ctx context.Context, client internalapi.RuntimeService, request *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
logrus.Debugf("ListContainerStatsRequest: %v", request)
r, err := client.ListContainerStats(context.TODO(), request.Filter)
logrus.Debugf("ListContainerResponse: %v", r)
if err != nil {
return nil, err
}
sort.Sort(containerStatsByID(r))
return &pb.ListContainerStatsResponse{Stats: r}, nil
}
67 changes: 20 additions & 47 deletions cmd/crictl/pod_stats.go
@@ -120,6 +120,12 @@ func (c podStatsByID) Less(i, j int) bool {
return c[i].Attributes.Id < c[j].Attributes.Id
}

type podStatsDisplayer struct {
filter *pb.PodSandboxStatsFilter
opts podStatsOptions
*display
}

func podStats(
c context.Context,
client cri.RuntimeService,
@@ -133,58 +139,25 @@ func podStats(
filter.LabelSelector = opts.labels
}

display := newTableDisplay(20, 1, 3, ' ', 0)
if opts.watch {
displayErrCh := make(chan error, 1)
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()

watchCtx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()

// Put the displayPodStats in another goroutine, because it might be
// time consuming with lots of pods and we want to cancel it
// ASAP when user hit CtrlC
go func() {
for range ticker.C {
if err := displayPodStats(watchCtx, client, filter, display, opts); err != nil {
displayErrCh <- err
break
}
}
}()

// listen for CtrlC or error
select {
case <-SetupInterruptSignalHandler():
cancelFn()
return nil
case err := <-displayErrCh:
return err
}
}

if err := displayPodStats(c, client, filter, display, opts); err != nil {
return fmt.Errorf("display pod stats: %w", err)
d := podStatsDisplayer{
filter: filter,
opts: opts,
display: newTableDisplay(20, 1, 3, ' ', 0),
}

return nil
return handleDisplay(c, client, opts.watch, d.displayPodStats)
}

func displayPodStats(
func (d *podStatsDisplayer) displayPodStats(
c context.Context,
client cri.RuntimeService,
filter *pb.PodSandboxStatsFilter,
display *display,
opts podStatsOptions,
) error {
stats, err := getPodSandboxStats(client, filter)
stats, err := getPodSandboxStats(client, d.filter)
if err != nil {
return err
}

response := &pb.ListPodSandboxStatsResponse{Stats: stats}
switch opts.output {
switch d.opts.output {
case "json":
return outputProtobufObjAsJSON(response)
case "yaml":
@@ -199,14 +172,14 @@ func displayPodStats(
oldStats[s.Attributes.Id] = s
}

time.Sleep(opts.sample)
time.Sleep(d.opts.sample)

stats, err = getPodSandboxStats(client, filter)
stats, err = getPodSandboxStats(client, d.filter)
if err != nil {
return err
}

display.AddRow([]string{columnPodName, columnPodID, columnCPU, columnMemory})
d.display.AddRow([]string{columnPodName, columnPodID, columnCPU, columnMemory})
for _, s := range stats {
if c.Err() != nil {
return c.Err()
@@ -265,16 +238,16 @@ func displayPodStats(
}
cpuPerc = float64(cpu-oldCpu) / float64(duration) * 100
}
display.AddRow([]string{
d.display.AddRow([]string{
s.Attributes.GetMetadata().GetName(),
id,
fmt.Sprintf("%.2f", cpuPerc),
units.HumanSize(float64(mem)),
})

}
display.ClearScreen()
display.Flush()
d.display.ClearScreen()
d.display.Flush()

return nil
}
42 changes: 42 additions & 0 deletions cmd/crictl/util.go
@@ -18,6 +18,7 @@ package main

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@@ -28,10 +29,12 @@ import (
"sort"
"strings"
"sync"
"time"

"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
cri "k8s.io/cri-api/pkg/apis"
internalapi "k8s.io/cri-api/pkg/apis"
pb "k8s.io/cri-api/pkg/apis/runtime/v1"
"sigs.k8s.io/yaml"
@@ -355,3 +358,42 @@ func getRepoImage(imageClient internalapi.ImageManagerService, image string) (st
}
return image, nil
}

func handleDisplay(
ctx context.Context,
client cri.RuntimeService,
watch bool,
displayFunc func(context.Context, cri.RuntimeService) error,
) error {
if !watch {
return displayFunc(ctx, client)
}

displayErrCh := make(chan error, 1)
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()

watchCtx, cancelFn := context.WithCancel(ctx)
defer cancelFn()

// Put the displayPodMetrics in another goroutine, because it might be
// time consuming with lots of pods and we want to cancel it
// ASAP when user hit CtrlC
go func() {
for range ticker.C {
if err := displayFunc(watchCtx, client); err != nil {
displayErrCh <- err
break
}
}
}()

// listen for CtrlC or error
select {
case <-SetupInterruptSignalHandler():
cancelFn()
return nil
case err := <-displayErrCh:
return err
}
}
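
For context, here is a minimal sketch (not part of this commit) of how a further crictl subcommand could reuse the new handleDisplay helper: bundle the command's options and table writer into a small displayer struct, then pass a method value as displayFunc, just as containerStatsDisplayer and podStatsDisplayer do above. The eventsDisplayer, eventsOptions, and displayEvents names below are hypothetical and used only for illustration.

// Hypothetical example; eventsDisplayer, eventsOptions, and displayEvents
// do not exist in this commit.

// eventsOptions is a placeholder options struct for this sketch.
type eventsOptions struct {
	watch bool
}

type eventsDisplayer struct {
	opts eventsOptions
	*display
}

func (d eventsDisplayer) displayEvents(ctx context.Context, client cri.RuntimeService) error {
	// Build the table the same way displayStats/displayPodStats do above:
	// add a header row, append one row per item, then redraw.
	d.display.AddRow([]string{"EVENT", "TIMESTAMP"})
	// ... fetch items from the runtime and append rows here ...
	d.display.ClearScreen()
	d.display.Flush()
	return nil
}

func events(ctx context.Context, client cri.RuntimeService, opts eventsOptions) error {
	d := eventsDisplayer{
		opts:    opts,
		display: newTableDisplay(20, 1, 3, ' ', 0),
	}
	// handleDisplay runs displayEvents once, or every 500ms until Ctrl+C when opts.watch is set.
	return handleDisplay(ctx, client, opts.watch, d.displayEvents)
}

This is the pattern the commit establishes: per-command code only describes how to render one snapshot, while the shared helper in util.go owns the watch loop, ticker, and interrupt handling.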
