Sparkline #110

Merged
merged 12 commits into from
Dec 18, 2024
1 change: 1 addition & 0 deletions cmd/commands.go
@@ -12,6 +12,7 @@ var commands []*cli.Command = []*cli.Command{
	filterCommand(),
	histogramCommand(),
	heatmapCommand(),
	sparkCommand(),
	bargraphCommand(),
	analyzeCommand(),
	tabulateCommand(),
119 changes: 119 additions & 0 deletions cmd/spark.go
@@ -0,0 +1,119 @@
package cmd

import (
	"fmt"
	"rare/cmd/helpers"
	"rare/pkg/aggregation"
	"rare/pkg/color"
	"rare/pkg/csv"
	"rare/pkg/expressions"
	"rare/pkg/multiterm"
	"rare/pkg/multiterm/termrenderers"

	"github.com/urfave/cli/v2"
)

func sparkFunction(c *cli.Context) error {
	var (
		delim      = c.String("delim")
		numRows    = c.Int("num")
		numCols    = c.Int("cols")
		noTruncate = c.Bool("notruncate")
		scalerName = c.String(helpers.ScaleFlag.Name)
		sortRows   = c.String("sort-rows")
		sortCols   = c.String("sort-cols")
	)

	counter := aggregation.NewTable(delim)

	batcher := helpers.BuildBatcherFromArguments(c)
	ext := helpers.BuildExtractorFromArguments(c, batcher)
	rowSorter := helpers.BuildSorterOrFail(sortRows)
	colSorter := helpers.BuildSorterOrFail(sortCols)

	vt := helpers.BuildVTermFromArguments(c)
	writer := termrenderers.NewSpark(vt, numRows, numCols)
	writer.Scaler = helpers.BuildScalerOrFail(scalerName)

	helpers.RunAggregationLoop(ext, counter, func() {
		// Trim unused data from the data store (keep memory tidy!)
		if !noTruncate {
			if keepCols := counter.OrderedColumns(colSorter); len(keepCols) > numCols {
				keepCols = keepCols[len(keepCols)-numCols:]
				keepLookup := make(map[string]struct{})
				for _, item := range keepCols {
					keepLookup[item] = struct{}{}
				}
				counter.Trim(func(col, row string, val int64) bool {
					_, ok := keepLookup[col]
					return !ok
				})
			}
		}

		// Write spark
		writer.WriteTable(counter, rowSorter, colSorter)
		writer.WriteFooter(0, helpers.FWriteExtractorSummary(ext, counter.ParseErrors(),
			fmt.Sprintf("(R: %v; C: %v)", color.Wrapi(color.Yellow, counter.RowCount()), color.Wrapi(color.BrightBlue, counter.ColumnCount()))))
		writer.WriteFooter(1, batcher.StatusString())
	})

	// Intentionally not deferred
	writer.Close()

	if err := helpers.TryWriteCSV(c, counter, csv.WriteTable); err != nil {
		return err
	}

	return helpers.DetermineErrorState(batcher, ext, counter)
}

func sparkCommand() *cli.Command {
	return helpers.AdaptCommandForExtractor(cli.Command{
		Name:    "spark",
		Aliases: []string{"sparkline", "s"},
		Usage:   "Create rows of sparkline graphs",
		Description: `Create rows of a sparkline graph, all scaled equally,
			based on a table-like input`,
		Category: cmdCatVisualize,
		Action:   sparkFunction,
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:  "delim",
				Usage: "Character to tabulate on. Use {$} helper by default",
				Value: expressions.ArraySeparatorString,
			},
			&cli.IntFlag{
				Name:    "num",
				Aliases: []string{"rows", "n"},
				Usage:   "Number of elements (rows) to display",
				Value:   20,
			},
			&cli.IntFlag{
				Name:  "cols",
				Usage: "Number of columns to display",
				Value: multiterm.TermCols() - 15,
			},
			&cli.BoolFlag{
				Name:  "notruncate",
				Usage: "Disable truncating data that doesn't fit in the sparkline",
				Value: false,
			},
			&cli.StringFlag{
				Name:  "sort-rows",
				Usage: helpers.DefaultSortFlag.Usage,
				Value: "value",
			},
			&cli.StringFlag{
				Name:  "sort-cols",
				Usage: helpers.DefaultSortFlag.Usage,
				Value: "numeric",
			},
			helpers.SnapshotFlag,
			helpers.NoOutFlag,
			helpers.CSVFlag,
			helpers.ScaleFlag,
		},
	})
}
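
A hypothetical invocation, reusing the extractor flags (-m/-e) that AdaptCommandForExtractor wires in, against the test data referenced below:

	rare spark -m "(.+) (\d+)" -e "{$ {1} {2}}" --rows 10 testdata/graph.txt

Each matched line increments the cell keyed by {1} (column) and {2} (row) — the table aggregator samples in "column row" order — and every row renders as one sparkline scaled against the shared min/max.
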
22 changes: 22 additions & 0 deletions cmd/spark_test.go
@@ -0,0 +1,22 @@
package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSparkline(t *testing.T) {
	testCommandSet(t, sparkCommand(),
		`-m "(.+) (\d+)" -e "{$ {1} {2}}" testdata/graph.txt`,
		`-o - -m "(.+) (\d+)" -e "{$ {1} {2}}" testdata/graph.txt`,
	)
}

func TestSparklineWithTrim(t *testing.T) {
	out, eout, err := testCommandCapture(sparkCommand(), `--snapshot -m "(.+) (.+)" -e {1} -e {2} --cols 2 testdata/heat.txt`)

	assert.NoError(t, err)
	assert.Empty(t, eout)
	assert.Contains(t, out, " First bc Last \ny 1 _█ 2 \nx 1 __ 1 \nMatched: 10 / 10 (R: 2; C: 2)")
}
29 changes: 29 additions & 0 deletions pkg/aggregation/table.go
@@ -155,13 +155,42 @@ func (s *TableAggregator) ColTotal(k string) int64 {
	return s.cols[k]
}

// Sum totals all data in the table
func (s *TableAggregator) Sum() (ret int64) {
	for _, v := range s.cols {
		ret += v
	}
	return
}

// Trim removes every cell for which the predicate returns true,
// dropping rows and columns left empty. Returns number of fields trimmed
func (s *TableAggregator) Trim(predicate func(col, row string, val int64) bool) int {
	trimmed := 0

	for colName := range s.cols {
		removeAllInCol := true
		for rowName, row := range s.rows {
			if predicate(colName, rowName, row.cols[colName]) {
				delete(row.cols, colName)
				trimmed++
			} else {
				removeAllInCol = false
			}

			// Drop the row entirely once it has no remaining columns
			if len(row.cols) == 0 {
				delete(s.rows, rowName)
			}
		}

		if removeAllInCol {
			delete(s.cols, colName)
		}
	}

	return trimmed
}
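
A minimal sketch of the Trim call pattern (hypothetical sample data; this mirrors how sparkFunction prunes stale columns in cmd/spark.go above):

	package main

	import "rare/pkg/aggregation"

	func main() {
		table := aggregation.NewTable(" ")
		table.Sample("2024-01-01 api") // first token = column, second = row
		table.Sample("2024-01-02 api")

		// Keep only the newest column; Trim deletes matching cells and
		// drops any row or column left empty
		removed := table.Trim(func(col, row string, val int64) bool {
			return col != "2024-01-02"
		})
		_ = removed // == 1; only the 2024-01-02 column remains
	}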

func (s *TableRow) Name() string {
	return s.name
}
25 changes: 25 additions & 0 deletions pkg/aggregation/table_test.go
@@ -1,7 +1,9 @@
package aggregation

import (
	"fmt"
	"rare/pkg/aggregation/sorting"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -91,3 +93,26 @@ func TestSingleRowTable(t *testing.T) {
	assert.Equal(t, int64(2), rows[0].Value("a"))
	assert.Equal(t, int64(1), rows[0].Value("b"))
}

func TestTrimData(t *testing.T) {
	table := NewTable(" ")
	for i := 0; i < 10; i++ {
		table.Sample(fmt.Sprintf("%d a", i))
		table.Sample(fmt.Sprintf("%d b", i))
	}

	assert.Len(t, table.Columns(), 10)

	trimmed := table.Trim(func(col, row string, val int64) bool {
		if row == "b" {
			return true
		}
		cVal, _ := strconv.Atoi(col)
		return cVal < 5
	})

	assert.ElementsMatch(t, []string{"5", "6", "7", "8", "9"}, table.Columns())
	assert.Equal(t, 15, trimmed) // 10 cells from row "b" + 5 cells of row "a" with col < 5
	assert.Len(t, table.Rows(), 1)
	assert.Len(t, table.Rows()[0].cols, 5)
}
89 changes: 89 additions & 0 deletions pkg/multiterm/termrenderers/spark.go
@@ -0,0 +1,89 @@
package termrenderers

import (
	"rare/pkg/aggregation"
	"rare/pkg/aggregation/sorting"
	"rare/pkg/color"
	"rare/pkg/humanize"
	"rare/pkg/multiterm"
	"rare/pkg/multiterm/termscaler"
	"rare/pkg/multiterm/termunicode"
	"strings"
)

type Spark struct {
	rowCount, colCount int
	footerOffset       int
	Scaler             termscaler.Scaler
	table              *TableWriter
}

func NewSpark(term multiterm.MultilineTerm, rows, cols int) *Spark {
	return &Spark{
		rowCount: rows,
		colCount: cols,
		Scaler:   termscaler.ScalerLinear,
		table:    NewTable(term, 4, rows+1),
	}
}

func (s *Spark) WriteTable(agg *aggregation.TableAggregator, rowSorter, colSorter sorting.NameValueSorter) {
	minVal := agg.ComputeMin() // Optimization: ComputeMinMax()
	maxVal := agg.ComputeMax()

	// Only render the trailing colCount columns
	colNames := agg.OrderedColumns(colSorter)
	if len(colNames) > s.colCount {
		colNames = colNames[len(colNames)-s.colCount:]
	}

	// reused buffer
	var sb strings.Builder
	sb.Grow(len(colNames))

	// Write header: first column name, dot-padding, last column name
	{
		dots := len(colNames) - len(colNames[0]) - len(colNames[len(colNames)-1])
		if dots < 0 {
			dots = 0
		}
		sb.WriteString(colNames[0])
		writeRepeat(&sb, '.', dots)
		sb.WriteString(colNames[len(colNames)-1])

		s.table.WriteRow(0, "", color.Wrap(color.Underline, "First"), sb.String(), color.Wrap(color.Underline, "Last"))
		sb.Reset()
	}

	// Each row: name, first value, sparkline, last value
	rows := agg.OrderedRows(rowSorter)
	rowCount := mini(len(rows), s.rowCount)
	for i := 0; i < rowCount; i++ {
		row := rows[i]

		for j := 0; j < len(colNames); j++ {
			termunicode.SparkWrite(&sb, s.Scaler.Scale(row.Value(colNames[j]), minVal, maxVal))
		}

		vFirst := humanize.Hi(row.Value(colNames[0]))
		vLast := humanize.Hi(row.Value(colNames[len(colNames)-1]))
		s.table.WriteRow(i+1, color.Wrap(color.Yellow, row.Name()), color.Wrap(color.BrightBlack, vFirst), sb.String(), color.Wrap(color.BrightBlack, vLast))

		sb.Reset()
	}

	// If more rows than can be displayed, write how many were missed
	if len(rows) > rowCount {
		s.table.WriteFooter(0, color.Wrapf(color.BrightBlack, "(%d more)", len(rows)-rowCount))
		s.footerOffset = 1
	} else {
		s.footerOffset = 0
	}
}

func (s *Spark) Close() {
	s.table.Close()
}

// WriteFooter writes a footer line, offset past the "(N more)" indicator when present
func (s *Spark) WriteFooter(idx int, line string) {
	s.table.WriteFooter(s.footerOffset+idx, line)
}
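
The renderer can also be driven directly, outside the CLI plumbing — a minimal sketch using the VirtualTerm helper, much as the tests below do (output string is illustrative):

	package main

	import (
		"fmt"

		"rare/pkg/aggregation"
		"rare/pkg/aggregation/sorting"
		"rare/pkg/multiterm"
		"rare/pkg/multiterm/termrenderers"
	)

	func main() {
		vt := multiterm.NewVirtualTerm()
		spark := termrenderers.NewSpark(vt, 2, 10)

		agg := aggregation.NewTable(" ")
		agg.Sample("1 requests") // column "1", row "requests"
		agg.Sample("2 requests")
		agg.Sample("2 requests")

		spark.WriteTable(agg, sorting.NVNameSorter, sorting.NVNameSorter)
		spark.Close()

		fmt.Println(vt.Get(1)) // e.g. "requests 1 _█ 2 "
	}
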
56 changes: 56 additions & 0 deletions pkg/multiterm/termrenderers/spark_test.go
@@ -0,0 +1,56 @@
package termrenderers

import (
	"rare/pkg/aggregation"
	"rare/pkg/aggregation/sorting"
	"rare/pkg/multiterm"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSimpleSpark(t *testing.T) {
	vt := multiterm.NewVirtualTerm()
	s := NewSpark(vt, 2, 2)

	agg := aggregation.NewTable(" ")
	agg.Sample("a 1")
	agg.Sample("a 2")

	s.WriteTable(agg, sorting.NVNameSorter, sorting.NVNameSorter)
	s.WriteFooter(0, "hello")

	assert.Equal(t, " First aa Last ", vt.Get(0))
	assert.Equal(t, "1 1 _ 1 ", vt.Get(1))
	assert.Equal(t, "2 1 _ 1 ", vt.Get(2))
	assert.Equal(t, "hello", vt.Get(3))
	assert.Equal(t, "", vt.Get(4))

	s.Close()
	assert.True(t, vt.IsClosed())
}

func TestOverflowSpark(t *testing.T) {
	vt := multiterm.NewVirtualTerm()
	s := NewSpark(vt, 2, 2)

	agg := aggregation.NewTable(" ")
	agg.Sample("1 a")
	agg.Sample("2 a")
	agg.Sample("2 b")
	agg.Sample("2 b")
	agg.Sample("1 c")

	s.WriteTable(agg, sorting.NVNameSorter, sorting.NVNameSorter)
	s.WriteFooter(0, "hello")

	assert.Equal(t, " First 12 Last ", vt.Get(0))
	assert.Equal(t, "a 1 ▄▄ 1 ", vt.Get(1))
	assert.Equal(t, "b 0 _█ 2 ", vt.Get(2))
	assert.Equal(t, "(1 more)", vt.Get(3))
	assert.Equal(t, "hello", vt.Get(4))
	assert.Equal(t, "", vt.Get(5))

	s.Close()
	assert.True(t, vt.IsClosed())
}