Skip to content
This repository has been archived by the owner on Oct 30, 2024. It is now read-only.

Commit

Permalink
chore: go mod tidy, gofmt, staticcheck
Browse files Browse the repository at this point in the history
  • Loading branch information
galargh committed Aug 23, 2023
1 parent d58a4dd commit 84f2c7f
Show file tree
Hide file tree
Showing 10 changed files with 91 additions and 113 deletions.
3 changes: 3 additions & 0 deletions cmd/stargate/fetch.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,9 @@ var fetchCmd = &cli.Command{
return err
}
ro, err := blockstore.OpenReadOnly(name)
if err != nil {
return err
}

ls := cidlink.DefaultLinkSystem()
ls.TrustedStorage = true
Expand Down
12 changes: 6 additions & 6 deletions cmd/stargate/import.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,11 @@ var importCmd = &cli.Command{
}
newLocale := filepath.Join(carPath(repoDir), root.String()+".car")
if fileExists(newLocale) {
return errors.New("File or directory already imported")
return errors.New("file or directory already imported")
}
err = os.Rename(carFileName, newLocale)
if err != nil {
return fmt.Errorf("Renaming file: %w", err)
return fmt.Errorf("renaming file: %w", err)
}
carFileName = newLocale

Expand Down Expand Up @@ -128,23 +128,23 @@ func indexImport(ctx context.Context, carFileName string, db *sql.SQLUnixFSStore

bs, err := stores.ReadOnlyFilestore(carFileName)
if err != nil {
return fmt.Errorf("Reopening file store")
return fmt.Errorf("reopening file store")
}
allKeys, err := bs.AllKeysChan(ctx)
if err != nil {
return fmt.Errorf("Fetching all block keys")
return fmt.Errorf("fetching all block keys")
}
lsys := storeutil.LinkSystemForBlockstore(bs)

roots, err := traversal.DiscoverRoots(ctx, allKeys, &lsys)
if err != nil {
return fmt.Errorf("Discovering roots: %w", err)
return fmt.Errorf("discovering roots: %w", err)
}

for _, root := range roots {
err := db.AddRoot(ctx, root, []byte(carFileName), &lsys)
if err != nil {
return fmt.Errorf("Adding root to index: %w", err)
return fmt.Errorf("adding root to index: %w", err)
}
}
return bs.Close()
Expand Down
5 changes: 2 additions & 3 deletions internal/stores/filestore_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ package stores
import (
"context"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
Expand Down Expand Up @@ -112,7 +111,7 @@ func dagToNormalFile(t *testing.T, ctx context.Context, root cid.Cid, bs bstore.
return nil, err
}

finalBytes, err := ioutil.ReadAll(outputF)
finalBytes, err := io.ReadAll(outputF)
if err != nil {
return nil, err
}
Expand All @@ -136,7 +135,7 @@ func createFile(t *testing.T, rseed int64, size int64) (path string, contents []

_, err = file.Seek(0, io.SeekStart)
require.NoError(t, err)
bz, err := ioutil.ReadAll(file)
bz, err := io.ReadAll(file)
require.NoError(t, err)
require.NoError(t, file.Close())

Expand Down
5 changes: 2 additions & 3 deletions internal/stores/kvcarbs.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"sync"

Expand Down Expand Up @@ -331,10 +330,10 @@ func (drsb *discardingReadSeekerPlusByte) Seek(offset int64, whence int) (int64,
if n < 0 {
panic("unsupported rewind via whence: io.SeekStart")
}
_, err := io.CopyN(ioutil.Discard, drsb, n)
_, err := io.CopyN(io.Discard, drsb, n)
return drsb.offset, err
case io.SeekCurrent:
_, err := io.CopyN(ioutil.Discard, drsb, offset)
_, err := io.CopyN(io.Discard, drsb, offset)
return drsb.offset, err
default:
panic("unsupported whence: io.SeekEnd")
Expand Down
19 changes: 0 additions & 19 deletions pkg/blockwriter/blockwriter.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,6 @@ func newBlock() *block {
return newItem
}

// freeList hands a batch of blocks back to the shared pool, clearing each
// block's data reference first so the pool does not keep the payload alive.
func freeList(blocks []*block) {
	for idx := range blocks {
		b := blocks[idx]
		b.data = nil
		blockPool.Put(b)
	}
}

type BlockWriter struct {
head *block
tail *block
Expand Down Expand Up @@ -105,18 +98,6 @@ func (bq *BlockWriter) Write(p []byte) (n int, err error) {
}
}

// empty reports whether the writer's block queue currently holds nothing.
func (bq *BlockWriter) empty() bool {
	hasHead := bq.head != nil
	return !hasHead
}

// first returns the payload of the block at the head of the queue, or nil
// when the queue is empty.
func (bq *BlockWriter) first() []byte {
	if head := bq.head; head != nil {
		return head.data
	}
	return nil
}

func (bq *BlockWriter) writeLoop() {
bq.lk.Lock()
defer bq.lk.Unlock()
Expand Down
17 changes: 7 additions & 10 deletions pkg/carwriter.go/carwriter.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ import (
"io"

"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
stargate "github.com/ipfs/stargate/pkg"
"github.com/ipld/go-car"
"github.com/ipld/go-car/util"
Expand All @@ -18,8 +17,6 @@ import (
"github.com/multiformats/go-multihash"
)

var log = logging.Logger("stargate-carwriter")

// WriteCar traverses a StarGate query using a resolver to write StarGate CAR response to the given writer
func WriteCar(ctx context.Context, w io.Writer, root cid.Cid, paths stargate.PathSegments, query stargate.Query, appResolver stargate.AppResolver) error {
// write CAR header
Expand All @@ -29,44 +26,44 @@ func WriteCar(ctx context.Context, w io.Writer, root cid.Cid, paths stargate.Pat
}
err := car.WriteHeader(&header, w)
if err != nil {
return fmt.Errorf("Writing car header: %w", err)
return fmt.Errorf("writing car header: %w", err)
}
// resolve root
lsys, resolver, err := appResolver.GetResolver(ctx, root)
if err != nil {
return fmt.Errorf("Error loading root resolver: %w", err)
return fmt.Errorf("error loading root resolver: %w", err)
}
// resolve all path segments
for len(paths) != 0 {
var path *stargate.Path
path, paths, resolver, err = resolver.ResolvePathSegments(ctx, paths)
if err != nil {
return fmt.Errorf("Resolving path segments: %w", err)
return fmt.Errorf("resolving path segments: %w", err)
}
err = writeStarGateMessageAndBlocks(ctx, w, stargate.StarGateMessage{
Kind: stargate.KindPath,
Path: path,
}, lsys)
if err != nil {
return fmt.Errorf("Encoding stargate message and blocks: %w", err)
return fmt.Errorf("encoding stargate message and blocks: %w", err)
}
}
// resolve query
queryResolver, err := resolver.ResolveQuery(ctx, query)
if err != nil {
return fmt.Errorf("Resolving Query: %w", err)
return fmt.Errorf("resolving Query: %w", err)
}
for !queryResolver.Done() {
dag, err := queryResolver.Next()
if err != nil {
return fmt.Errorf("Resolving Query Step: %w", err)
return fmt.Errorf("resolving Query Step: %w", err)
}
err = writeStarGateMessageAndBlocks(ctx, w, stargate.StarGateMessage{
Kind: stargate.KindDAG,
DAG: dag,
}, lsys)
if err != nil {
return fmt.Errorf("Encoding stargate message and blocks: %w", err)
return fmt.Errorf("encoding stargate message and blocks: %w", err)
}
}
return nil
Expand Down
2 changes: 1 addition & 1 deletion pkg/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,5 +31,5 @@ func (e ErrPathError) Error() string {
type ErrNoMoreMessages struct{}

func (e ErrNoMoreMessages) Error() string {
return fmt.Sprintf("query resolution already complete")
return "query resolution already complete"
}
4 changes: 2 additions & 2 deletions pkg/unixfsstore/sql/dirlinks_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ package sql_test
import (
"context"
"database/sql"
"io/ioutil"
"os"
"testing"

"github.com/ipfs/go-cid"
Expand Down Expand Up @@ -129,7 +129,7 @@ func TestDirLinksDB(t *testing.T) {
}

func CreateTestTmpDB(t *testing.T) *sql.DB {
f, err := ioutil.TempFile(t.TempDir(), "*.db")
f, err := os.CreateTemp(t.TempDir(), "*.db")
require.NoError(t, err)
require.NoError(t, f.Close())
d, err := ufssql.SqlDB(f.Name())
Expand Down
130 changes: 64 additions & 66 deletions pkg/unixfsstore/traversal/roots.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,82 +19,80 @@ func DiscoverRoots(ctx context.Context, incoming <-chan cid.Cid, ls *linking.Lin
roots := make(map[cid.Cid]struct{})
nonRoots := make(map[cid.Cid]struct{})
for {
select {
case next, ok := <-incoming:
if !ok {
rootsResult := make([]cid.Cid, 0, len(roots))
for root := range roots {
rootsResult = append(rootsResult, root)
}
return rootsResult, nil
next, ok := <-incoming
if !ok {
rootsResult := make([]cid.Cid, 0, len(roots))
for root := range roots {
rootsResult = append(rootsResult, root)
}
return rootsResult, nil
}

// we only care about protobuf nodes
var nonRootChildren []cid.Cid
switch multicodec.Code(next.Type()) {
case multicodec.DagPb:
nd, err := ls.Load(ipld.LinkContext{Ctx: ctx}, cidlink.Link{next}, dagpb.Type.PBNode)
if err != nil {
return nil, fmt.Errorf("malformed blockstore cid %s: %w", next.String(), err)
}
pbnd, ok := nd.(dagpb.PBNode)
if !ok {
return nil, fmt.Errorf("malformed blockstore cid %s: %w", next.String(), hamt.ErrNotProtobuf)
}
// if no data field, ignore
if !pbnd.FieldData().Exists() {
continue
}
// if not UnixFS data, ignore
ufsdata, err := data.DecodeUnixFSData(pbnd.FieldData().Must().Bytes())
if err != nil {
continue
// we only care about protobuf nodes
var nonRootChildren []cid.Cid
switch multicodec.Code(next.Type()) {
case multicodec.DagPb:
nd, err := ls.Load(ipld.LinkContext{Ctx: ctx}, cidlink.Link{next}, dagpb.Type.PBNode)

Check failure on line 35 in pkg/unixfsstore/traversal/roots.go

View workflow job for this annotation

GitHub Actions / go-check / All

github.com/ipld/go-ipld-prime/linking/cid.Link composite literal uses unkeyed fields
if err != nil {
return nil, fmt.Errorf("malformed blockstore cid %s: %w", next.String(), err)
}
pbnd, ok := nd.(dagpb.PBNode)
if !ok {
return nil, fmt.Errorf("malformed blockstore cid %s: %w", next.String(), hamt.ErrNotProtobuf)
}
// if no data field, ignore
if !pbnd.FieldData().Exists() {
continue
}
// if not UnixFS data, ignore
ufsdata, err := data.DecodeUnixFSData(pbnd.FieldData().Must().Bytes())
if err != nil {
continue
}
// ok, it's a unixfsnode, so we may want to add as root
// record relevant non-root children
switch ufsdata.DataType.Int() {
case data.Data_File:
// for a regular file, all children are now non-root children
iter := pbnd.Links.Iterator()
for !iter.Done() {
_, lnk := iter.Next()
nonRootChildren = append(nonRootChildren, lnk.Hash.Link().(cidlink.Link).Cid)
}
// ok, it's a unixfsnode, so we may want to add as root
// record relevant non-root children
switch ufsdata.DataType.Int() {
case data.Data_File:
// for a regular file, all children are now non-root children
iter := pbnd.Links.Iterator()
for !iter.Done() {
_, lnk := iter.Next()
nonRootChildren = append(nonRootChildren, lnk.Hash.Link().(cidlink.Link).Cid)
case data.Data_HAMTShard:
// for a hamt directory, all children that are not value nodes are non root children
iter := pbnd.Links.Iterator()
maxPadLen := maxPadLength(ufsdata)
for !iter.Done() {
_, lnk := iter.Next()
isValue, err := isValueLink(lnk, maxPadLen)
if err != nil {
return nil, err
}
case data.Data_HAMTShard:
// for a hamt directory, all children that are not value nodes are non root children
iter := pbnd.Links.Iterator()
maxPadLen := maxPadLength(ufsdata)
for !iter.Done() {
_, lnk := iter.Next()
isValue, err := isValueLink(lnk, maxPadLen)
if err != nil {
return nil, err
}
if !isValue {
nonRootChildren = append(nonRootChildren, lnk.Hash.Link().(cidlink.Link).Cid)
}
if !isValue {
nonRootChildren = append(nonRootChildren, lnk.Hash.Link().(cidlink.Link).Cid)
}
default:
// all other unixfs types do not have non-root children
}
case multicodec.Raw:
// raw may be a root, but it has no children
default:
// not raw or dagpb, ignore
continue
// all other unixfs types do not have non-root children
}
case multicodec.Raw:
// raw may be a root, but it has no children
default:
// not raw or dagpb, ignore
continue
}

for _, child := range nonRootChildren {
_, isRoot := roots[child]
if isRoot {
delete(roots, child)
}
nonRoots[child] = struct{}{}
}
_, isNonRoot := nonRoots[next]
if !isNonRoot {
roots[next] = struct{}{}
for _, child := range nonRootChildren {
_, isRoot := roots[child]
if isRoot {
delete(roots, child)
}
nonRoots[child] = struct{}{}
}
_, isNonRoot := nonRoots[next]
if !isNonRoot {
roots[next] = struct{}{}
}
}
}
7 changes: 4 additions & 3 deletions pkg/unixfsstore/traversal/traversal.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,9 +104,7 @@ func iterateHAMTDirLinks(ctx context.Context, root cid.Cid, substrate dagpb.PBNo
name := st.transformNameNode(next.FieldName().Must())
onPathCids := make([]cid.Cid, 0, len(cidsSoFar)+1)
// copy array before handing off, so further modifications do not affect result
for _, c := range cidsSoFar {
onPathCids = append(onPathCids, c)
}
onPathCids = append(onPathCids, cidsSoFar...)
onPathCids = append(onPathCids, next.FieldHash().Link().(cidlink.Link).Cid)
if err := visitor.OnPath(ctx, root, name.String(), onPathCids); err != nil {
return err
Expand All @@ -115,6 +113,9 @@ func iterateHAMTDirLinks(ctx context.Context, root cid.Cid, substrate dagpb.PBNo
continue
}
nd, err := lsys.Load(ipld.LinkContext{Ctx: ctx}, next.FieldHash().Link(), dagpb.Type.PBNode)
if err != nil {
return err
}

pbnd, ok := nd.(dagpb.PBNode)
if !ok {
Expand Down

0 comments on commit 84f2c7f

Please sign in to comment.