From 29c2de28efe2121286e75bc804594cec5e0af5c0 Mon Sep 17 00:00:00 2001 From: emidev98 Date: Thu, 7 Dec 2023 18:54:57 +0200 Subject: [PATCH 01/14] init: fast query DB --- app/app.go | 44 + app/fast_query/db/driver/batch.go | 84 ++ app/fast_query/db/driver/db.go | 155 +++ app/fast_query/db/driver/iterator.go | 92 ++ app/fast_query/db/driver/rollbackable.go | 55 + app/fast_query/db/driver/safe_batch.go | 110 ++ .../db/driver/safe_batch_nullify.go | 37 + app/fast_query/db/driver/types.go | 51 + app/fast_query/db/height_driver/height_db.go | 228 ++++ .../db/height_driver/height_db_iterator.go | 46 + app/fast_query/db/height_driver/types.go | 71 ++ app/fast_query/db/height_driver/utils.go | 51 + app/fast_query/db/snappy/snappy_batch.go | 38 + app/fast_query/db/snappy/snappy_db.go | 115 ++ app/fast_query/db/snappy/snappy_db_test.go | 93 ++ app/fast_query/db/types.go | 78 ++ app/fast_query/db/utils/bytes.go | 25 + app/fast_query/fast_query_service.go | 49 + app/fast_query/store/dbadapter.go | 48 + app/fast_query/store/proof.go | 27 + app/fast_query/store/store.go | 1126 +++++++++++++++++ app/fast_query/streaming_service.go | 82 ++ .../src/setup/init-test-framework.sh | 2 + 23 files changed, 2707 insertions(+) create mode 100644 app/fast_query/db/driver/batch.go create mode 100644 app/fast_query/db/driver/db.go create mode 100644 app/fast_query/db/driver/iterator.go create mode 100644 app/fast_query/db/driver/rollbackable.go create mode 100644 app/fast_query/db/driver/safe_batch.go create mode 100644 app/fast_query/db/driver/safe_batch_nullify.go create mode 100644 app/fast_query/db/driver/types.go create mode 100644 app/fast_query/db/height_driver/height_db.go create mode 100644 app/fast_query/db/height_driver/height_db_iterator.go create mode 100644 app/fast_query/db/height_driver/types.go create mode 100644 app/fast_query/db/height_driver/utils.go create mode 100644 app/fast_query/db/snappy/snappy_batch.go create mode 100644 app/fast_query/db/snappy/snappy_db.go create mode 100644 app/fast_query/db/snappy/snappy_db_test.go create mode 100644 app/fast_query/db/types.go create mode 100644 app/fast_query/db/utils/bytes.go create mode 100644 app/fast_query/fast_query_service.go create mode 100644 app/fast_query/store/dbadapter.go create mode 100644 app/fast_query/store/proof.go create mode 100644 app/fast_query/store/store.go create mode 100644 app/fast_query/streaming_service.go diff --git a/app/app.go b/app/app.go index 863c7836..674b7637 100644 --- a/app/app.go +++ b/app/app.go @@ -7,12 +7,15 @@ import ( "os" "path/filepath" "reflect" // #nosec G702 + "slices" "github.com/prometheus/client_golang/prometheus" + "github.com/cosmos/cosmos-sdk/store/streaming" authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + fastquery "github.com/terra-money/core/v2/app/fast_query" "github.com/terra-money/core/v2/app/keepers" "github.com/terra-money/core/v2/app/post" "github.com/terra-money/core/v2/app/rpc" @@ -247,11 +250,24 @@ func NewTerraApp( app.mm.RegisterInvariants(&app.Keepers.CrisisKeeper) app.mm.RegisterServices(app.configurator) + // load state streaming if enabled + if _, _, err := streaming.LoadStreamingServices(app.BaseApp, appOpts, app.appCodec, app.Logger(), app.keys); err != nil { + panic(err) + } + // initialize stores app.MountKVStores(app.keys) app.MountTransientStores(app.tkeys) app.MountMemoryStores(app.memKeys) + // when fastquery streamer is enabled in the config, + // setup the fastquery feature and serve the data + // from the 
fastquery.
+	streamers := cast.ToStringSlice(appOpts.Get("store.streamers"))
+	if slices.Contains(streamers, "fastquery") {
+		if err := app.SetupFastQueryDB(appOpts, homePath); err != nil {
+			panic(err)
+		}
+	}
+
 	// register upgrade
 	app.RegisterUpgradeHandlers()
 	app.RegisterUpgradeStores()
@@ -618,3 +634,31 @@ func (app *TerraApp) GetWasmOpts(appOpts servertypes.AppOptions) []wasmkeeper.Op
 	return wasmOpts
 }
+
+func (app *TerraApp) SetupFastQueryDB(appOpts servertypes.AppOptions, homePath string) error {
+	// Create the path for the fast query DB
+	dir := filepath.Join(homePath, "data")
+
+	// Create a copy of the store keys to avoid mutating app.keys
+	storeKeys := make([]storetypes.StoreKey, 0, len(app.keys))
+	for _, storeKey := range app.keys {
+		storeKeys = append(storeKeys, storeKey)
+	}
+
+	// Create the fast query service
+	fastQueryService, err := fastquery.NewFastQueryService(dir, app.Logger())
+	if err != nil {
+		return err
+	}
+
+	// Create the streaming service
+	streamingservice := fastquery.NewStreamingService(fastQueryService, storeKeys)
+
+	// Assign the streaming service to the app and to the
+	// query multi store so that queries are served from
+	// the fast query store.
+	app.SetStreamingService(streamingservice)
+	app.SetQueryMultiStore(fastQueryService.Store)
+
+	return nil
+}
diff --git a/app/fast_query/db/driver/batch.go b/app/fast_query/db/driver/batch.go
new file mode 100644
index 00000000..e422f57c
--- /dev/null
+++ b/app/fast_query/db/driver/batch.go
@@ -0,0 +1,84 @@
+package driver
+
+import (
+	"fmt"
+
+	tmdb "github.com/cometbft/cometbft-db"
+	"github.com/terra-money/core/v2/app/fast_query/db/height_driver"
+)
+
+var _ height_driver.HeightEnabledBatch = (*DriverBatch)(nil)
+var _ HasRollbackBatch = (*DriverBatch)(nil)
+
+type DriverBatch struct {
+	height int64
+	batch  *RollbackableBatch
+	mode   int
+}
+
+func (b *DriverBatch) keyBytesWithHeight(key []byte) []byte {
+	return append(prefixDataWithHeightKey(key), serializeHeight(b.mode, b.height)...)
+}
+
+func NewLevelDBBatch(atHeight int64, dbDriver *DBDriver) *DriverBatch {
+	return &DriverBatch{
+		height: atHeight,
+		batch:  NewRollbackableBatch(dbDriver.session),
+		mode:   dbDriver.mode,
+	}
+}
+
+func (b *DriverBatch) Set(key, value []byte) error {
+	newKey := b.keyBytesWithHeight(key)
+
+	// pre-allocate the buffer: one tombstone byte plus the value
+	buf := make([]byte, 0, len(value)+1)
+	buf = append(buf, byte(0)) // 0 => not deleted
+	buf = append(buf, value...)
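+	// buf layout: one tombstone byte (0 = live, 1 = deleted) followed by the raw
+	// value. The current-data record below strips the flag via buf[1:], while the
+	// height-suffixed record keeps it so reads at a past height can detect deletes.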
+
+	if err := b.batch.Set(prefixCurrentDataKey(key), buf[1:]); err != nil {
+		return err
+	}
+	if err := b.batch.Set(prefixKeysForIteratorKey(key), []byte{}); err != nil {
+		return err
+	}
+	return b.batch.Set(newKey, buf)
+}
+
+func (b *DriverBatch) Delete(key []byte) error {
+	newKey := b.keyBytesWithHeight(key)
+
+	buf := []byte{1}
+
+	if err := b.batch.Delete(prefixCurrentDataKey(key)); err != nil {
+		return err
+	}
+	if err := b.batch.Set(prefixKeysForIteratorKey(key), buf); err != nil {
+		return err
+	}
+	return b.batch.Set(newKey, buf)
+}
+
+func (b *DriverBatch) Write() error {
+	return b.batch.Write()
+}
+
+func (b *DriverBatch) WriteSync() error {
+	return b.batch.WriteSync()
+}
+
+func (b *DriverBatch) Close() error {
+	return b.batch.Close()
+}
+
+func (b *DriverBatch) RollbackBatch() tmdb.Batch {
+	b.Metric()
+	return b.batch.RollbackBatch
+}
+
+func (b *DriverBatch) Metric() {
+	fmt.Printf("[rollback-batch] rollback batch for height %d's record length %d\n",
+		b.height,
+		b.batch.RecordCount,
+	)
+}
diff --git a/app/fast_query/db/driver/db.go b/app/fast_query/db/driver/db.go
new file mode 100644
index 00000000..8bead232
--- /dev/null
+++ b/app/fast_query/db/driver/db.go
@@ -0,0 +1,155 @@
+package driver
+
+import (
+	"fmt"
+	"math"
+
+	dbm "github.com/cometbft/cometbft-db"
+	"github.com/terra-money/core/v2/app/fast_query/db/height_driver"
+	"github.com/terra-money/core/v2/app/fast_query/db/utils"
+)
+
+type DBDriver struct {
+	session *dbm.GoLevelDB
+	mode    int
+}
+
+func NewDBDriver(dir string) (*DBDriver, error) {
+	ldb, err := dbm.NewGoLevelDB(DBName, dir)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DBDriver{
+		session: ldb,
+		mode:    DriverModeKeySuffixDesc,
+	}, nil
+}
+
+func (dbDriver *DBDriver) newInnerIterator(requestHeight int64, pdb *dbm.PrefixDB) (dbm.Iterator, error) {
+	if dbDriver.mode == DriverModeKeySuffixAsc {
+		heightEnd := utils.UintToBigEndian(uint64(requestHeight + 1))
+		return pdb.ReverseIterator(nil, heightEnd)
+	} else {
+		heightStart := utils.UintToBigEndian(math.MaxUint64 - uint64(requestHeight))
+		return pdb.Iterator(heightStart, nil)
+	}
+}
+
+func (dbDriver *DBDriver) Get(maxHeight int64, key []byte) ([]byte, error) {
+	if maxHeight == 0 {
+		return dbDriver.session.Get(prefixCurrentDataKey(key))
+	}
+	var requestHeight = height_driver.Height(maxHeight).CurrentOrLatest().ToInt64()
+	var requestHeightMin = height_driver.Height(0).CurrentOrNever().ToInt64()
+
+	// reject requests for heights below the minimum readable height
+	if requestHeightMin > requestHeight {
+		return nil, fmt.Errorf("invalid height")
+	}
+
+	pdb := dbm.NewPrefixDB(dbDriver.session, prefixDataWithHeightKey(key))
+
+	iter, _ := dbDriver.newInnerIterator(requestHeight, pdb)
+	defer iter.Close()
+
+	// in tm-db@v0.6.4, key not found is NOT an error
+	if !iter.Valid() {
+		return nil, nil
+	}
+
+	value := iter.Value()
+	deleted := value[0]
+	if deleted == 1 {
+		return nil, nil
+	} else {
+		if len(value) > 1 {
+			return value[1:], nil
+		}
+		return []byte{}, nil
+	}
+}
+
+func (dbDriver *DBDriver) Has(maxHeight int64, key []byte) (bool, error) {
+	if maxHeight == 0 {
+		return dbDriver.session.Has(prefixCurrentDataKey(key))
+	}
+	var requestHeight = height_driver.Height(maxHeight).CurrentOrLatest().ToInt64()
+	var requestHeightMin = height_driver.Height(0).CurrentOrNever().ToInt64()
+
+	// reject requests for heights below the minimum readable height
+	if requestHeightMin > requestHeight {
+		return false, fmt.Errorf("invalid height")
+	}
+
+	pdb := dbm.NewPrefixDB(dbDriver.session, prefixDataWithHeightKey(key))
+
+	iter, _ := 
dbDriver.newInnerIterator(requestHeight, pdb) + defer iter.Close() + + // in tm-db@v0.6.4, key not found is NOT an error + if !iter.Valid() { + return false, nil + } + + deleted := iter.Value()[0] + + if deleted == 1 { + return false, nil + } else { + return true, nil + } +} + +func (dbDriver *DBDriver) Set(atHeight int64, key, value []byte) error { + // should never reach here, all should be batched in tiered+hld + panic("should never reach here") +} + +func (dbDriver *DBDriver) SetSync(atHeight int64, key, value []byte) error { + // should never reach here, all should be batched in tiered+hld + panic("should never reach here") +} + +func (dbDriver *DBDriver) Delete(atHeight int64, key []byte) error { + // should never reach here, all should be batched in tiered+hld + panic("should never reach here") +} + +func (dbDriver *DBDriver) DeleteSync(atHeight int64, key []byte) error { + return dbDriver.Delete(atHeight, key) +} + +func (dbDriver *DBDriver) Iterator(maxHeight int64, start, end []byte) (height_driver.HeightEnabledIterator, error) { + if maxHeight == 0 { + pdb := dbm.NewPrefixDB(dbDriver.session, cCurrentDataPrefix) + return pdb.Iterator(start, end) + } + return NewLevelDBIterator(dbDriver, maxHeight, start, end) +} + +func (dbDriver *DBDriver) ReverseIterator(maxHeight int64, start, end []byte) (height_driver.HeightEnabledIterator, error) { + if maxHeight == 0 { + pdb := dbm.NewPrefixDB(dbDriver.session, cCurrentDataPrefix) + return pdb.ReverseIterator(start, end) + } + return NewLevelDBReverseIterator(dbDriver, maxHeight, start, end) +} + +func (dbDriver *DBDriver) Close() error { + dbDriver.session.Close() + return nil +} + +func (dbDriver *DBDriver) NewBatch(atHeight int64) height_driver.HeightEnabledBatch { + return NewLevelDBBatch(atHeight, dbDriver) +} + +// TODO: Implement me +func (dbDriver *DBDriver) Print() error { + return nil +} + +func (dbDriver *DBDriver) Stats() map[string]string { + return nil +} diff --git a/app/fast_query/db/driver/iterator.go b/app/fast_query/db/driver/iterator.go new file mode 100644 index 00000000..8b1dafb0 --- /dev/null +++ b/app/fast_query/db/driver/iterator.go @@ -0,0 +1,92 @@ +package driver + +import ( + "bytes" + + tmdb "github.com/cometbft/cometbft-db" + "github.com/terra-money/core/v2/app/fast_query/db/height_driver" +) + +var _ height_driver.HeightEnabledIterator = (*Iterator)(nil) + +type Iterator struct { + dbDriver *DBDriver + tmdb.Iterator + + maxHeight int64 + start []byte + end []byte + + // caching last validated key and value + // since Valid and Value functions are expensive but called repeatedly + lastValidKey []byte + lastValidValue []byte +} + +func NewLevelDBIterator(dbDriver *DBDriver, maxHeight int64, start, end []byte) (*Iterator, error) { + pdb := tmdb.NewPrefixDB(dbDriver.session, cKeysForIteratorPrefix) + iter, err := pdb.Iterator(start, end) + if err != nil { + return nil, err + } + + return &Iterator{ + dbDriver: dbDriver, + Iterator: iter, + + maxHeight: maxHeight, + start: start, + end: end, + }, nil +} +func NewLevelDBReverseIterator(dbDriver *DBDriver, maxHeight int64, start, end []byte) (*Iterator, error) { + pdb := tmdb.NewPrefixDB(dbDriver.session, cKeysForIteratorPrefix) + iter, err := pdb.ReverseIterator(start, end) + if err != nil { + return nil, err + } + + return &Iterator{ + dbDriver: dbDriver, + Iterator: iter, + + maxHeight: maxHeight, + start: start, + end: end, + }, nil +} + +func (i *Iterator) Domain() (start []byte, end []byte) { + panic("implement me") +} + +func (i *Iterator) Valid() bool { + 
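+	// NOTE: Valid may advance the underlying iterator past tombstoned records,
+	// so once it returns true, Key/Value refer to a live entry at maxHeight.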
// filter out items with Deleted = true + // it should return somewhere during the loop + // otherwise iterator has reached the end without finding any record + // with Delete = false, return false in such case. + + for ; i.Iterator.Valid(); i.Iterator.Next() { + if bytes.Equal(i.lastValidKey, i.Key()) { + return true + } + if val, _ := i.dbDriver.Get(i.maxHeight, i.Key()); val != nil { + i.lastValidKey = i.Key() + i.lastValidValue = val + return true + } + } + return false + +} + +func (i *Iterator) Value() (value []byte) { + if bytes.Equal(i.lastValidKey, i.Key()) { + return i.lastValidValue + } + val, err := i.dbDriver.Get(i.maxHeight, i.Key()) + if err != nil { + panic(err) + } + return val +} diff --git a/app/fast_query/db/driver/rollbackable.go b/app/fast_query/db/driver/rollbackable.go new file mode 100644 index 00000000..a1b38da2 --- /dev/null +++ b/app/fast_query/db/driver/rollbackable.go @@ -0,0 +1,55 @@ +package driver + +import ( + mdb "github.com/cometbft/cometbft-db" +) + +type HasRollbackBatch interface { + RollbackBatch() mdb.Batch +} + +var _ mdb.Batch = (*RollbackableBatch)(nil) + +type RollbackableBatch struct { + mdb.Batch + + db mdb.DB + RollbackBatch mdb.Batch + RecordCount int +} + +func NewRollbackableBatch(db mdb.DB) *RollbackableBatch { + return &RollbackableBatch{ + db: db, + Batch: db.NewBatch(), + RollbackBatch: db.NewBatch(), + } +} + +// revert value for key to previous state +func (b *RollbackableBatch) backup(key []byte) error { + b.RecordCount++ + data, err := b.db.Get(key) + if err != nil { + return err + } + if data == nil { + return b.RollbackBatch.Delete(key) + } else { + return b.RollbackBatch.Set(key, data) + } +} + +func (b *RollbackableBatch) Set(key, value []byte) error { + if err := b.backup(key); err != nil { + return err + } + return b.Batch.Set(key, value) +} + +func (b *RollbackableBatch) Delete(key []byte) error { + if err := b.backup(key); err != nil { + return err + } + return b.Batch.Delete(key) +} diff --git a/app/fast_query/db/driver/safe_batch.go b/app/fast_query/db/driver/safe_batch.go new file mode 100644 index 00000000..4be908da --- /dev/null +++ b/app/fast_query/db/driver/safe_batch.go @@ -0,0 +1,110 @@ +package driver + +import ( + "fmt" + + mdb "github.com/cometbft/cometbft-db" +) + +var _ mdb.DB = (*SafeBatchDB)(nil) +var _ SafeBatchDBCloser = (*SafeBatchDB)(nil) + +type SafeBatchDBCloser interface { + mdb.DB + Open() + Flush() (mdb.Batch, error) +} + +type SafeBatchDB struct { + db mdb.DB + batch mdb.Batch +} + +// open batch +func (s *SafeBatchDB) Open() { + s.batch = s.db.NewBatch() +} + +// flush batch and return rollback batch if rollbackable +func (s *SafeBatchDB) Flush() (mdb.Batch, error) { + defer func() { + if s.batch != nil { + s.batch.Close() + } + s.batch = nil + }() + + if batch, ok := s.batch.(HasRollbackBatch); ok { + return batch.RollbackBatch(), s.batch.WriteSync() + } else { + return nil, s.batch.WriteSync() + } +} + +func NewSafeBatchDB(db mdb.DB) mdb.DB { + return &SafeBatchDB{ + db: db, + batch: nil, + } +} + +func (s *SafeBatchDB) Get(bytes []byte) ([]byte, error) { + return s.db.Get(bytes) +} + +func (s *SafeBatchDB) Has(key []byte) (bool, error) { + return s.db.Has(key) +} + +func (s *SafeBatchDB) Set(key, value []byte) error { + if s.batch != nil { + return s.batch.Set(key, value) + } else { + return s.db.Set(key, value) + } +} + +func (s *SafeBatchDB) SetSync(key, value []byte) error { + return s.Set(key, value) +} + +func (s *SafeBatchDB) Delete(key []byte) error { + if s.batch != nil { + return 
s.batch.Delete(key) + } else { + return s.db.Delete(key) + } +} + +func (s *SafeBatchDB) DeleteSync(key []byte) error { + return s.Delete(key) +} + +func (s *SafeBatchDB) Iterator(start, end []byte) (mdb.Iterator, error) { + return s.db.Iterator(start, end) +} + +func (s *SafeBatchDB) ReverseIterator(start, end []byte) (mdb.Iterator, error) { + return s.db.ReverseIterator(start, end) +} + +func (s *SafeBatchDB) Close() error { + return s.db.Close() +} + +func (s *SafeBatchDB) NewBatch() mdb.Batch { + if s.batch != nil { + return NewSafeBatchNullify(s.batch) + } else { + fmt.Println("=== warn! should never enter here") + return s.db.NewBatch() + } +} + +func (s *SafeBatchDB) Print() error { + return s.db.Print() +} + +func (s *SafeBatchDB) Stats() map[string]string { + return s.db.Stats() +} diff --git a/app/fast_query/db/driver/safe_batch_nullify.go b/app/fast_query/db/driver/safe_batch_nullify.go new file mode 100644 index 00000000..b17d41df --- /dev/null +++ b/app/fast_query/db/driver/safe_batch_nullify.go @@ -0,0 +1,37 @@ +package driver + +import mdb "github.com/cometbft/cometbft-db" + +var _ mdb.Batch = (*SafeBatchNullified)(nil) + +type SafeBatchNullified struct { + batch mdb.Batch +} + +func NewSafeBatchNullify(batch mdb.Batch) mdb.Batch { + return &SafeBatchNullified{ + batch: batch, + } +} + +func (s SafeBatchNullified) Set(key, value []byte) error { + return s.batch.Set(key, value) +} + +func (s SafeBatchNullified) Delete(key []byte) error { + return s.batch.Delete(key) +} + +func (s SafeBatchNullified) Write() error { + // noop + return nil +} + +func (s SafeBatchNullified) WriteSync() error { + return s.Write() +} + +func (s SafeBatchNullified) Close() error { + // noop + return nil +} diff --git a/app/fast_query/db/driver/types.go b/app/fast_query/db/driver/types.go new file mode 100644 index 00000000..35551165 --- /dev/null +++ b/app/fast_query/db/driver/types.go @@ -0,0 +1,51 @@ +package driver + +import ( + "encoding/binary" + "math" + + "github.com/terra-money/core/v2/app/fast_query/db/utils" +) + +const ( + DriverModeKeySuffixAsc = iota + DriverModeKeySuffixDesc = 1 + DBName = "fast_query" +) + +var ( + cCurrentDataPrefix = []byte{0} + cKeysForIteratorPrefix = []byte{1} + cDataWithHeightPrefix = []byte{2} +) + +func prefixCurrentDataKey(key []byte) []byte { + return append(cCurrentDataPrefix, key...) +} + +func prefixKeysForIteratorKey(key []byte) []byte { + return append(cKeysForIteratorPrefix, key...) +} + +func prefixDataWithHeightKey(key []byte) []byte { + result := make([]byte, 0, len(cDataWithHeightPrefix)+len(key)) + result = append(result, cDataWithHeightPrefix...) + result = append(result, key...) 
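+	// resulting layout: [0x02][raw key]; the mode-dependent big-endian height
+	// suffix is appended later by keyBytesWithHeight via serializeHeight.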
+ return result +} + +func serializeHeight(mode int, height int64) []byte { + if mode == DriverModeKeySuffixAsc { + return utils.UintToBigEndian(uint64(height)) + } else { + return utils.UintToBigEndian(math.MaxUint64 - uint64(height)) + } +} + +func deserializeHeight(mode int, data []byte) int64 { + if mode == DriverModeKeySuffixAsc { + return int64(binary.BigEndian.Uint64(data)) + } else { + return int64(math.MaxUint64 - binary.BigEndian.Uint64(data)) + } +} diff --git a/app/fast_query/db/height_driver/height_db.go b/app/fast_query/db/height_driver/height_db.go new file mode 100644 index 00000000..a187e6c8 --- /dev/null +++ b/app/fast_query/db/height_driver/height_db.go @@ -0,0 +1,228 @@ +package height_driver + +import ( + "bytes" + "fmt" + "sync" + + "github.com/terra-money/core/v2/app/fast_query/db/utils" + + tmdb "github.com/cometbft/cometbft-db" +) + +const ( + LatestHeight = 0 + InvalidHeight = 0 + + debugKeyGet = iota + debugKeySet + debugKeyIterator + debugKeyReverseIterator + debugKeyGetResult +) + +var LatestHeightBuf = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +var _ HeigthDB = (*HeightDB)(nil) + +type HeightDB struct { + odb HeightEnabledDB + readMutex *sync.RWMutex + writeMutex *sync.RWMutex + writeHeight int64 + readHeight int64 + config *HeightDBConfig + + // writeBatch HeightEnabledBatch +} + +type HeightDBConfig struct { + Debug bool +} + +func NewHeightDB(db HeightEnabledDB, config *HeightDBConfig) *HeightDB { + return &HeightDB{ + writeHeight: 0, + readHeight: 0, + readMutex: new(sync.RWMutex), + writeMutex: new(sync.RWMutex), + odb: db, + config: config, + // writeBatch: nil, + } +} + +func (hd *HeightDB) BranchHeightDB(height int64) *HeightDB { + newOne := NewHeightDB(hd.odb, hd.config) + newOne.SetReadHeight(height) + return newOne +} + +// SetReadHeight sets a target read height in the db driver. +// It acts differently if the db mode is writer or reader: +// - Reader uses readHeight as the max height at which the retrieved key/value pair is limited to, +// allowing full block snapshot history +func (hd *HeightDB) SetReadHeight(height int64) { + hd.readHeight = height +} + +// ClearReadHeight sets internal readHeight to LatestHeight +func (hd *HeightDB) ClearReadHeight() int64 { + lastKnownReadHeight := hd.readHeight + hd.readHeight = LatestHeight + return lastKnownReadHeight +} + +// GetCurrentReadHeight gets the current readHeight +func (hd *HeightDB) GetCurrentReadHeight() int64 { + return hd.readHeight +} + +// SetWriteHeight sets a target write height in the db driver. +// - Writer uses writeHeight to append along with the key, so later when fetching with the driver +// you can find the latest known key/value pair before the writeHeight +func (hd *HeightDB) SetWriteHeight(height int64) { + if height != 0 { + hd.writeHeight = height + // hd.writeBatch = hd.NewBatch() + } +} + +// ClearWriteHeight sets the next target write Height +// NOTE: evaluate the actual usage of it +func (hd *HeightDB) ClearWriteHeight() int64 { + fmt.Println("!!! clearing write height...") + lastKnownWriteHeight := hd.writeHeight + hd.writeHeight = InvalidHeight + // if batchErr := hd.writeBatch.Write(); batchErr != nil { + // panic(batchErr) + // } + // hd.writeBatch = nil + return lastKnownWriteHeight +} + +// GetCurrentWriteHeight gets the current write height +func (hd *HeightDB) GetCurrentWriteHeight() int64 { + return hd.writeHeight +} + +// Get fetches the value of the given key, or nil if it does not exist. 
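+// The read is evaluated at the height set through SetReadHeight, where 0
+// (LatestHeight) means the current state. Illustrative usage:
+//
+//	hd.SetReadHeight(100)
+//	v, err := hd.Get(key) // value as of block 100, assuming hd is a *HeightDB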
+// CONTRACT: key, value readonly []byte +func (hd *HeightDB) Get(key []byte) ([]byte, error) { + return hd.odb.Get(hd.GetCurrentReadHeight(), key) +} + +// Has checks if a key exists. +// CONTRACT: key, value readonly []byte +func (hd *HeightDB) Has(key []byte) (bool, error) { + return hd.odb.Has(hd.GetCurrentReadHeight(), key) +} + +// Set sets the value for the given key, replacing it if it already exists. +// CONTRACT: key, value readonly []byte +func (hd *HeightDB) Set(key []byte, value []byte) error { + return hd.odb.Set(hd.writeHeight, key, value) +} + +// SetSync sets the value for the given key, and flushes it to storage before returning. +func (hd *HeightDB) SetSync(key []byte, value []byte) error { + return hd.Set(key, value) +} + +// Delete deletes the key, or does nothing if the key does not exist. +// CONTRACT: key readonly []byte +// NOTE(mantlemint): delete should be marked? +func (hd *HeightDB) Delete(key []byte) error { + return hd.odb.Delete(hd.writeHeight, key) +} + +// DeleteSync deletes the key, and flushes the delete to storage before returning. +func (hd *HeightDB) DeleteSync(key []byte) error { + return hd.Delete(key) +} + +// Iterator returns an iterator over a domain of keys, in ascending order. The caller must call +// Close when done. End is exclusive, and start must be less than end. A nil start iterates +// from the first key, and a nil end iterates to the last key (inclusive). +// CONTRACT: No writes may happen within a domain while an iterator exists over it. +// CONTRACT: start, end readonly []byte +func (hd *HeightDB) Iterator(start, end []byte) (tmdb.Iterator, error) { + return hd.odb.Iterator(hd.GetCurrentReadHeight(), start, end) +} + +// ReverseIterator returns an iterator over a domain of keys, in descending order. The caller +// must call Close when done. End is exclusive, and start must be less than end. A nil end +// iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive). +// CONTRACT: No writes may happen within a domain while an iterator exists over it. +// CONTRACT: start, end readonly []byte +func (hd *HeightDB) ReverseIterator(start, end []byte) (tmdb.Iterator, error) { + return hd.odb.ReverseIterator(hd.GetCurrentReadHeight(), start, end) +} + +// Close closes the database connection. +func (hd *HeightDB) Close() error { + return hd.odb.Close() +} + +// NewBatch creates a batch for atomic updates. The caller must call Batch.Close. +func (hd *HeightDB) NewBatch() tmdb.Batch { + // if hld.writeBatch != nil { + // // TODO: fix me + // return hld.writeBatch + // } else { + // fmt.Println("!!! opening hld.batch", hld.GetCurrentWriteHeight()) + // hld.writeBatch = hld.odb.NewBatch(hld.GetCurrentWriteHeight()) + // return hld.writeBatch + // } + // + return hd.odb.NewBatch(hd.GetCurrentWriteHeight()) +} + +// +// func (hd *HeightDB) FlushBatch() error { +// hd.writeBatch +// } + +// Print is used for debugging. +func (hd *HeightDB) Print() error { + return hd.odb.Print() +} + +// Stats returns a map of property values for all keys and the size of the cache. 
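+// Like Print, it simply forwards to the wrapped HeightEnabledDB.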
+func (hd *HeightDB) Stats() map[string]string {
+	return hd.odb.Stats()
+}
+
+func (hd *HeightDB) Debug(debugType int, key []byte, value []byte) {
+	if !hd.config.Debug {
+		return
+	}
+
+	keyFamily := key[:len(key)-9]
+	keyHeight := key[len(key)-8:]
+
+	var debugPrefix string
+	switch debugType {
+	case debugKeyGet:
+		debugPrefix = "get"
+	case debugKeySet:
+		debugPrefix = "set"
+	case debugKeyIterator:
+		debugPrefix = "get/it"
+	case debugKeyReverseIterator:
+		debugPrefix = "get/rit"
+	case debugKeyGetResult:
+		debugPrefix = "get/response"
+	}
+
+	var actualKeyHeight int64
+	if bytes.Equal(keyHeight, LatestHeightBuf) {
+		actualKeyHeight = -1
+	} else {
+		actualKeyHeight = int64(utils.BigEndianToUint(keyHeight))
+	}
+
+	fmt.Printf("<%s @ %d> %s\n", debugPrefix, actualKeyHeight, keyFamily)
+}
diff --git a/app/fast_query/db/height_driver/height_db_iterator.go b/app/fast_query/db/height_driver/height_db_iterator.go
new file mode 100644
index 00000000..e0dc36e7
--- /dev/null
+++ b/app/fast_query/db/height_driver/height_db_iterator.go
@@ -0,0 +1,46 @@
+package height_driver
+
+import tmdb "github.com/cometbft/cometbft-db"
+
+var _ tmdb.Iterator = (*HeightDBIterator)(nil)
+
+type HeightDBIterator struct {
+	oit      tmdb.Iterator
+	atHeight int64
+}
+
+func NewHeightLimitedIterator(atHeight int64, oit tmdb.Iterator) tmdb.Iterator {
+	return &HeightDBIterator{
+		oit:      oit,
+		atHeight: atHeight,
+	}
+}
+
+func (h *HeightDBIterator) Domain() (start []byte, end []byte) {
+	// TODO: fix me
+	return h.oit.Domain()
+}
+
+func (h *HeightDBIterator) Valid() bool {
+	return h.oit.Valid()
+}
+
+func (h *HeightDBIterator) Next() {
+	h.oit.Next()
+}
+
+func (h *HeightDBIterator) Key() (key []byte) {
+	// slice off the 9-byte height suffix; the named return must be assigned
+	// first, otherwise len(key) is 0 and the slice expression panics
+	key = h.oit.Key()
+	return key[:len(key)-9]
+}
+
+func (h *HeightDBIterator) Value() (value []byte) {
+	return h.oit.Value()
+}
+
+func (h *HeightDBIterator) Error() error {
+	return h.oit.Error()
+}
+
+func (h *HeightDBIterator) Close() error {
+	return h.oit.Close()
+}
diff --git a/app/fast_query/db/height_driver/types.go b/app/fast_query/db/height_driver/types.go
new file mode 100644
index 00000000..c4f1212f
--- /dev/null
+++ b/app/fast_query/db/height_driver/types.go
@@ -0,0 +1,71 @@
+package height_driver
+
+import (
+	dbm "github.com/cometbft/cometbft-db"
+)
+
+type HeigthDB interface {
+	dbm.DB
+	SetReadHeight(int64)
+	ClearReadHeight() int64
+	SetWriteHeight(int64)
+	ClearWriteHeight() int64
+}
+
+type HeightEnabledDB interface {
+	// Get fetches the value of the given key, or nil if it does not exist.
+	// CONTRACT: key, value readonly []byte
+	Get(maxHeight int64, key []byte) ([]byte, error)
+
+	// Has checks if a key exists.
+	// CONTRACT: key, value readonly []byte
+	Has(maxHeight int64, key []byte) (bool, error)
+
+	// Set sets the value for the given key, replacing it if it already exists.
+	// CONTRACT: key, value readonly []byte
+	Set(atHeight int64, key, value []byte) error
+
+	// SetSync sets the value for the given key, and flushes it to storage before returning.
+	SetSync(atHeight int64, key, value []byte) error
+
+	// Delete deletes the key, or does nothing if the key does not exist.
+	// CONTRACT: key readonly []byte
+	Delete(atHeight int64, key []byte) error
+
+	// DeleteSync deletes the key, and flushes the delete to storage before returning.
+	DeleteSync(atHeight int64, key []byte) error
+
+	// Iterator returns an iterator over a domain of keys, in ascending order. The caller must call
+	// Close when done. End is exclusive, and start must be less than end. 
A nil start iterates + // from the first key, and a nil end iterates to the last key (inclusive). + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + Iterator(maxHeight int64, start, end []byte) (HeightEnabledIterator, error) + + // ReverseIterator returns an iterator over a domain of keys, in descending order. The caller + // must call Close when done. End is exclusive, and start must be less than end. A nil end + // iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive). + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte + ReverseIterator(maxHeight int64, start, end []byte) (HeightEnabledIterator, error) + + // Close closes the database connection. + Close() error + + // NewBatch creates a batch for atomic updates. The caller must call Batch.Close. + NewBatch(atHeight int64) HeightEnabledBatch + + // Print is used for debugging. + Print() error + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +type HeightEnabledIterator interface { + dbm.Iterator +} + +type HeightEnabledBatch interface { + dbm.Batch +} diff --git a/app/fast_query/db/height_driver/utils.go b/app/fast_query/db/height_driver/utils.go new file mode 100644 index 00000000..20085ff1 --- /dev/null +++ b/app/fast_query/db/height_driver/utils.go @@ -0,0 +1,51 @@ +package height_driver + +type Height int64 + +const ( + // Max int64 value + maxHeight = 9223372036854775807 +) + +// Cluster returns /1000 of the height; useful for clustering records in different partitions +func (h Height) Cluster() Height { + return h / 1000 +} + +func (h Height) ToInt64() int64 { + return int64(h) +} + +func (h Height) IsLatestHeight() bool { + if h == maxHeight { + return true + } else { + return false + } +} + +func (h Height) CurrentOrLatest() Height { + if h == 0 { + return Height(maxHeight) + } else { + return h + } +} + +func (h Height) CurrentOrNever() Height { + if h == 0 { + return -1 + } else { + return h + } +} + +type Key []byte + +func (k Key) CurrentOrDefault() []byte { + if k != nil { + return k + } else { + return []byte{0x0} + } +} diff --git a/app/fast_query/db/snappy/snappy_batch.go b/app/fast_query/db/snappy/snappy_batch.go new file mode 100644 index 00000000..a7bbf25f --- /dev/null +++ b/app/fast_query/db/snappy/snappy_batch.go @@ -0,0 +1,38 @@ +package snappy + +import ( + mdb "github.com/cometbft/cometbft-db" + "github.com/golang/snappy" +) + +var _ mdb.Batch = (*SnappyBatch)(nil) + +type SnappyBatch struct { + batch mdb.Batch +} + +func NewSnappyBatch(batch mdb.Batch) *SnappyBatch { + return &SnappyBatch{ + batch: batch, + } +} + +func (s *SnappyBatch) Set(key, value []byte) error { + return s.batch.Set(key, snappy.Encode(nil, value)) +} + +func (s *SnappyBatch) Delete(key []byte) error { + return s.batch.Delete(key) +} + +func (s *SnappyBatch) Write() error { + return s.batch.Write() +} + +func (s *SnappyBatch) WriteSync() error { + return s.batch.WriteSync() +} + +func (s *SnappyBatch) Close() error { + return s.batch.Close() +} diff --git a/app/fast_query/db/snappy/snappy_db.go b/app/fast_query/db/snappy/snappy_db.go new file mode 100644 index 00000000..222a07e3 --- /dev/null +++ b/app/fast_query/db/snappy/snappy_db.go @@ -0,0 +1,115 @@ +package snappy + +import ( + "encoding/json" + tmdb "github.com/cometbft/cometbft-db" + "github.com/golang/snappy" + 
"github.com/pkg/errors" + "sync" +) + +const ( + CompatModeEnabled = iota + CompatModeDisabled +) + +var ( + errIteratorNotSupported = errors.New("iterator unsupported") + errUnknownData = errors.New("unknown format") +) + +var _ tmdb.DB = (*SnappyDB)(nil) + +// SnappyDB implements a tmdb.DB overlay with snappy compression/decompression +// Iterator is NOT supported -- main purpose of this library is to support indexer.db, +// which never makes use of iterators anyway +// NOTE: implement when needed +// NOTE2: monitor mem pressure, optimize by pre-allocating dst buf when there is bottleneck +type SnappyDB struct { + db tmdb.DB + mtx *sync.Mutex + compatMode int +} + +func NewSnappyDB(db tmdb.DB, compatMode int) *SnappyDB { + return &SnappyDB{ + mtx: new(sync.Mutex), + db: db, + compatMode: compatMode, + } +} + +func (s *SnappyDB) Get(key []byte) ([]byte, error) { + if item, err := s.db.Get(key); err != nil { + return nil, err + } else if item == nil && err == nil { + return nil, nil + } else { + decoded, decodeErr := snappy.Decode(nil, item) + + // if snappy decode fails, try to replace the underlying + // only recover & replace when the blob is a valid json + if s.compatMode == CompatModeEnabled { + if decodeErr != nil { + if json.Valid(item) { + s.mtx.Lock() + // run item by Set() to encode & replace + _ = s.db.Set(key, item) + defer s.mtx.Unlock() + + return item, nil + } else { + return nil, errUnknownData + } + } else { + return decoded, nil + } + } + + return decoded, decodeErr + } +} + +func (s *SnappyDB) Has(key []byte) (bool, error) { + return s.db.Has(key) +} + +func (s *SnappyDB) Set(key []byte, value []byte) error { + return s.db.Set(key, snappy.Encode(nil, value)) +} + +func (s *SnappyDB) SetSync(key []byte, value []byte) error { + return s.Set(key, value) +} + +func (s *SnappyDB) Delete(key []byte) error { + return s.db.Delete(key) +} + +func (s *SnappyDB) DeleteSync(key []byte) error { + return s.Delete(key) +} + +func (s *SnappyDB) Iterator(start, end []byte) (tmdb.Iterator, error) { + return nil, errIteratorNotSupported +} + +func (s *SnappyDB) ReverseIterator(start, end []byte) (tmdb.Iterator, error) { + return nil, errIteratorNotSupported +} + +func (s *SnappyDB) Close() error { + return s.db.Close() +} + +func (s *SnappyDB) NewBatch() tmdb.Batch { + return NewSnappyBatch(s.db.NewBatch()) +} + +func (s *SnappyDB) Print() error { + return s.db.Print() +} + +func (s *SnappyDB) Stats() map[string]string { + return s.db.Stats() +} diff --git a/app/fast_query/db/snappy/snappy_db_test.go b/app/fast_query/db/snappy/snappy_db_test.go new file mode 100644 index 00000000..4d5a7a36 --- /dev/null +++ b/app/fast_query/db/snappy/snappy_db_test.go @@ -0,0 +1,93 @@ +package snappy + +import ( + "io/ioutil" + "os" + "testing" + + db "github.com/cometbft/cometbft-db" + tmjson "github.com/cometbft/cometbft/libs/json" + cometbfttypes "github.com/cometbft/cometbft/types" + "github.com/stretchr/testify/assert" +) + +func TestSnappyDB(t *testing.T) { + snappy := NewSnappyDB(db.NewMemDB(), CompatModeEnabled) + + assert.Nil(t, snappy.Set([]byte("test"), []byte("testValue"))) + + var v []byte + var err error + + // nil buffer test + v, err = snappy.Get([]byte("non-existing")) + assert.Nil(t, v) + assert.Nil(t, err) + + v, err = snappy.Get([]byte("test")) + assert.Nil(t, err) + assert.Equal(t, []byte("testValue"), v) + + assert.Nil(t, snappy.Delete([]byte("test"))) + v, err = snappy.Get([]byte("test")) + assert.Nil(t, v) + assert.Nil(t, err) + + // iterator is not supported + var it db.Iterator + 
it, err = snappy.Iterator([]byte("start"), []byte("end")) + assert.Nil(t, it) + assert.Equal(t, errIteratorNotSupported, err) + + it, err = snappy.ReverseIterator([]byte("start"), []byte("end")) + assert.Nil(t, it) + assert.Equal(t, errIteratorNotSupported, err) + + // batched store is compressed as well + var batch db.Batch + batch = snappy.NewBatch() + + assert.Nil(t, batch.Set([]byte("key"), []byte("batchedValue"))) + assert.Nil(t, batch.Write()) + assert.Nil(t, batch.Close()) + + v, err = snappy.Get([]byte("key")) + assert.Equal(t, []byte("batchedValue"), v) + + batch = snappy.NewBatch() + assert.Nil(t, batch.Delete([]byte("key"))) + assert.Nil(t, batch.Write()) + assert.Nil(t, batch.Close()) + + v, err = snappy.Get([]byte("key")) + assert.Nil(t, v) + assert.Nil(t, err) +} + +func TestSnappyDBCompat(t *testing.T) { + mdb := db.NewMemDB() + testKey := []byte("testKey") + + nocompat := NewSnappyDB(mdb, CompatModeDisabled) + indexSampleTx(nocompat, testKey) + + nocompatResult, _ := nocompat.Get(testKey) + + compat := NewSnappyDB(mdb, CompatModeEnabled) + compatResult, _ := compat.Get(testKey) + assert.Equal(t, nocompatResult, compatResult) + + nocompatResult2, _ := nocompat.Get(testKey) + assert.Equal(t, compatResult, nocompatResult2) +} + +func indexSampleTx(mdb db.DB, key []byte) { + block := &cometbfttypes.Block{} + blockFile, _ := os.Open("../../indexer/fixtures/block_4814775.json") + blockJSON, _ := ioutil.ReadAll(blockFile) + if err := tmjson.Unmarshal(blockJSON, block); err != nil { + panic(err) + } + + _ = mdb.Set(key, blockJSON) +} diff --git a/app/fast_query/db/types.go b/app/fast_query/db/types.go new file mode 100644 index 00000000..ba52abb9 --- /dev/null +++ b/app/fast_query/db/types.go @@ -0,0 +1,78 @@ +package db + +// Batch represents a group of writes. They may or may not be written atomically depending on the +// backend. Callers must call Close on the batch when done. +// +// As with DB, given keys and values should be considered read-only, and must not be modified after +// passing them to the batch. +type Batch interface { + // Set sets a key/value pair. + // CONTRACT: key, value readonly []byte + Set(key, value []byte) error + + // Delete deletes a key/value pair. + // CONTRACT: key readonly []byte + Delete(key []byte) error + + // Write writes the batch, possibly without flushing to disk. Only Close() can be called after, + // other methods will error. + Write() error + + // WriteSync writes the batch and flushes it to disk. Only Close() can be called after, other + // methods will error. + WriteSync() error + + // Close closes the batch. It is idempotent, but calls to other methods afterwards will error. + Close() error +} + +// Iterator represents an iterator over a domain of keys. Callers must call Close when done. +// No writes can happen to a domain while there exists an iterator over it, some backends may take +// out database locks to ensure this will not happen. +// +// Callers must make sure the iterator is valid before calling any methods on it, otherwise +// these methods will panic. This is in part caused by most backend databases using this convention. +// +// As with DB, keys and values should be considered read-only, and must be copied before they are +// modified. +// +// Typical usage: +// +// var itr Iterator = ... +// defer itr.Close() +// +// for ; itr.Valid(); itr.Next() { +// k, v := itr.Key(); itr.Value() +// ... +// } +// +// if err := itr.Error(); err != nil { +// ... 
+// }
+type Iterator interface {
+	// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
+	// CONTRACT: start, end readonly []byte
+	Domain() (start []byte, end []byte)

+	// Valid returns whether the current iterator is valid. Once invalid, the Iterator remains
+	// invalid forever.
+	Valid() bool
+
+	// Next moves the iterator to the next key in the database, as defined by order of iteration.
+	// If Valid returns false, this method will panic.
+	Next()
+
+	// Key returns the key at the current position. Panics if the iterator is invalid.
+	// CONTRACT: key readonly []byte
+	Key() (key []byte)
+
+	// Value returns the value at the current position. Panics if the iterator is invalid.
+	// CONTRACT: value readonly []byte
+	Value() (value []byte)
+
+	// Error returns the last error encountered by the iterator, if any.
+	Error() error
+
+	// Close closes the iterator, releasing any allocated resources.
+	Close() error
+}
diff --git a/app/fast_query/db/utils/bytes.go b/app/fast_query/db/utils/bytes.go
new file mode 100644
index 00000000..58c2e99b
--- /dev/null
+++ b/app/fast_query/db/utils/bytes.go
@@ -0,0 +1,25 @@
+package utils
+
+import (
+	"bytes"
+	"encoding/binary"
+)
+
+func ConcatBytes(items ...[]byte) []byte {
+	buf := new(bytes.Buffer)
+	for _, item := range items {
+		buf.Write(item)
+	}
+	return buf.Bytes()
+}
+
+func UintToBigEndian(n uint64) []byte {
+	var buf = make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, n)
+
+	return buf
+}
+
+func BigEndianToUint(n []byte) uint64 {
+	return binary.BigEndian.Uint64(n)
+}
diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go
new file mode 100644
index 00000000..442bda72
--- /dev/null
+++ b/app/fast_query/fast_query_service.go
@@ -0,0 +1,49 @@
+package fast_query
+
+import (
+	log "github.com/cometbft/cometbft/libs/log"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	"github.com/terra-money/core/v2/app/fast_query/db/driver"
+	"github.com/terra-money/core/v2/app/fast_query/db/height_driver"
+	"github.com/terra-money/core/v2/app/fast_query/store"
+)
+
+type FastQueryService struct {
+	Store             *store.Store
+	safeBatchDBCloser driver.SafeBatchDBCloser
+	fastQueryDriver   height_driver.HeightEnabledDB
+	logger            log.Logger
+}
+
+func NewFastQueryService(homedir string, logger log.Logger) (*FastQueryService, error) {
+	// Create a new instance of the Database Driver that uses LevelDB
+	fastQueryDriver, err := driver.NewDBDriver(homedir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the HeightDB driver, which keys data by block height so that
+	// reads and writes to the database can proceed in parallel.
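+	// Resulting layering: Store -> SafeBatchDB (atomic batched flush) ->
+	// HeightDB (height-suffixed keys) -> DBDriver (LevelDB).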
+ fastQueryHeightDriver := height_driver.NewHeightDB( + fastQueryDriver, + &height_driver.HeightDBConfig{ + Debug: true, + }, + ) + + heightEnabledDB := driver.NewSafeBatchDB(fastQueryHeightDriver) + safeBatchDBCloser := heightEnabledDB.(driver.SafeBatchDBCloser) + store := store.NewStore(heightEnabledDB, fastQueryHeightDriver, logger) + + return &FastQueryService{ + Store: store, + safeBatchDBCloser: safeBatchDBCloser, + fastQueryDriver: fastQueryDriver, + logger: logger, + }, err +} + +func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.StoreKVPair) error { + fqs.logger.Info("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet) + return nil +} diff --git a/app/fast_query/store/dbadapter.go b/app/fast_query/store/dbadapter.go new file mode 100644 index 00000000..3e501f1e --- /dev/null +++ b/app/fast_query/store/dbadapter.go @@ -0,0 +1,48 @@ +package store + +import ( + dbm "github.com/cometbft/cometbft-db" + "github.com/cosmos/cosmos-sdk/store/dbadapter" + pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +var commithash = []byte("FAKE_HASH") + +//---------------------------------------- +// commitDBStoreWrapper should only be used for simulation/debugging, +// as it doesn't compute any commit hash, and it cannot load older state. + +// Wrapper type for dbm.Db with implementation of KVStore +type commitDBStoreAdapter struct { + dbadapter.Store + prefix []byte +} + +func (cdsa commitDBStoreAdapter) Commit() types.CommitID { + return types.CommitID{ + Version: -1, + Hash: commithash, + } +} + +func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID { + return types.CommitID{ + Version: -1, + Hash: commithash, + } +} + +func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {} + +// GetPruning is a no-op as pruning options cannot be directly set on this store. +// They must be set on the root commit multi-store. +func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions { + return pruningtypes.PruningOptions{} +} + +func (cdsa *commitDBStoreAdapter) BranchStoreWithHeightLimitedDB(hldb dbm.DB) types.CommitKVStore { + var db = dbm.NewPrefixDB(hldb, cdsa.prefix) + + return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}, prefix: cdsa.prefix} +} diff --git a/app/fast_query/store/proof.go b/app/fast_query/store/proof.go new file mode 100644 index 00000000..92335107 --- /dev/null +++ b/app/fast_query/store/proof.go @@ -0,0 +1,27 @@ +package store + +import ( + "github.com/cometbft/cometbft/crypto/merkle" + + storetypes "github.com/cosmos/cosmos-sdk/store/types" +) + +// RequireProof returns whether proof is required for the subpath. +func RequireProof(subpath string) bool { + // XXX: create a better convention. + // Currently, only when query subpath is "/key", will proof be included in + // response. If there are some changes about proof building in iavlstore.go, + // we must change code here to keep consistency with iavlStore#Query. + return subpath == "/key" +} + +//----------------------------------------------------------------------------- + +// XXX: This should be managed by the rootMultiStore which may want to register +// more proof ops? 
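+//
+// Illustrative usage, assuming proofOps and appHash come from an ABCI query
+// response (VerifyValue is provided by cometbft's merkle.ProofRuntime):
+//
+//	prt := DefaultProofRuntime()
+//	err := prt.VerifyValue(proofOps, appHash, "/store/key", value)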
+func DefaultProofRuntime() (prt *merkle.ProofRuntime) { + prt = merkle.NewProofRuntime() + prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder) + prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder) + return +} diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go new file mode 100644 index 00000000..eabfa48a --- /dev/null +++ b/app/fast_query/store/store.go @@ -0,0 +1,1126 @@ +package store + +import ( + "fmt" + "io" + "math" + "sort" + "strings" + "sync" + + dbm "github.com/cometbft/cometbft-db" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" + protoio "github.com/cosmos/gogoproto/io" + iavltree "github.com/cosmos/iavl" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/terra-money/core/v2/app/fast_query/db/height_driver" + + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" + + "github.com/cosmos/cosmos-sdk/store/cachemulti" + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/cosmos/cosmos-sdk/store/iavl" + "github.com/cosmos/cosmos-sdk/store/listenkv" + "github.com/cosmos/cosmos-sdk/store/mem" + "github.com/cosmos/cosmos-sdk/store/pruning" + pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types" + "github.com/cosmos/cosmos-sdk/store/tracekv" + "github.com/cosmos/cosmos-sdk/store/transient" + "github.com/cosmos/cosmos-sdk/store/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + latestVersionKey = "s/latest" + commitInfoKeyFmt = "s/%d" // s/ +) + +const iavlDisablefastNodeDefault = false + +// Store is composed of many CommitStores. Name contrasts with +// cacheMultiStore which is used for branching other MultiStores. It implements +// the CommitMultiStore interface. +type Store struct { + db dbm.DB + hldb *height_driver.HeightDB + logger log.Logger + lastCommitInfo *types.CommitInfo + pruningManager *pruning.Manager + iavlCacheSize int + iavlDisableFastNode bool + storesParams map[types.StoreKey]storeParams + stores map[types.StoreKey]types.CommitKVStore + keysByName map[string]types.StoreKey + lazyLoading bool + pruneHeights []int64 + initialVersion int64 + removalMap map[types.StoreKey]bool + + traceWriter io.Writer + traceContext types.TraceContext + traceContextMutex sync.Mutex + + interBlockCache types.MultiStorePersistentCache + + listeners map[types.StoreKey][]types.WriteListener +} + +var ( + _ types.CommitMultiStore = (*Store)(nil) + _ types.Queryable = (*Store)(nil) +) + +// NewStore returns a reference to a new Store object with the provided DB. The +// store will be created with a PruneNothing pruning strategy by default. After +// a store is created, KVStores must be mounted and finally LoadLatestVersion or +// LoadVersion must be called. +func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger) *Store { + return &Store{ + db: db, + hldb: hldb, + logger: logger, + pruningManager: pruning.NewManager(db, logger), + iavlCacheSize: iavl.DefaultIAVLCacheSize, + iavlDisableFastNode: iavlDisablefastNodeDefault, + storesParams: make(map[types.StoreKey]storeParams), + stores: make(map[types.StoreKey]types.CommitKVStore), + keysByName: make(map[string]types.StoreKey), + removalMap: make(map[types.StoreKey]bool), + pruneHeights: make([]int64, 0), + listeners: make(map[types.StoreKey][]types.WriteListener), + } +} + +// GetPruning fetches the pruning strategy from the root store. 
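+// It simply returns the options currently held by the pruning manager.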
+func (rs *Store) GetPruning() pruningtypes.PruningOptions { + return rs.pruningManager.GetOptions() +} + +// SetPruning sets the pruning strategy on the root store and all the sub-stores. +// Note, calling SetPruning on the root store prior to LoadVersion or +// LoadLatestVersion performs a no-op as the stores aren't mounted yet. +func (rs *Store) SetPruning(pruningOpts pruningtypes.PruningOptions) { + rs.pruningManager.SetOptions(pruningOpts) +} + +// SetSnapshotInterval sets the interval at which the snapshots are taken. +// It is used by the store to determine which heights to retain until after the snapshot is complete. +func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) { + rs.pruningManager.SetSnapshotInterval(snapshotInterval) +} + +// SetIAVLCacheSize sets the size of IAVL cache tree. +func (rs *Store) SetIAVLCacheSize(cacheSize int) { + rs.iavlCacheSize = cacheSize +} + +func (rs *Store) SetIAVLDisableFastNode(disableFastNode bool) { + rs.iavlDisableFastNode = disableFastNode +} + +// SetLazyLoading sets if the iavl store should be loaded lazily or not +func (rs *Store) SetLazyLoading(lazyLoading bool) { + rs.lazyLoading = lazyLoading +} + +// GetStoreType implements Store. +func (rs *Store) GetStoreType() types.StoreType { + return types.StoreTypeMulti +} + +// MountStoreWithDB implements CommitMultiStore. +func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db dbm.DB) { + if key == nil { + panic("MountIAVLStore() key cannot be nil") + } + if _, ok := rs.storesParams[key]; ok { + panic(fmt.Sprintf("store duplicate store key %v", key)) + } + if _, ok := rs.keysByName[key.Name()]; ok { + panic(fmt.Sprintf("store duplicate store key name %v", key)) + } + rs.storesParams[key] = newStoreParams(key, db, typ, 0) + rs.keysByName[key.Name()] = key +} + +// GetCommitStore returns a mounted CommitStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. +func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { + return rs.GetCommitKVStore(key) +} + +// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. +func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { + // If the Store has an inter-block cache, first attempt to lookup and unwrap + // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to + // the main mapping of CommitKVStores. + if rs.interBlockCache != nil { + if store := rs.interBlockCache.Unwrap(key); store != nil { + return store + } + } + + return rs.stores[key] +} + +// StoreKeysByName returns mapping storeNames -> StoreKeys +func (rs *Store) StoreKeysByName() map[string]types.StoreKey { + return rs.keysByName +} + +// LoadLatestVersionAndUpgrade implements CommitMultiStore +func (rs *Store) LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades) error { + ver := GetLatestVersion(rs.db) + return rs.loadVersion(ver, upgrades) +} + +// LoadVersionAndUpgrade allows us to rename substores while loading an older version +func (rs *Store) LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades) error { + return rs.loadVersion(ver, upgrades) +} + +// LoadLatestVersion implements CommitMultiStore. +func (rs *Store) LoadLatestVersion() error { + ver := GetLatestVersion(rs.db) + return rs.loadVersion(ver, nil) +} + +// LoadVersion implements CommitMultiStore. 
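+// Passing ver == 0 loads an empty store; no commit info is read from disk.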
+func (rs *Store) LoadVersion(ver int64) error { + return rs.loadVersion(ver, nil) +} + +func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { + infos := make(map[string]types.StoreInfo) + + cInfo := &types.CommitInfo{} + + // load old data if we are not version 0 + if ver != 0 { + var err error + cInfo, err = getCommitInfo(rs.db, ver) + if err != nil { + return err + } + + // convert StoreInfos slice to map + for _, storeInfo := range cInfo.StoreInfos { + infos[storeInfo.Name] = storeInfo + } + } + + // load each Store (note this doesn't panic on unmounted keys now) + newStores := make(map[types.StoreKey]types.CommitKVStore) + + storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) + + for key := range rs.storesParams { + storesKeys = append(storesKeys, key) + } + if upgrades != nil { + // deterministic iteration order for upgrades + // (as the underlying store may change and + // upgrades make store changes where the execution order may matter) + sort.Slice(storesKeys, func(i, j int) bool { + return storesKeys[i].Name() < storesKeys[j].Name() + }) + } + + for _, key := range storesKeys { + storeParams := rs.storesParams[key] + commitID := rs.getCommitID(infos, key.Name()) + + // If it has been added, set the initial version + if upgrades.IsAdded(key.Name()) || upgrades.RenamedFrom(key.Name()) != "" { + storeParams.initialVersion = uint64(ver) + 1 + } else if commitID.Version != ver && storeParams.typ == types.StoreTypeIAVL { + return fmt.Errorf("version of store %s mismatch root store's version; expected %d got %d", key.Name(), ver, commitID.Version) + } + + store, err := rs.loadCommitStoreFromParams(key, commitID, storeParams) + if err != nil { + return errors.Wrap(err, "failed to load store") + } + + newStores[key] = store + + // If it was deleted, remove all data + if upgrades.IsDeleted(key.Name()) { + if err := deleteKVStore(types.KVStore(store)); err != nil { + return errors.Wrapf(err, "failed to delete store %s", key.Name()) + } + rs.removalMap[key] = true + } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" { + // handle renames specially + // make an unregistered key to satisfy loadCommitStore params + oldKey := types.NewKVStoreKey(oldName) + oldParams := newStoreParams(oldKey, storeParams.db, storeParams.typ, 0) + + // load from the old name + oldStore, err := rs.loadCommitStoreFromParams(oldKey, rs.getCommitID(infos, oldName), oldParams) + if err != nil { + return errors.Wrapf(err, "failed to load old store %s", oldName) + } + + // move all data + if err := moveKVStoreData(types.KVStore(oldStore), types.KVStore(store)); err != nil { + return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) + } + + // add the old key so its deletion is committed + newStores[oldKey] = oldStore + // this will ensure it's not perpetually stored in commitInfo + rs.removalMap[oldKey] = true + } + } + + rs.lastCommitInfo = cInfo + rs.stores = newStores + + // load any pruned heights we missed from disk to be pruned on the next run + if err := rs.pruningManager.LoadPruningHeights(rs.db); err != nil { + return err + } + + return nil +} + +func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) types.CommitID { + info, ok := infos[name] + if !ok { + return types.CommitID{} + } + + return info.CommitId +} + +func deleteKVStore(kv types.KVStore) error { + // Note that we cannot write while iterating, so load all keys here, delete below + var keys [][]byte + itr := kv.Iterator(nil, nil) + for itr.Valid() { + keys = 
append(keys, itr.Key()) + itr.Next() + } + itr.Close() + + for _, k := range keys { + kv.Delete(k) + } + return nil +} + +// we simulate move by a copy and delete +func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error { + // we read from one and write to another + itr := oldDB.Iterator(nil, nil) + for itr.Valid() { + newDB.Set(itr.Key(), itr.Value()) + itr.Next() + } + itr.Close() + + // then delete the old store + return deleteKVStore(oldDB) +} + +// PruneSnapshotHeight prunes the given height according to the prune strategy. +// If PruneNothing, this is a no-op. +// If other strategy, this height is persisted until it is +// less than - KeepRecent and % Interval == 0 +func (rs *Store) PruneSnapshotHeight(height int64) { + rs.pruningManager.HandleHeightSnapshot(height) +} + +// SetInterBlockCache sets the Store's internal inter-block (persistent) cache. +// When this is defined, all CommitKVStores will be wrapped with their respective +// inter-block cache. +func (rs *Store) SetInterBlockCache(c types.MultiStorePersistentCache) { + rs.interBlockCache = c +} + +// SetTracer sets the tracer for the MultiStore that the underlying +// stores will utilize to trace operations. A MultiStore is returned. +func (rs *Store) SetTracer(w io.Writer) types.MultiStore { + rs.traceWriter = w + return rs +} + +// SetTracingContext updates the tracing context for the MultiStore by merging +// the given context with the existing context by key. Any existing keys will +// be overwritten. It is implied that the caller should update the context when +// necessary between tracing operations. It returns a modified MultiStore. +func (rs *Store) SetTracingContext(tc types.TraceContext) types.MultiStore { + rs.traceContextMutex.Lock() + defer rs.traceContextMutex.Unlock() + rs.traceContext = rs.traceContext.Merge(tc) + + return rs +} + +func (rs *Store) getTracingContext() types.TraceContext { + rs.traceContextMutex.Lock() + defer rs.traceContextMutex.Unlock() + + if rs.traceContext == nil { + return nil + } + + ctx := types.TraceContext{} + for k, v := range rs.traceContext { + ctx[k] = v + } + + return ctx +} + +// TracingEnabled returns if tracing is enabled for the MultiStore. +func (rs *Store) TracingEnabled() bool { + return rs.traceWriter != nil +} + +// AddListeners adds listeners for a specific KVStore +func (rs *Store) AddListeners(key types.StoreKey, listeners []types.WriteListener) { + if ls, ok := rs.listeners[key]; ok { + rs.listeners[key] = append(ls, listeners...) + } else { + rs.listeners[key] = listeners + } +} + +// ListeningEnabled returns if listening is enabled for a specific KVStore +func (rs *Store) ListeningEnabled(key types.StoreKey) bool { + if ls, ok := rs.listeners[key]; ok { + return len(ls) != 0 + } + return false +} + +// LatestVersion returns the latest version in the store +func (rs *Store) LatestVersion() int64 { + return rs.LastCommitID().Version +} + +// LastCommitID implements Committer/CommitStore. +func (rs *Store) LastCommitID() types.CommitID { + if rs.lastCommitInfo == nil { + return types.CommitID{ + Version: GetLatestVersion(rs.db), + } + } + + return rs.lastCommitInfo.CommitID() +} + +// Commit implements Committer/CommitStore. +func (rs *Store) Commit() types.CommitID { + var previousHeight, version int64 + if rs.lastCommitInfo.GetVersion() == 0 && rs.initialVersion > 1 { + // This case means that no commit has been made in the store, we + // start from initialVersion. 
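+		// (reachable only before the first commit, when an initial version was set)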
+ version = rs.initialVersion
+ } else {
+ // This case can mean two things:
+ // - either there was already a previous commit in the store, in which
+ // case we increment the version from there,
+ // - or there was no previous commit, and initial version was not set,
+ // in which case we start at version 1.
+ previousHeight = rs.lastCommitInfo.GetVersion()
+ version = previousHeight + 1
+ }
+
+ rs.lastCommitInfo = commitStores(version, rs.stores, rs.removalMap)
+ defer rs.flushMetadata(rs.db, version, rs.lastCommitInfo)
+
+ // remove remnants of removed stores
+ for sk := range rs.removalMap {
+ if _, ok := rs.stores[sk]; ok {
+ delete(rs.stores, sk)
+ delete(rs.storesParams, sk)
+ delete(rs.keysByName, sk.Name())
+ }
+ }
+ // reset the removalMap
+ rs.removalMap = make(map[types.StoreKey]bool)
+
+ if err := rs.handlePruning(version); err != nil {
+ panic(err)
+ }
+
+ return types.CommitID{
+ Version: version,
+ Hash: rs.lastCommitInfo.Hash(),
+ }
+}
+
+// CacheWrap implements CacheWrapper/Store/CommitStore.
+func (rs *Store) CacheWrap() types.CacheWrap {
+ return rs.CacheMultiStore().(types.CacheWrap)
+}
+
+// CacheWrapWithTrace implements the CacheWrapper interface.
+func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
+ return rs.CacheWrap()
+}
+
+// CacheWrapWithListeners implements the CacheWrapper interface.
+func (rs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
+ return rs.CacheWrap()
+}
+
+// CacheMultiStore creates an ephemeral branch of the multi-store and returns a CacheMultiStore.
+// It implements the MultiStore interface.
+func (rs *Store) CacheMultiStore() types.CacheMultiStore {
+ stores := make(map[types.StoreKey]types.CacheWrapper)
+ for k, v := range rs.stores {
+ store := types.KVStore(v)
+ // Wire the listenkv.Store to allow listeners to observe the writes from the cache store;
+ // setting the same listeners on the cache store would observe duplicated writes.
+ if rs.ListeningEnabled(k) {
+ store = listenkv.NewStore(store, k, rs.listeners[k])
+ }
+ stores[k] = store
+ }
+ return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext)
+}
+
+// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it
+// attempts to load stores at a given version (height). An error is returned if
+// any store cannot be loaded. This should only be used for querying and
+// iterating at past heights.
+func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) {
+ hldb := rs.hldb.BranchHeightDB(version)
+
+ cachedStores := make(map[types.StoreKey]types.CacheWrapper)
+ for key, store := range rs.stores {
+ switch store.GetStoreType() {
+ case types.StoreTypeIAVL:
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+
+ // Attempt to lazy-load an already saved IAVL store version. If the
+ // version does not exist or is pruned, an error should be returned.
+ iavlStore, err := store.(*iavl.Store).GetImmutable(version) + if err != nil { + return nil, err + } + + cachedStores[key] = iavlStore + + case types.StoreTypeDB: + if version == rs.lastCommitInfo.Version { + cachedStores[key] = store + } else { + s := rs.GetCommitKVStore(key).(commitDBStoreAdapter) + // connect new adapter with height limited + cachedStores[key] = s.BranchStoreWithHeightLimitedDB(hldb) + } + + default: + cachedStores[key] = store + } + } + + return cachemulti.NewStore(hldb, cachedStores, rs.keysByName, rs.traceWriter, rs.traceContext), nil +} + +// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does +// not exist, it will panic. If the Store is wrapped in an inter-block cache, it +// will be unwrapped prior to being returned. +// +// TODO: This isn't used directly upstream. Consider returning the Store as-is +// instead of unwrapping. +func (rs *Store) GetStore(key types.StoreKey) types.Store { + store := rs.GetCommitKVStore(key) + if store == nil { + panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) + } + + return store +} + +// GetKVStore returns a mounted KVStore for a given StoreKey. If tracing is +// enabled on the KVStore, a wrapped TraceKVStore will be returned with the root +// store's tracer, otherwise, the original KVStore will be returned. +// +// NOTE: The returned KVStore may be wrapped in an inter-block cache if it is +// set on the root store. +func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { + s := rs.stores[key] + if s == nil { + panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) + } + store := types.KVStore(s) + + if rs.TracingEnabled() { + store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) + } + if rs.ListeningEnabled(key) { + store = listenkv.NewStore(store, key, rs.listeners[key]) + } + + return store +} + +func (rs *Store) handlePruning(version int64) error { + rs.pruningManager.HandleHeight(version - 1) // we should never prune the current version. + if !rs.pruningManager.ShouldPruneAtHeight(version) { + return nil + } + rs.logger.Info("prune start", "height", version) + defer rs.logger.Info("prune end", "height", version) + return rs.PruneStores(true, nil) +} + +// PruneStores prunes the specific heights of the multi store. +// If clearPruningManager is true, the pruning manager will return the pruning heights, +// and they are appended to the pruningHeights to be pruned. +func (rs *Store) PruneStores(clearPruningManager bool, pruningHeights []int64) (err error) { + if clearPruningManager { + heights, err := rs.pruningManager.GetFlushAndResetPruningHeights() + if err != nil { + return err + } + + if len(heights) == 0 { + rs.logger.Debug("no heights to be pruned from pruning manager") + } + + pruningHeights = append(pruningHeights, heights...) + } + + if len(pruningHeights) == 0 { + rs.logger.Debug("no heights need to be pruned") + return nil + } + + rs.logger.Debug("pruning heights", "heights", pruningHeights) + + for key, store := range rs.stores { + // If the store is wrapped with an inter-block cache, we must first unwrap + // it to get the underlying IAVL store. + if store.GetStoreType() != types.StoreTypeIAVL { + continue + } + + store = rs.GetCommitKVStore(key) + + err := store.(*iavl.Store).DeleteVersions(pruningHeights...) 
+ if err == nil {
+ continue
+ }
+
+ if errCause := errors.Cause(err); errCause != nil && errCause != iavltree.ErrVersionDoesNotExist {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetStoreByName performs a lookup of a StoreKey given a store name typically
+// provided in a path. The StoreKey is then used to perform a lookup and return
+// a Store. If the Store is wrapped in an inter-block cache, it will be unwrapped
+// prior to being returned. If the StoreKey does not exist, nil is returned.
+func (rs *Store) GetStoreByName(name string) types.Store {
+ key := rs.keysByName[name]
+ if key == nil {
+ return nil
+ }
+
+ return rs.GetCommitKVStore(key)
+}
+
+// Query calls substore.Query with the same `req` where `req.Path` is
+// modified to remove the substore prefix.
+// Ie. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore.
+// TODO: add proof for `multistore -> substore`.
+func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery {
+ path := req.Path
+ storeName, subpath, err := parsePath(path)
+ if err != nil {
+ return sdkerrors.QueryResult(err, false)
+ }
+
+ store := rs.GetStoreByName(storeName)
+ if store == nil {
+ return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName), false)
+ }
+
+ queryable, ok := store.(types.Queryable)
+ if !ok {
+ return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store), false)
+ }
+
+ // trim the path and make the query
+ req.Path = subpath
+ res := queryable.Query(req)
+
+ if !req.Prove || !RequireProof(subpath) {
+ return res
+ }
+
+ if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 {
+ return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned"), false)
+ }
+
+ // If the request's height is the latest height we've committed, then utilize
+ // the store's lastCommitInfo as this commit info may not be flushed to disk.
+ // Otherwise, we query for the commit info from disk.
+ var commitInfo *types.CommitInfo
+
+ if res.Height == rs.lastCommitInfo.Version {
+ commitInfo = rs.lastCommitInfo
+ } else {
+ commitInfo, err = getCommitInfo(rs.db, res.Height)
+ if err != nil {
+ return sdkerrors.QueryResult(err, false)
+ }
+ }
+
+ // Restore origin path and append proof op.
+ res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
+
+ return res
+}
+
+// SetInitialVersion sets the initial version of the IAVL tree. It is used when
+// starting a new chain at an arbitrary height.
+func (rs *Store) SetInitialVersion(version int64) error {
+ rs.initialVersion = version
+
+ // Loop through all the stores, if it's an IAVL store, then set initial
+ // version on it.
+ for key, store := range rs.stores {
+ if store.GetStoreType() == types.StoreTypeIAVL {
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+ store.(*iavl.Store).SetInitialVersion(version)
+ }
+ }
+
+ return nil
+}
+
+// parsePath expects a format like /<storeName>[/<subpath>]
+// Must start with /, subpath may be empty
+// Returns error if it doesn't start with /
+func parsePath(path string) (storeName string, subpath string, err error) {
+ if !strings.HasPrefix(path, "/") {
+ return storeName, subpath, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "invalid path: %s", path)
+ }
+
+ paths := strings.SplitN(path[1:], "/", 2)
+ storeName = paths[0]
+
+ if len(paths) == 2 {
+ subpath = "/" + paths[1]
+ }
+
+ return storeName, subpath, nil
+}
+
+//---------------------- Snapshotting ------------------
+
+// Snapshot implements snapshottypes.Snapshotter. The snapshot output for a given format must be
+// identical across nodes such that chunks from different sources fit together. If the output for a
+// given format changes (at the byte level), the snapshot format must be bumped - see
+// TestMultistoreSnapshot_Checksum test.
+func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error {
+ if height == 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrLogic, "cannot snapshot height 0")
+ }
+ if height > uint64(rs.LastCommitID().Version) {
+ return sdkerrors.Wrapf(sdkerrors.ErrLogic, "cannot snapshot future height %v", height)
+ }
+
+ // Collect stores to snapshot (only IAVL stores are supported)
+ type namedStore struct {
+ *iavl.Store
+ name string
+ }
+ stores := []namedStore{}
+ for key := range rs.stores {
+ switch store := rs.GetCommitKVStore(key).(type) {
+ case *iavl.Store:
+ stores = append(stores, namedStore{name: key.Name(), Store: store})
+ case *transient.Store, *mem.Store:
+ // Non-persisted stores shouldn't be snapshotted
+ continue
+ default:
+ return sdkerrors.Wrapf(sdkerrors.ErrLogic,
+ "don't know how to snapshot store %q of type %T", key.Name(), store)
+ }
+ }
+ sort.Slice(stores, func(i, j int) bool {
+ return strings.Compare(stores[i].name, stores[j].name) == -1
+ })
+
+ // Export each IAVL store. Stores are serialized as a stream of SnapshotItem Protobuf
+ // messages. The first item contains a SnapshotStore with store metadata (i.e. name),
+ // and the following messages contain a SnapshotNode (i.e. an ExportNode). Store changes
+ // are demarcated by new SnapshotStore items.
+ for _, store := range stores {
+ exporter, err := store.Export(int64(height))
+ if err != nil {
+ return err
+ }
+ defer exporter.Close()
+ err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
+ Item: &snapshottypes.SnapshotItem_Store{
+ Store: &snapshottypes.SnapshotStoreItem{
+ Name: store.name,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ for {
+ node, err := exporter.Next()
+ if err == iavltree.ErrorExportDone {
+ break
+ } else if err != nil {
+ return err
+ }
+ err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
+ Item: &snapshottypes.SnapshotItem_IAVL{
+ IAVL: &snapshottypes.SnapshotIAVLItem{
+ Key: node.Key,
+ Value: node.Value,
+ Height: int32(node.Height),
+ Version: node.Version,
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+ exporter.Close()
+ }
+
+ return nil
+}
+
+// Restore implements snapshottypes.Snapshotter.
+// returns next snapshot item and error.
+func (rs *Store) Restore(
+ height uint64, format uint32, protoReader protoio.Reader,
+) (snapshottypes.SnapshotItem, error) {
+ // Import nodes into stores. The first item is expected to be a SnapshotItem containing
+ // a SnapshotStoreItem, telling us which store to import into.
The following items will contain + // SnapshotNodeItem (i.e. ExportNode) until we reach the next SnapshotStoreItem or EOF. + var importer *iavltree.Importer + var snapshotItem snapshottypes.SnapshotItem +loop: + for { + snapshotItem = snapshottypes.SnapshotItem{} + err := protoReader.ReadMsg(&snapshotItem) + if err == io.EOF { + break + } else if err != nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "invalid protobuf message") + } + + switch item := snapshotItem.Item.(type) { + case *snapshottypes.SnapshotItem_Store: + if importer != nil { + err = importer.Commit() + if err != nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "IAVL commit failed") + } + importer.Close() + } + store, ok := rs.GetStoreByName(item.Store.Name).(*iavl.Store) + if !ok || store == nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "cannot import into non-IAVL store %q", item.Store.Name) + } + importer, err = store.Import(int64(height)) + if err != nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "import failed") + } + defer importer.Close() + + case *snapshottypes.SnapshotItem_IAVL: + if importer == nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(sdkerrors.ErrLogic, "received IAVL node item before store item") + } + if item.IAVL.Height > math.MaxInt8 { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "node height %v cannot exceed %v", + item.IAVL.Height, math.MaxInt8) + } + node := &iavltree.ExportNode{ + Key: item.IAVL.Key, + Value: item.IAVL.Value, + Height: int8(item.IAVL.Height), + Version: item.IAVL.Version, + } + // Protobuf does not differentiate between []byte{} as nil, but fortunately IAVL does + // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. + if node.Key == nil { + node.Key = []byte{} + } + if node.Height == 0 && node.Value == nil { + node.Value = []byte{} + } + err := importer.Add(node) + if err != nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "IAVL node import failed") + } + + default: + break loop + } + } + + if importer != nil { + err := importer.Commit() + if err != nil { + return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "IAVL commit failed") + } + importer.Close() + } + + rs.flushMetadata(rs.db, int64(height), rs.buildCommitInfo(int64(height))) + return snapshotItem, rs.LoadLatestVersion() +} + +func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) { + var db dbm.DB + + var prefix []byte + if params.db != nil { + prefix = []byte("s/_/") + db = dbm.NewPrefixDB(params.db, prefix) + } else { + prefix = []byte("s/k:" + params.key.Name() + "/") + db = dbm.NewPrefixDB(rs.db, prefix) + } + + switch params.typ { + case types.StoreTypeMulti: + panic("recursive MultiStores not yet supported") + + case types.StoreTypeIAVL: + var store types.CommitKVStore + var err error + + if params.initialVersion == 0 { + store, err = iavl.LoadStore(db, rs.logger, key, id, rs.lazyLoading, rs.iavlCacheSize, rs.iavlDisableFastNode) + } else { + store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, rs.lazyLoading, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode) + } + + if err != nil { + return nil, err + } + + if rs.interBlockCache != nil { + // Wrap and get a CommitKVStore with inter-block caching. 
Note, this should
+ // only wrap the primary CommitKVStore, not any store that is already
+ // branched as that will create unexpected behavior.
+ store = rs.interBlockCache.GetStoreCache(key, store)
+ }
+
+ return store, err
+
+ case types.StoreTypeDB:
+ return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}, prefix: prefix}, nil
+
+ case types.StoreTypeTransient:
+ _, ok := key.(*types.TransientStoreKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String())
+ }
+
+ return transient.NewStore(), nil
+
+ case types.StoreTypeMemory:
+ if _, ok := key.(*types.MemoryStoreKey); !ok {
+ return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String())
+ }
+
+ return mem.NewStore(), nil
+
+ default:
+ panic(fmt.Sprintf("unrecognized store type %v", params.typ))
+ }
+}
+
+func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo {
+ storeInfos := []types.StoreInfo{}
+ for key, store := range rs.stores {
+ if store.GetStoreType() == types.StoreTypeTransient {
+ continue
+ }
+ storeInfos = append(storeInfos, types.StoreInfo{
+ Name: key.Name(),
+ CommitId: store.LastCommitID(),
+ })
+ }
+ return &types.CommitInfo{
+ Version: version,
+ StoreInfos: storeInfos,
+ }
+}
+
+// RollbackToVersion deletes the versions after `target` and updates the latest version.
+func (rs *Store) RollbackToVersion(target int64) error {
+ if target <= 0 {
+ return fmt.Errorf("invalid rollback height target: %d", target)
+ }
+
+ for key, store := range rs.stores {
+ if store.GetStoreType() == types.StoreTypeIAVL {
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+ var err error
+ if rs.lazyLoading {
+ _, err = store.(*iavl.Store).LazyLoadVersionForOverwriting(target)
+ } else {
+ _, err = store.(*iavl.Store).LoadVersionForOverwriting(target)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ rs.flushMetadata(rs.db, target, rs.buildCommitInfo(target))
+
+ return rs.LoadLatestVersion()
+}
+
+func (rs *Store) flushMetadata(db dbm.DB, version int64, cInfo *types.CommitInfo) {
+ rs.logger.Debug("flushing metadata", "height", version)
+ batch := db.NewBatch()
+ defer batch.Close()
+
+ if cInfo != nil {
+ flushCommitInfo(batch, version, cInfo)
+ } else {
+ rs.logger.Debug("commitInfo is nil, not flushed", "height", version)
+ }
+
+ flushLatestVersion(batch, version)
+
+ if err := batch.WriteSync(); err != nil {
+ panic(fmt.Errorf("error on batch write %w", err))
+ }
+ rs.logger.Debug("flushing metadata finished", "height", version)
+}
+
+type storeParams struct {
+ key types.StoreKey
+ db dbm.DB
+ typ types.StoreType
+ initialVersion uint64
+}
+
+func newStoreParams(key types.StoreKey, db dbm.DB, typ types.StoreType, initialVersion uint64) storeParams {
+ return storeParams{
+ key: key,
+ db: db,
+ typ: typ,
+ initialVersion: initialVersion,
+ }
+}
+
+func GetLatestVersion(db dbm.DB) int64 {
+ bz, err := db.Get([]byte(latestVersionKey))
+ if err != nil {
+ panic(err)
+ } else if bz == nil {
+ return 0
+ }
+
+ var latestVersion int64
+
+ if err := gogotypes.StdInt64Unmarshal(&latestVersion, bz); err != nil {
+ panic(err)
+ }
+
+ return latestVersion
+}
+
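[editor's note] To make the metadata layout above concrete, here is a minimal stdlib-only sketch of what flushMetadata persists, assuming the upstream cosmos-sdk key constants ("s/latest" and "s/%d", referenced in this file as latestVersionKey and commitInfoKeyFmt); the map stands in for the real dbm.DB and plain strings stand in for the marshaled CommitInfo / int64 values:

package main

import "fmt"

const (
	latestVersionKey = "s/latest" // assumed upstream value
	commitInfoKeyFmt = "s/%d"     // assumed upstream value; one entry per version
)

// flush mirrors flushMetadata: one commit-info record per height, plus a
// single latest-version pointer that is overwritten on every commit.
func flush(db map[string]string, version int64, commitInfo string) {
	db[fmt.Sprintf(commitInfoKeyFmt, version)] = commitInfo
	db[latestVersionKey] = fmt.Sprintf("%d", version)
}

func main() {
	db := map[string]string{}
	flush(db, 1, "commit-info-1")
	flush(db, 2, "commit-info-2")
	fmt.Println(db[latestVersionKey]) // "2"
	fmt.Println(db["s/1"])            // "commit-info-1": history stays queryable
}

This is why GetLatestVersion only needs a single point read, while getCommitInfo can reconstruct any retained height.
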
+// Commits each store and returns a new commitInfo.
+func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo {
+ storeInfos := make([]types.StoreInfo, 0, len(storeMap))
+
+ for key, store := range storeMap {
+ last := store.LastCommitID()
+
+ // If a commit event execution is interrupted, a new iavl store's version will be larger than the rootmulti's metadata; when the block is replayed, we should avoid committing that iavl store again.
+ var commitID types.CommitID
+ if last.Version >= version {
+ last.Version = version
+ commitID = last
+ } else {
+ commitID = store.Commit()
+ }
+ if store.GetStoreType() == types.StoreTypeTransient {
+ continue
+ }
+
+ if !removalMap[key] {
+ si := types.StoreInfo{}
+ si.Name = key.Name()
+ si.CommitId = commitID
+ storeInfos = append(storeInfos, si)
+ }
+ }
+
+ sort.SliceStable(storeInfos, func(i, j int) bool {
+ return strings.Compare(storeInfos[i].Name, storeInfos[j].Name) < 0
+ })
+
+ return &types.CommitInfo{
+ Version: version,
+ StoreInfos: storeInfos,
+ }
+}
+
+// Gets commitInfo from disk.
+func getCommitInfo(db dbm.DB, ver int64) (*types.CommitInfo, error) {
+ cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver)
+
+ bz, err := db.Get([]byte(cInfoKey))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get commit info")
+ } else if bz == nil {
+ return nil, errors.New("no commit info found")
+ }
+
+ cInfo := &types.CommitInfo{}
+ if err = cInfo.Unmarshal(bz); err != nil {
+ return nil, errors.Wrap(err, "failed to unmarshal commit info")
+ }
+
+ return cInfo, nil
+}
+
+func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) {
+ bz, err := cInfo.Marshal()
+ if err != nil {
+ panic(err)
+ }
+
+ cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version)
+ batch.Set([]byte(cInfoKey), bz)
+}
+
+func flushLatestVersion(batch dbm.Batch, version int64) {
+ bz, err := gogotypes.StdInt64Marshal(version)
+ if err != nil {
+ panic(err)
+ }
+
+ batch.Set([]byte(latestVersionKey), bz)
+}
diff --git a/app/fast_query/streaming_service.go b/app/fast_query/streaming_service.go
new file mode 100644
index 00000000..6d644442
--- /dev/null
+++ b/app/fast_query/streaming_service.go
@@ -0,0 +1,82 @@
+package fast_query
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/store/types"
+)
+
+var _ baseapp.StreamingService = &StreamingService{}
+
+// StreamingService is a concrete implementation of StreamingService that accumulates the state changes in the current block,
+// writes the ordered changeset out to version storage.
+type StreamingService struct {
+ listeners []*types.MemoryListener // the listeners that will be initialized with BaseApp
+ fastQueryService *FastQueryService
+ currentBlockNumber int64 // the current block number
+}
+
+// NewStreamingService creates a new StreamingService for the provided FastQueryService and storeKeys
+func NewStreamingService(fastQueryService *FastQueryService, storeKeys []types.StoreKey) *StreamingService {
+ // sort by the storeKeys first to avoid nondeterministic order
+ sort.SliceStable(storeKeys, func(i, j int) bool {
+ return strings.Compare(storeKeys[i].Name(), storeKeys[j].Name()) < 0
+ })
+
+ listeners := make([]*types.MemoryListener, len(storeKeys))
+ for i, key := range storeKeys {
+ listeners[i] = types.NewMemoryListener(key)
+ }
+ return &StreamingService{listeners, fastQueryService, 0}
+}
+
+// Listeners satisfies the baseapp.StreamingService interface
+func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
+ listeners := make(map[types.StoreKey][]types.WriteListener, len(fss.listeners))
+ for _, listener := range fss.listeners {
+ listeners[listener.StoreKey()] = []types.WriteListener{listener}
+ }
+ return listeners
+}
+
+// ListenBeginBlock satisfies the baseapp.ABCIListener interface
+func (fss *StreamingService) ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error {
+ fss.currentBlockNumber = req.GetHeader().Height
+ return nil
+}
+
+// ListenDeliverTx satisfies the baseapp.ABCIListener interface
+func (fss *StreamingService) ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
+ return nil
+}
+
+// ListenEndBlock satisfies the baseapp.ABCIListener interface
+func (fss *StreamingService) ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error {
+ return nil
+}
+
+func (fss *StreamingService) ListenCommit(ctx context.Context, res abci.ResponseCommit) error {
+ // concat the state caches
+ var changeSet []types.StoreKVPair
+ for _, listener := range fss.listeners {
+ changeSet = append(changeSet, listener.PopStateCache()...)
+ } + + return fss.fastQueryService.CommitChanges(fss.currentBlockNumber, changeSet) +} + +// Stream satisfies the baseapp.StreamingService interface +func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { + return nil +} + +// Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface +func (fss *StreamingService) Close() error { + return nil +} diff --git a/integration-tests/src/setup/init-test-framework.sh b/integration-tests/src/setup/init-test-framework.sh index 640074fc..f7130223 100755 --- a/integration-tests/src/setup/init-test-framework.sh +++ b/integration-tests/src/setup/init-test-framework.sh @@ -190,6 +190,7 @@ sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_1/config/app.to sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#":8080"#":'"$ROSETTA_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml sed -i -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml @@ -201,6 +202,7 @@ sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_2/config/app.to sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's#":8080"#":'"$ROSETTA_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml echo "Chaning genesis.json..." sed -i -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json From 7577abcec3695157aef99cd1cc0105688c1d6e3b Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Fri, 8 Dec 2023 15:20:40 +0800 Subject: [PATCH 02/14] wip: updated fast query service --- app/app.go | 3 ++- app/fast_query/fast_query_service.go | 31 +++++++++++++++++++++------- go.mod | 8 +++---- 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/app/app.go b/app/app.go index 674b7637..3b2334f2 100644 --- a/app/app.go +++ b/app/app.go @@ -2,12 +2,12 @@ package app import ( "encoding/json" + "golang.org/x/exp/slices" "io" "net/http" "os" "path/filepath" "reflect" // #nosec G702 - "slices" "github.com/prometheus/client_golang/prometheus" @@ -263,6 +263,7 @@ func NewTerraApp( // when fastquery streamer is enabled in the config, // setup the fastquery feature and serve the data // from the fastquery. 
+ // TODO: move checking if streaming service is enabled to a helper function streamers := cast.ToStringSlice(appOpts.Get("store.streamers")) if slices.Contains(streamers, "fastquery") { app.SetupFastQueryDB(appOpts, homePath) diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index 442bda72..8e0af87a 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -11,39 +11,56 @@ import ( type FastQueryService struct { Store *store.Store safeBatchDBCloser driver.SafeBatchDBCloser - fastQueryDriver height_driver.HeightEnabledDB + fastQueryDb *height_driver.HeightDB logger log.Logger } func NewFastQueryService(homedir string, logger log.Logger) (*FastQueryService, error) { // Create a new instance of the Database Driver that uses LevelDB - fastQueryDriver, err := driver.NewDBDriver(homedir) + fastQueryDbDriver, err := driver.NewDBDriver(homedir) if err != nil { return nil, err } // Create HeightDB Driver that implements optimization for reading // and writing data in the database in paralell. - fastQueryHeightDriver := height_driver.NewHeightDB( - fastQueryDriver, + fastQueryDb := height_driver.NewHeightDB( + fastQueryDbDriver, &height_driver.HeightDBConfig{ Debug: true, }, ) - heightEnabledDB := driver.NewSafeBatchDB(fastQueryHeightDriver) + heightEnabledDB := driver.NewSafeBatchDB(fastQueryDb) safeBatchDBCloser := heightEnabledDB.(driver.SafeBatchDBCloser) - store := store.NewStore(heightEnabledDB, fastQueryHeightDriver, logger) + store := store.NewStore(heightEnabledDB, fastQueryDb, logger) return &FastQueryService{ Store: store, safeBatchDBCloser: safeBatchDBCloser, - fastQueryDriver: fastQueryDriver, + fastQueryDb: fastQueryDb, logger: logger, }, err } func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.StoreKVPair) error { fqs.logger.Info("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet) + fqs.fastQueryDb.SetWriteHeight(blockHeight) + fqs.safeBatchDBCloser.Open() + + for _, kv := range changeSet { + key := fqs.Store.StoreKeysByName()[kv.StoreKey] + ckvs := fqs.Store.GetCommitKVStore(key) + if kv.Delete { + ckvs.Delete(kv.Key) + } else { + ckvs.Set(kv.Key, kv.Value) + } + } + + fqs.fastQueryDb.ClearReadHeight() + if _, err := fqs.safeBatchDBCloser.Flush(); err != nil { + return err + } return nil } diff --git a/go.mod b/go.mod index 8d0db47d..27645dd6 100644 --- a/go.mod +++ b/go.mod @@ -14,12 +14,15 @@ require ( github.com/cosmos/cosmos-sdk v0.47.5 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.4.10 + github.com/cosmos/iavl v0.20.1 github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.0.1-0.20231012160012-d0f49580a238 github.com/cosmos/ibc-apps/modules/async-icq/v7 v7.0.0 github.com/cosmos/ibc-apps/modules/ibc-hooks/v7 v7.0.0-20230803181732-7c8f814d3b79 github.com/cosmos/ibc-go/v7 v7.3.0 + github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 + github.com/golang/snappy v0.0.4 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/pkg/errors v0.9.1 @@ -32,6 +35,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/terra-money/alliance v0.3.2 go.uber.org/mock v0.3.0 + golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 google.golang.org/grpc v1.58.3 ) @@ -68,7 +72,6 @@ require ( github.com/confio/ics23/go v0.9.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect 
github.com/cosmos/gogogateway v1.2.0 // indirect
- github.com/cosmos/iavl v0.20.1 // indirect
 github.com/cosmos/ics23/go v0.10.0 // indirect
 github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect
 github.com/cosmos/ledger-go v0.9.3 // indirect
@@ -92,10 +95,8 @@ require (
 github.com/go-logfmt/logfmt v0.6.0 // indirect
 github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
 github.com/gogo/googleapis v1.4.1 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang/glog v1.1.2 // indirect
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/snappy v0.0.4 // indirect
 github.com/google/btree v1.1.2 // indirect
 github.com/google/go-cmp v0.5.9 // indirect
 github.com/google/gofuzz v1.2.0 // indirect
@@ -165,7 +166,6 @@ require (
 go.etcd.io/bbolt v1.3.7 // indirect
 go.opencensus.io v0.24.0 // indirect
 golang.org/x/crypto v0.14.0 // indirect
- golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb // indirect
 golang.org/x/net v0.17.0 // indirect
 golang.org/x/oauth2 v0.10.0 // indirect
 golang.org/x/sync v0.3.0 // indirect

From 37a89abcbd9a5d9673520231d0ec6d20a780ab47 Mon Sep 17 00:00:00 2001
From: emidev98
Date: Fri, 8 Dec 2023 18:05:11 +0200
Subject: [PATCH 03/14] wip(v2.9): fast query service

---
 app/app.go | 13 ++++---------
 app/fast_query/fast_query_service.go | 12 +++++++-----
 app/fast_query/store/store.go | 10 ++++++++--
 app/fast_query/streaming_service.go | 8 +++++++-
 4 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/app/app.go b/app/app.go
index 3b2334f2..c355d450 100644
--- a/app/app.go
+++ b/app/app.go
@@ -2,13 +2,14 @@ package app

 import (
 "encoding/json"
- "golang.org/x/exp/slices"
 "io"
 "net/http"
 "os"
 "path/filepath"
 "reflect" // #nosec G702

+ "golang.org/x/exp/slices"
+
 "github.com/prometheus/client_golang/prometheus"

 "github.com/cosmos/cosmos-sdk/store/streaming"
@@ -640,20 +641,14 @@ func (app *TerraApp) SetupFastQueryDB(appOpts servertypes.AppOptions, homePath s
 // Create the path for fastquerydb
 dir := filepath.Join(homePath, "data")

- // Create a copy of the store keys to avoid mutating the app.keys
- storeKeys := make([]storetypes.StoreKey, 0, len(app.keys))
- for _, storeKey := range app.keys {
- storeKeys = append(storeKeys, storeKey)
- }
-
 // Create fast query service
- fastQueryService, err := fastquery.NewFastQueryService(dir, app.Logger())
+ fastQueryService, err := fastquery.NewFastQueryService(dir, app.Logger(), app.keys)
 if err != nil {
 return err
 }

 // Create the streaming service
- streamingservice := fastquery.NewStreamingService(fastQueryService, storeKeys)
+ streamingservice := fastquery.NewStreamingService(fastQueryService, app.keys)

 // Assign the streaming service to the app and
 // the query multi store so the users query the
diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go
index 8e0af87a..514bebf9 100644
--- a/app/fast_query/fast_query_service.go
+++ b/app/fast_query/fast_query_service.go
@@ -15,8 +15,8 @@ type FastQueryService struct {
 logger log.Logger
 }

-func NewFastQueryService(homedir string, logger log.Logger) (*FastQueryService, error) {
- // Create a new instance of the Database Driver that uses LevelDB
+func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string]*types.KVStoreKey) (*FastQueryService, error) {
+ // Create the database driver that uses LevelDB
 fastQueryDbDriver, err := driver.NewDBDriver(homedir)
 if err != nil {
 return nil, err
@@ -30,10 +30,12 @@ func NewFastQueryService(homedir string, logger log.Logger) 
(*FastQueryService,
 Debug: true,
 },
 )
-
+ // Create the new BatchingDB with its safe batch closer
 heightEnabledDB := driver.NewSafeBatchDB(fastQueryDb)
 safeBatchDBCloser := heightEnabledDB.(driver.SafeBatchDBCloser)
- store := store.NewStore(heightEnabledDB, fastQueryDb, logger)
+ store := store.NewStore(heightEnabledDB, fastQueryDb, logger, storeKeys)
+
+ // store.LoadLatestVersion()

 return &FastQueryService{
 Store: store,
@@ -45,6 +47,7 @@ func NewFastQueryService(homedir string, logger log.Logger) (*FastQueryService,

 func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.StoreKVPair) error {
 fqs.logger.Info("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet)
+
 fqs.fastQueryDb.SetWriteHeight(blockHeight)
 fqs.safeBatchDBCloser.Open()

@@ -58,7 +61,6 @@ func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.
 }
 }

- fqs.fastQueryDb.ClearReadHeight()
 if _, err := fqs.safeBatchDBCloser.Flush(); err != nil {
 return err
 }
diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go
index 57455670..d4c16c8c 100644
--- a/app/fast_query/store/store.go
+++ b/app/fast_query/store/store.go
@@ -76,8 +76,8 @@ var (
 // store will be created with a PruneNothing pruning strategy by default. After
 // a store is created, KVStores must be mounted and finally LoadLatestVersion or
 // LoadVersion must be called.
-func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger) *Store {
- return &Store{
+func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeKeys map[string]*types.KVStoreKey) *Store {
+ store := &Store{
 db: db,
 hldb: hldb,
 logger: logger,
@@ -91,6 +91,12 @@ func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger) *Store
 pruneHeights: make([]int64, 0),
 listeners: make(map[types.StoreKey][]types.WriteListener),
 }
+
+ for _, storeKeyValue := range storeKeys {
+ store.MountStoreWithDB(storeKeyValue, types.StoreTypeIAVL, db)
+ }
+
+ return store
 }

 // GetPruning fetches the pruning strategy from the root store.
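[editor's note] A usage sketch (not part of the patch) of how the pieces introduced up to this commit fit together. It mirrors app.SetupFastQueryDB as of this commit, assuming the package paths shown in the diffs; the helper name setupFastQueryDBSketch is illustrative only:

package app

import (
	"path/filepath"

	servertypes "github.com/cosmos/cosmos-sdk/server/types"
	fastquery "github.com/terra-money/core/v2/app/fast_query"
)

// setupFastQueryDBSketch wires the fast query feature: the KVStoreKey map is
// handed to the service, which mounts every key on its own Store, and the
// same map seeds the streaming service's per-store listeners.
func (app *TerraApp) setupFastQueryDBSketch(appOpts servertypes.AppOptions, homePath string) error {
	// the fast query DB lives under the node's data directory
	dir := filepath.Join(homePath, "data")

	fastQueryService, err := fastquery.NewFastQueryService(dir, app.Logger(), app.keys)
	if err != nil {
		return err
	}

	// every commit streams the block's changeset into the fast query store,
	// and queries are then answered from that store instead of the main multistore
	app.SetStreamingService(fastquery.NewStreamingService(fastQueryService, app.keys))
	app.SetQueryMultiStore(fastQueryService.Store)

	return nil
}
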
diff --git a/app/fast_query/streaming_service.go b/app/fast_query/streaming_service.go index 6d644442..2c7201a2 100644 --- a/app/fast_query/streaming_service.go +++ b/app/fast_query/streaming_service.go @@ -23,7 +23,13 @@ type StreamingService struct { } // NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewStreamingService(fastQueryService *FastQueryService, storeKeys []types.StoreKey) *StreamingService { +func NewStreamingService(fastQueryService *FastQueryService, appKeys map[string]*types.KVStoreKey) *StreamingService { + // Create a copy of the store keys to avoid mutating the app.keys + storeKeys := make([]types.StoreKey, 0, len(appKeys)) + for _, storeKey := range appKeys { + storeKeys = append(storeKeys, storeKey) + } + // sort by the storeKeys first to avoid indeterministic order sort.SliceStable(storeKeys, func(i, j int) bool { return strings.Compare(storeKeys[i].Name(), storeKeys[j].Name()) < 0 From 5ba6cc66b07f86a8f68f5ddf0102f709e665d167 Mon Sep 17 00:00:00 2001 From: emidev98 Date: Mon, 18 Dec 2023 20:17:44 +0200 Subject: [PATCH 04/14] wip: fast query --- app/fast_query/db/height_driver/height_db.go | 9 --------- app/fast_query/fast_query_service.go | 5 ++++- app/fast_query/store/store.go | 8 ++++++-- integration-tests/src/setup/init-test-framework.sh | 6 ++---- 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/app/fast_query/db/height_driver/height_db.go b/app/fast_query/db/height_driver/height_db.go index a187e6c8..ffeac99d 100644 --- a/app/fast_query/db/height_driver/height_db.go +++ b/app/fast_query/db/height_driver/height_db.go @@ -166,15 +166,6 @@ func (hd *HeightDB) Close() error { // NewBatch creates a batch for atomic updates. The caller must call Batch.Close. func (hd *HeightDB) NewBatch() tmdb.Batch { - // if hld.writeBatch != nil { - // // TODO: fix me - // return hld.writeBatch - // } else { - // fmt.Println("!!! opening hld.batch", hld.GetCurrentWriteHeight()) - // hld.writeBatch = hld.odb.NewBatch(hld.GetCurrentWriteHeight()) - // return hld.writeBatch - // } - // return hd.odb.NewBatch(hd.GetCurrentWriteHeight()) } diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index 514bebf9..2ea5a1a1 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -33,7 +33,10 @@ func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string // Create the new BatchingDB with it's safe batch closer heightEnabledDB := driver.NewSafeBatchDB(fastQueryDb) safeBatchDBCloser := heightEnabledDB.(driver.SafeBatchDBCloser) - store := store.NewStore(heightEnabledDB, fastQueryDb, logger, storeKeys) + store, err := store.NewStore(heightEnabledDB, fastQueryDb, logger, storeKeys) + if err != nil { + return nil, err + } // store.LoadLatestVersion() diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go index 57455670..d4c16c8c 100644 --- a/app/fast_query/store/store.go +++ b/app/fast_query/store/store.go @@ -76,7 +76,7 @@ var ( // store will be created with a PruneNothing pruning strategy by default. After // a store is created, KVStores must be mounted and finally LoadLatestVersion or // LoadVersion must be called. 
-func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeKeys map[string]*types.KVStoreKey) *Store { +func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeKeys map[string]*types.KVStoreKey) (*Store, error) { store := &Store{ db: db, hldb: hldb, @@ -96,7 +96,11 @@ func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeK store.MountStoreWithDB(storeKeyValue, types.StoreTypeIAVL, db) } - return store + if err := store.LoadLatestVersion(); err != nil { + return nil, err + } + + return store, nil } // GetPruning fetches the pruning strategy from the root store. diff --git a/integration-tests/src/setup/init-test-framework.sh b/integration-tests/src/setup/init-test-framework.sh index 0ab21f1b..8a16c2bb 100755 --- a/integration-tests/src/setup/init-test-framework.sh +++ b/integration-tests/src/setup/init-test-framework.sh @@ -35,8 +35,6 @@ RPCPORT_1=16657 RPCPORT_2=26657 RESTPORT_1=1316 RESTPORT_2=1317 -ROSETTA_1=8080 -ROSETTA_2=8081 GRPCPORT_1=8090 GRPCPORT_2=9090 GRPCWEB_1=8091 @@ -189,7 +187,6 @@ sed -i -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml -sed -i -e 's#":8080"#":'"$ROSETTA_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml @@ -201,7 +198,6 @@ sed -i -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i -e 's#":8080"#":'"$ROSETTA_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml echo "Chaning genesis.json..." @@ -210,6 +206,8 @@ sed -i -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHA sed -i -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json sed -i -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json +exit + echo "Starting $CHAINID_1 in $CHAIN_DIR..." 
echo "Creating log file at $CHAIN_DIR/$CHAINID_1.log" $BINARY start --log_level trace --log_format json --home $CHAIN_DIR/$CHAINID_1 --pruning=nothing --grpc.address="0.0.0.0:$GRPCPORT_1" --grpc-web.address="0.0.0.0:$GRPCWEB_1" > $CHAIN_DIR/$CHAINID_1.log 2>&1 & From b27da2d7b818d8fb27e878691320774707261599 Mon Sep 17 00:00:00 2001 From: emidev98 Date: Tue, 19 Dec 2023 14:04:43 +0200 Subject: [PATCH 05/14] wip: fast query service --- app/fast_query/fast_query_service.go | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index 2ea5a1a1..a0cbcbc3 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -1,6 +1,8 @@ package fast_query import ( + "fmt" + log "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/store/types" "github.com/terra-money/core/v2/app/fast_query/db/driver" @@ -38,8 +40,6 @@ func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string return nil, err } - // store.LoadLatestVersion() - return &FastQueryService{ Store: store, safeBatchDBCloser: safeBatchDBCloser, @@ -54,18 +54,12 @@ func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types. fqs.fastQueryDb.SetWriteHeight(blockHeight) fqs.safeBatchDBCloser.Open() - for _, kv := range changeSet { - key := fqs.Store.StoreKeysByName()[kv.StoreKey] - ckvs := fqs.Store.GetCommitKVStore(key) - if kv.Delete { - ckvs.Delete(kv.Key) - } else { - ckvs.Set(kv.Key, kv.Value) - } - } - - if _, err := fqs.safeBatchDBCloser.Flush(); err != nil { - return err - } + lastCommitId := fqs.Store.Commit() + fmt.Print("FQS last_block_height ", lastCommitId.Version) + // if rollback, err := fqs.safeBatchDBCloser.Flush(); err != nil { + // return err + // } else if rollback != nil { + // rollback.Close() + // } return nil } From 89229dbe7de9fd5704ed8c7a6d79f78b18d6fc90 Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Wed, 20 Dec 2023 16:22:33 +0800 Subject: [PATCH 06/14] wip: chain halts --- app/fast_query/fast_query_service.go | 16 ++++++++++++++-- app/fast_query/store/store.go | 2 +- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index a0cbcbc3..b3b6801e 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -54,8 +54,20 @@ func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types. 
fqs.fastQueryDb.SetWriteHeight(blockHeight) fqs.safeBatchDBCloser.Open() - lastCommitId := fqs.Store.Commit() - fmt.Print("FQS last_block_height ", lastCommitId.Version) + for _, kv := range changeSet { + if kv.Delete { + fqs.safeBatchDBCloser.Delete(kv.Key) + } else { + fqs.safeBatchDBCloser.Set(kv.Key, kv.Value) + } + } + + commitId := fqs.Store.Commit() + fmt.Printf("[commitId]: %v\n", commitId) + + fqs.safeBatchDBCloser.Flush() + fqs.fastQueryDb.ClearWriteHeight() + //fmt.Print("FQS last_block_height ", lastCommitId.Version) // if rollback, err := fqs.safeBatchDBCloser.Flush(); err != nil { // return err // } else if rollback != nil { diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go index d4c16c8c..4837812d 100644 --- a/app/fast_query/store/store.go +++ b/app/fast_query/store/store.go @@ -93,7 +93,7 @@ func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeK } for _, storeKeyValue := range storeKeys { - store.MountStoreWithDB(storeKeyValue, types.StoreTypeIAVL, db) + store.MountStoreWithDB(storeKeyValue, types.StoreTypeDB, db) } if err := store.LoadLatestVersion(); err != nil { From cea5e9ee93a22036e77aae43223af2f353207e6f Mon Sep 17 00:00:00 2001 From: emidev98 Date: Wed, 20 Dec 2023 12:19:03 +0200 Subject: [PATCH 07/14] fix: fast query data store KVStore --- app/fast_query/fast_query_service.go | 26 +++++++++---------- .../src/setup/init-test-framework.sh | 5 +++- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index b3b6801e..47b93603 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -5,6 +5,7 @@ import ( log "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/store/types" + storetypes "github.com/cosmos/cosmos-sdk/store/types" "github.com/terra-money/core/v2/app/fast_query/db/driver" "github.com/terra-money/core/v2/app/fast_query/db/height_driver" "github.com/terra-money/core/v2/app/fast_query/store" @@ -54,24 +55,21 @@ func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types. 
fqs.fastQueryDb.SetWriteHeight(blockHeight) fqs.safeBatchDBCloser.Open() - for _, kv := range changeSet { - if kv.Delete { - fqs.safeBatchDBCloser.Delete(kv.Key) + for _, change := range changeSet { + storeKey := storetypes.NewKVStoreKey(change.StoreKey) + commitKVStore := fqs.Store.GetStoreByName(storeKey.Name()).(types.CommitKVStore) + fmt.Print(commitKVStore) + if change.Delete { + commitKVStore.Delete(change.Key) } else { - fqs.safeBatchDBCloser.Set(kv.Key, kv.Value) + commitKVStore.Set(change.Key, change.Value) } } - commitId := fqs.Store.Commit() - fmt.Printf("[commitId]: %v\n", commitId) - - fqs.safeBatchDBCloser.Flush() + fqs.Store.Commit() + if _, err := fqs.safeBatchDBCloser.Flush(); err != nil { + return err + } fqs.fastQueryDb.ClearWriteHeight() - //fmt.Print("FQS last_block_height ", lastCommitId.Version) - // if rollback, err := fqs.safeBatchDBCloser.Flush(); err != nil { - // return err - // } else if rollback != nil { - // rollback.Close() - // } return nil } diff --git a/integration-tests/src/setup/init-test-framework.sh b/integration-tests/src/setup/init-test-framework.sh index 8a16c2bb..90460a5e 100755 --- a/integration-tests/src/setup/init-test-framework.sh +++ b/integration-tests/src/setup/init-test-framework.sh @@ -188,6 +188,8 @@ sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_1/config/app.to sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i '/# Enable defines if the Rosetta API server should be enabled\./ {N; s/enable = true/enable = false/}' $CHAIN_DIR/$CHAINID_1/config/app.toml + sed -i -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml sed -i -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml @@ -198,7 +200,8 @@ sed -i -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '/# Enable defines if the Rosetta API server should be enabled\./ {N; s/enable = true/enable = false/}' $CHAIN_DIR/$CHAINID_2/config/app.toml echo "Chaning genesis.json..." 
sed -i -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json From 4d0bc54589d219fcad73faa058554592bdd102bd Mon Sep 17 00:00:00 2001 From: emidev98 Date: Thu, 21 Dec 2023 08:19:02 +0200 Subject: [PATCH 08/14] test: fastquerydb --- integration-tests/src/helpers/const.ts | 2 - integration-tests/src/helpers/index.ts | 8 +-- integration-tests/src/helpers/lcd.clients.ts | 64 +++++++++++++++++++ .../src/helpers/lcd.connection.ts | 24 ------- .../src/modules/alliance/alliance.test.ts | 22 +++---- .../src/modules/auth/auth.test.ts | 6 +- .../src/modules/authz/authz.test.ts | 10 ++- .../src/modules/feeshare/feeshare.test.ts | 16 ++--- integration-tests/src/modules/gov/gov.test.ts | 6 +- .../src/modules/ibc-hooks/ibc-hooks.test.ts | 10 +-- .../src/modules/ica/icav1.test.ts | 21 +++--- .../src/modules/icq/icqv1.test.ts | 4 +- integration-tests/src/modules/pob/pob.test.ts | 7 +- .../modules/tokenfactory/tokenfactory.test.ts | 31 +++++---- .../src/modules/wasm/ics20.test.ts | 17 +++-- 15 files changed, 147 insertions(+), 101 deletions(-) create mode 100644 integration-tests/src/helpers/lcd.clients.ts delete mode 100644 integration-tests/src/helpers/lcd.connection.ts diff --git a/integration-tests/src/helpers/const.ts b/integration-tests/src/helpers/const.ts index 28aafa77..f83d8bab 100644 --- a/integration-tests/src/helpers/const.ts +++ b/integration-tests/src/helpers/const.ts @@ -1,7 +1,5 @@ export const SAFE_VOTING_PERIOD_TIME = 2100; export const SAFE_IBC_TRANSFER = 4100; -export const SAFE_BLOCK_INCLUSION_TIME = 1100; -export const blockInclusion = () => new Promise((resolve) => setTimeout(() => resolve(SAFE_BLOCK_INCLUSION_TIME), SAFE_BLOCK_INCLUSION_TIME)); export const ibcTransfer = () => new Promise((resolve) => setTimeout(() => resolve(SAFE_IBC_TRANSFER), SAFE_IBC_TRANSFER)); export const votingPeriod = () => new Promise((resolve) => setTimeout(() => resolve(SAFE_VOTING_PERIOD_TIME), SAFE_VOTING_PERIOD_TIME)); \ No newline at end of file diff --git a/integration-tests/src/helpers/index.ts b/integration-tests/src/helpers/index.ts index 1a0f7f12..df829931 100644 --- a/integration-tests/src/helpers/index.ts +++ b/integration-tests/src/helpers/index.ts @@ -1,19 +1,15 @@ import { - SAFE_BLOCK_INCLUSION_TIME, SAFE_VOTING_PERIOD_TIME, - blockInclusion, votingPeriod, ibcTransfer, } from "./const" import { getMnemonics } from "./mnemonics" -import { getLCDClient } from "./lcd.connection" +import { LCDClients } from "./lcd.clients" export { - SAFE_BLOCK_INCLUSION_TIME, SAFE_VOTING_PERIOD_TIME, - blockInclusion, votingPeriod, ibcTransfer, getMnemonics, - getLCDClient + LCDClients } \ No newline at end of file diff --git a/integration-tests/src/helpers/lcd.clients.ts b/integration-tests/src/helpers/lcd.clients.ts new file mode 100644 index 00000000..d50f2fce --- /dev/null +++ b/integration-tests/src/helpers/lcd.clients.ts @@ -0,0 +1,64 @@ +import { LCDClient } from "@terra-money/feather.js"; + +export class LCDClients { + public chain1 = new LCDClient({ + "test-1": { + lcd: "http://localhost:1316", + chainID: "test-1", + gasPrices: "0.15uluna", + gasAdjustment: 1.5, + prefix: "terra" + } + }) + public chain2 = new LCDClient({ + "test-2": { + lcd: "http://localhost:1317", + chainID: "test-2", + gasPrices: "0.15uluna", + gasAdjustment: 1.5, + prefix: "terra" + } + }) + + static create() { + return new LCDClients(); + } + + private constructor() { } + + async blockInclusionChain1() { + let res = await this.chain1.tendermint.blockInfo("test-1") + let height = 
res.block.header.height;
+ let currentHeight = res.block.header.height;
+
+ for await (const _ of new Array(10)) {
+ await interval();
+ let res = await this.chain1.tendermint.blockInfo("test-1")
+ currentHeight = res.block.header.height;
+
+ if (height != currentHeight) return Promise.resolve();
+ }
+ }
+
+ async blockInclusionChain2() {
+ let res = await this.chain2.tendermint.blockInfo("test-2")
+ let height = res.block.header.height;
+ let currentHeight = res.block.header.height;
+
+ for await (const _ of new Array(10)) {
+ await interval();
+ let res = await this.chain2.tendermint.blockInfo("test-2")
+ currentHeight = res.block.header.height;
+
+ if (height != currentHeight) return Promise.resolve();
+ }
+ }
+}
+
+function interval(): Promise<void> {
+ return new Promise((resolve) => {
+ setTimeout(() => {
+ resolve();
+ }, 400);
+ });
+}
diff --git a/integration-tests/src/helpers/lcd.connection.ts b/integration-tests/src/helpers/lcd.connection.ts
deleted file mode 100644
index b228bc7f..00000000
--- a/integration-tests/src/helpers/lcd.connection.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import { LCDClient } from "@terra-money/feather.js";
-
-export function getLCDClient() {
- return {
- chain1: new LCDClient({
- "test-1": {
- lcd: "http://localhost:1316",
- chainID: "test-1",
- gasPrices: "0.15uluna",
- gasAdjustment: 1.5,
- prefix: "terra"
- }
- }),
- chain2: new LCDClient({
- "test-2": {
- lcd: "http://localhost:1317",
- chainID: "test-2",
- gasPrices: "0.15uluna",
- gasAdjustment: 1.5,
- prefix: "terra"
- }
- })
- }
-} 
\ No newline at end of file
diff --git a/integration-tests/src/modules/alliance/alliance.test.ts b/integration-tests/src/modules/alliance/alliance.test.ts
index 35935e22..bfad8e39 100644
--- a/integration-tests/src/modules/alliance/alliance.test.ts
+++ b/integration-tests/src/modules/alliance/alliance.test.ts
@@ -1,11 +1,11 @@
-import { getLCDClient, getMnemonics, blockInclusion, votingPeriod } from "../../helpers";
+import { LCDClients, getMnemonics, votingPeriod } from "../../helpers";
 import { Coin, MsgTransfer, MsgCreateAlliance, Coins, MsgVote, Fee, MsgAllianceDelegate, MsgClaimDelegationRewards, MsgAllianceUndelegate, MsgDeleteAlliance, MsgSubmitProposal } from "@terra-money/feather.js";
 import { VoteOption } from "@terra-money/terra.proto/cosmos/gov/v1beta1/gov";
 import { Height } from "@terra-money/feather.js/dist/core/ibc/core/client/Height";

describe("Alliance Module
(https://github.com/terra-money/alliance/tree/release/ // Query the IBC coin and check if there is any // which menas that the IBC transfer was successful for (let i = 0; i <= 5; i++) { - await blockInclusion(); + await LCD.blockInclusionChain2(); let _ibcCoin = (await LCD.chain2.bank.balance(allianceAccountAddress))[0].find(c => c.denom.startsWith("ibc/")); if (_ibcCoin) { ibcCoin = _ibcCoin; @@ -100,7 +100,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; @@ -155,7 +155,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; @@ -235,7 +235,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; @@ -250,7 +250,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ }) test("Must undelegate from the alliance", async () => { - await blockInclusion(); + await LCD.blockInclusionChain2(); const allianceWallet2 = LCD.chain2.wallet(accounts.allianceMnemonic); let ibcCoin = (await LCD.chain2.bank.balance(allianceAccountAddress))[0].find(c => c.denom.startsWith("ibc/")) as Coin; let tx = await allianceWallet2.createAndSignTx({ @@ -265,7 +265,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; @@ -304,7 +304,7 @@ describe("Alliance Module (https://github.com/terra-money/alliance/tree/release/ chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; diff --git a/integration-tests/src/modules/auth/auth.test.ts b/integration-tests/src/modules/auth/auth.test.ts index 2739aadd..ae24f0dc 100644 --- a/integration-tests/src/modules/auth/auth.test.ts +++ b/integration-tests/src/modules/auth/auth.test.ts @@ -1,10 +1,10 @@ -import { getMnemonics, getLCDClient, blockInclusion } from "../../helpers"; +import { getMnemonics, LCDClients } from "../../helpers"; import { ContinuousVestingAccount, Coins, MnemonicKey, MsgCreateVestingAccount, Coin } from "@terra-money/feather.js"; import moment from "moment"; describe("Auth Module (https://github.com/terra-money/cosmos-sdk/tree/release/v0.47.x/x/auth)", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const wallet 
= LCD.chain1.wallet(accounts.genesisVesting1); const vestAccAddr1 = accounts.genesisVesting1.accAddress("terra"); @@ -76,7 +76,7 @@ describe("Auth Module (https://github.com/terra-money/cosmos-sdk/tree/release/v0 }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events) .toEqual([{ diff --git a/integration-tests/src/modules/authz/authz.test.ts b/integration-tests/src/modules/authz/authz.test.ts index 1d5d94b7..622ee75e 100644 --- a/integration-tests/src/modules/authz/authz.test.ts +++ b/integration-tests/src/modules/authz/authz.test.ts @@ -1,12 +1,10 @@ -import { getMnemonics } from "../../helpers/mnemonics"; -import { getLCDClient } from "../../helpers/lcd.connection"; +import { getMnemonics, LCDClients } from "../../helpers"; import { StakeAuthorization, MsgGrantAuthorization, AuthorizationGrant, Coin, MsgExecAuthorized, MsgDelegate } from "@terra-money/feather.js"; import { AuthorizationType } from "@terra-money/terra.proto/cosmos/staking/v1beta1/authz"; import moment from "moment"; -import { blockInclusion } from "../../helpers/const"; describe("Authz Module (https://github.com/terra-money/cosmos-sdk/tree/release/v0.47.x/x/authz)", () => { - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); // Accounts used in chain2, which means they // will not cause conflicts with tx nonces @@ -32,7 +30,7 @@ describe("Authz Module (https://github.com/terra-money/cosmos-sdk/tree/release/v chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check the MsgGrantAuthorization executed as expected let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; @@ -78,7 +76,7 @@ describe("Authz Module (https://github.com/terra-money/cosmos-sdk/tree/release/v chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; let eventsList = txResult.logs[0].events; diff --git a/integration-tests/src/modules/feeshare/feeshare.test.ts b/integration-tests/src/modules/feeshare/feeshare.test.ts index 054dfbdf..bfb02a80 100644 --- a/integration-tests/src/modules/feeshare/feeshare.test.ts +++ b/integration-tests/src/modules/feeshare/feeshare.test.ts @@ -1,11 +1,11 @@ -import { getMnemonics, blockInclusion, getLCDClient } from "../../helpers"; +import { getMnemonics, LCDClients } from "../../helpers"; import { Coins, Fee, MnemonicKey, MsgExecuteContract, MsgInstantiateContract, MsgRegisterFeeShare, MsgStoreCode } from "@terra-money/feather.js"; import fs from "fs"; import path from 'path'; describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6/x/feeshare) ", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const wallet = LCD.chain1.wallet(accounts.feeshareMnemonic); const feeshareAccountAddress = accounts.feeshareMnemonic.accAddress("terra"); @@ -25,7 +25,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await
LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; let codeId = Number(txResult.logs[0].events[1].attributes[1].value); expect(codeId).toBeDefined(); @@ -44,7 +44,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 chainID: "test-1", }); result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; contractAddress = txResult.logs[0].events[4].attributes[0].value; expect(contractAddress).toBeDefined(); @@ -76,7 +76,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); // Check the tx logs let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; @@ -118,7 +118,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 // Check that querying all feeshares returns at least one feeshare let feesharesByWallet = await LCD.chain1.feeshare.feeshares("test-1"); expect(feesharesByWallet.feeshare.length).toBeGreaterThan(0); - await blockInclusion(); + await LCD.blockInclusionChain1(); // Send an execute message to the reflect contract let msgExecute = new MsgExecuteContract( @@ -136,7 +136,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 fee: new Fee(200_000, "400000uluna"), }); result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); // Check the tx logs have the expected events txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; @@ -175,7 +175,7 @@ describe("Feeshare Module (https://github.com/terra-money/core/tree/release/v2.6 }] } ]) - await blockInclusion() + await LCD.blockInclusionChain1(); // Query the random account (new owner of the contract) // and validate that the account has received 50% of the fees diff --git a/integration-tests/src/modules/gov/gov.test.ts b/integration-tests/src/modules/gov/gov.test.ts index ae9f9345..6efed77d 100644 --- a/integration-tests/src/modules/gov/gov.test.ts +++ b/integration-tests/src/modules/gov/gov.test.ts @@ -1,10 +1,10 @@ -import { getLCDClient, blockInclusion, votingPeriod, getMnemonics } from "../../helpers"; +import { LCDClients, votingPeriod, getMnemonics } from "../../helpers"; import { Coins, MsgVote, Fee, MsgSubmitProposal, Proposal, Int } from "@terra-money/feather.js"; import { ProposalStatus, VoteOption } from "@terra-money/terra.proto/cosmos/gov/v1beta1/gov"; describe("Governance Module (https://github.com/terra-money/cosmos-sdk/tree/release/v0.47.x/x/gov) ", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const val2Wallet = LCD.chain2.wallet(accounts.val2); const val2WalletAddress = val2Wallet.key.accAddress("terra"); @@ -157,7 +157,7 @@ describe("Governance Module (https://github.com/terra-money/cosmos-sdk/tree/rele chainID: "test-2", }); let result = await LCD.chain2.tx.broadcastSync(tx, "test-2"); - await blockInclusion(); + await LCD.blockInclusionChain2(); // Check that the proposal was created successfully let txResult = await LCD.chain2.tx.txInfo(result.txhash, "test-2") as any; diff --git a/integration-tests/src/modules/ibc-hooks/ibc-hooks.test.ts b/integration-tests/src/modules/ibc-hooks/ibc-hooks.test.ts index fcfe1d02..cf6ac3ba 100644 ---
a/integration-tests/src/modules/ibc-hooks/ibc-hooks.test.ts +++ b/integration-tests/src/modules/ibc-hooks/ibc-hooks.test.ts @@ -1,6 +1,6 @@ import { Coin, Coins, MsgInstantiateContract, MsgStoreCode, MsgTransfer } from "@terra-money/feather.js"; import { deriveIbcHooksSender } from "@terra-money/feather.js/dist/core/ibc-hooks"; -import { ibcTransfer, getMnemonics, getLCDClient, blockInclusion } from "../../helpers"; +import { ibcTransfer, getMnemonics, LCDClients } from "../../helpers"; import fs from "fs"; import path from 'path'; import moment from "moment"; @@ -10,7 +10,7 @@ describe("IbcHooks Module (github.com/cosmos/ibc-apps/modules/ibc-hooks/v7) ", ( // Prepare the LCD and wallets. chain1Wallet is the one that will // deploy the contract on chain 1 and chain2Wallet will be used // to send IBC messages from chain 2 to interact with the contract. - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const chain1Wallet = LCD.chain1.wallet(accounts.ibcHooksMnemonic); const chain2Wallet = LCD.chain2.wallet(accounts.ibcHooksMnemonic); @@ -31,7 +31,7 @@ describe("IbcHooks Module (github.com/cosmos/ibc-apps/modules/ibc-hooks/v7) ", ( }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; let codeId = Number(txResult.logs[0].events[1].attributes[1].value); expect(codeId).toBeDefined(); @@ -50,7 +50,7 @@ describe("IbcHooks Module (github.com/cosmos/ibc-apps/modules/ibc-hooks/v7) ", ( chainID: "test-1", }); result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; contractAddress = txResult.logs[0].events[4].attributes[0].value; expect(contractAddress).toBeDefined(); @@ -149,7 +149,7 @@ describe("IbcHooks Module (github.com/cosmos/ibc-apps/modules/ibc-hooks/v7) ", ( }); await LCD.chain1.tx.broadcastSync(tx, "test-1") await ibcTransfer(); - await blockInclusion(); + await LCD.blockInclusionChain1(); let res = await LCD.chain1.wasm.contractQuery( contractAddress, { "get_count": { "addr": contractAddress } } diff --git a/integration-tests/src/modules/ica/icav1.test.ts b/integration-tests/src/modules/ica/icav1.test.ts index 365cb59a..a4f2c129 100644 --- a/integration-tests/src/modules/ica/icav1.test.ts +++ b/integration-tests/src/modules/ica/icav1.test.ts @@ -1,5 +1,5 @@ import { AccAddress, Coin, MsgTransfer, MsgSend, Coins } from "@terra-money/feather.js"; -import { blockInclusion, getLCDClient, getMnemonics } from "../../helpers"; +import { LCDClients, getMnemonics } from "../../helpers"; import { MsgRegisterInterchainAccount, MsgSendTx } from "@terra-money/feather.js/dist/core/ica/controller/v1/msgs"; import { Height } from "@terra-money/feather.js/dist/core/ibc/core/client/Height"; import Long from "long"; @@ -8,7 +8,7 @@ import { CosmosTx } from "@terra-money/feather.js/dist/core/ica/controller/v1/Co describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modules/apps/27-interchain-accounts)", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const { icaMnemonic } = getMnemonics(); const chain1Wallet = LCD.chain1.wallet(icaMnemonic); const externalAccAddr = icaMnemonic.accAddress("terra"); @@ -75,7 +75,7 @@ describe("ICA Module 
(https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul // Check during 5 blocks for the receipt // of the IBC coin on chain-2 for (let i = 0; i <= 5; i++) { - await blockInclusion(); + await LCD.blockInclusionChain2(); let _ibcCoin = (await LCD.chain2.bank.balance(intechainAccountAddr))[0].find(c => c.denom.startsWith("ibc/")); if (_ibcCoin) { expect(_ibcCoin.denom.startsWith("ibc/")).toBeTruthy(); @@ -102,7 +102,7 @@ describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul if (tx !== undefined) { let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; let events = txResult.logs[0].events; @@ -127,9 +127,9 @@ describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul }) // Check during 5 blocks for the receipt - // of the IBC coin on chain-2 + // of the IBC coin on chain-1 for (let i = 0; i <= 5; i++) { - await blockInclusion(); + await LCD.blockInclusionChain1(); let res = await LCD.chain1.icaV1.controllerAccountAddress(externalAccAddr, "connection-0") .catch((e) => { const expectMsg = "failed to retrieve account address for icacontroller-"; @@ -163,13 +163,13 @@ describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult).toBeDefined(); // Check during 5 blocks for the receipt // of the IBC coin on chain-2 for (let i = 0; i <= 5; i++) { - await blockInclusion(); + await LCD.blockInclusionChain2(); let _ibcCoin = (await LCD.chain2.bank.balance(intechainAccountAddr))[0].find(c => c.denom.startsWith("ibc/")); if (_ibcCoin) { expect(_ibcCoin.denom.startsWith("ibc/")).toBeTruthy(); @@ -205,7 +205,8 @@ describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; const events = txResult.logs[0].events; expect(events[0]) @@ -232,7 +233,7 @@ describe("ICA Module (https://github.com/cosmos/ibc-go/tree/release/v7.3.x/modul // Check during 5 blocks for the receipt // of the IBC coin on chain-2 for (let i = 0; i <= 5; i++) { - await blockInclusion(); + await LCD.blockInclusionChain2(); const bankRes = await LCD.chain2.bank.balance(burnAddress); const coins = bankRes[0].find(c => c.denom === ibcCoinDenom); if (coins) { diff --git a/integration-tests/src/modules/icq/icqv1.test.ts b/integration-tests/src/modules/icq/icqv1.test.ts index ce8f9d3b..48676ba1 100644 --- a/integration-tests/src/modules/icq/icqv1.test.ts +++ b/integration-tests/src/modules/icq/icqv1.test.ts @@ -1,8 +1,8 @@ -import { getLCDClient } from "../../helpers"; +import { LCDClients } from "../../helpers"; describe("ICQ Module (https://github.com/cosmos/ibc-apps/tree/main/modules/async-icq)", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); test('Must contain the expected module params', async () => { // Query ica diff --git a/integration-tests/src/modules/pob/pob.test.ts b/integration-tests/src/modules/pob/pob.test.ts index f99ae5bc..a21571ae 100644 --- a/integration-tests/src/modules/pob/pob.test.ts +++
b/integration-tests/src/modules/pob/pob.test.ts @@ -1,10 +1,10 @@ import { Coins, Fee, MsgSend } from "@terra-money/feather.js"; -import { getMnemonics, getLCDClient, blockInclusion } from "../../helpers"; +import { getMnemonics, LCDClients } from "../../helpers"; import { MsgAuctionBid } from "@terra-money/feather.js/dist/core/pob/MsgAuctionBid"; describe("Proposer Builder Module (https://github.com/skip-mev/pob) ", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const wallet = LCD.chain1.wallet(accounts.pobMnemonic); const wallet11 = LCD.chain1.wallet(accounts.pobMnemonic1); @@ -93,7 +93,8 @@ describe("Proposer Builder Module (https://github.com/skip-mev/pob) ", () => { timeoutHeight: parseInt(blockHeight) + 20, }); const result = await LCD.chain1.tx.broadcastSync(buildTx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + const txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1"); expect(txResult.logs).toBeDefined(); // Recover the transaction hashes from the bundled transactions diff --git a/integration-tests/src/modules/tokenfactory/tokenfactory.test.ts b/integration-tests/src/modules/tokenfactory/tokenfactory.test.ts index 646edf50..ab1ae7d8 100644 --- a/integration-tests/src/modules/tokenfactory/tokenfactory.test.ts +++ b/integration-tests/src/modules/tokenfactory/tokenfactory.test.ts @@ -1,11 +1,11 @@ import { Coin, Coins, Fee, MnemonicKey, MsgBurn, MsgChangeAdmin, MsgCreateDenom, MsgInstantiateContract, MsgMint, MsgStoreCode, MsgSetBeforeSendHook, MsgSend } from "@terra-money/feather.js"; -import { getMnemonics, getLCDClient, blockInclusion } from "../../helpers"; +import { getMnemonics, LCDClients } from "../../helpers"; import fs from "fs"; import path from 'path'; describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/v2.7/x/tokenfactory) ", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const wallet = LCD.chain1.wallet(accounts.tokenFactoryMnemonic); const tokenFactoryWalletAddr = accounts.tokenFactoryMnemonic.accAddress("terra"); @@ -27,7 +27,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; let codeId = Number(txResult.logs[0].events[1].attributes[1].value); expect(codeId).toBeDefined(); @@ -46,7 +47,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; contractAddress = txResult.logs[0].events[4].attributes[0].value; expect(contractAddress).toBeDefined(); @@ -83,7 +85,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; factoryDenom = txResult.logs[0].eventsByType.create_denom.new_token_denom[0] as string expect(txResult.logs[0].events).toStrictEqual([{ @@ -160,7
+163,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events).toStrictEqual([{ "type": "message", @@ -263,7 +267,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ fee: new Fee(100_000, new Coins({ uluna: 100_000 })), }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events).toStrictEqual([{ "type": "message", @@ -360,7 +365,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events).toStrictEqual([{ "type": "message", @@ -404,7 +410,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events) .toStrictEqual([{ @@ -471,7 +478,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ fee: new Fee(100_000, new Coins({ uluna: 100_000 })), }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.raw_log) .toStrictEqual("failed to execute message; message index: 0: {Loading CosmWasm module: sudo}: gas meter hit maximum limit"); @@ -497,7 +505,8 @@ describe("TokenFactory Module (https://github.com/terra-money/core/tree/release/ chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; expect(txResult.logs[0].events).toStrictEqual([{ "type": "message", diff --git a/integration-tests/src/modules/wasm/ics20.test.ts b/integration-tests/src/modules/wasm/ics20.test.ts index 72f86aa9..7a724dd8 100644 --- a/integration-tests/src/modules/wasm/ics20.test.ts +++ b/integration-tests/src/modules/wasm/ics20.test.ts @@ -1,13 +1,13 @@ import { MnemonicKey, MsgExecuteContract, MsgInstantiateContract, MsgStoreCode } from "@terra-money/feather.js"; -import { getMnemonics, getLCDClient, blockInclusion, ibcTransfer } from "../../helpers"; +import { getMnemonics, LCDClients, ibcTransfer } from "../../helpers"; import fs from "fs"; import path from 'path'; import { execSync, exec } from 'child_process'; describe("Wasm Module (https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) ", () => { // Prepare environment clients, accounts and wallets - const LCD = getLCDClient(); + const LCD = LCDClients.create(); const accounts = getMnemonics(); const wallet = LCD.chain1.wallet(accounts.wasmContracts); const walletAddress = accounts.wasmContracts.accAddress("terra"); @@ -45,7 +45,7 @@ describe("Wasm Module 
(https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) " }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; cw20BaseCodeId = Number(txResult.logs[0].events[1].attributes[1].value); @@ -76,7 +76,8 @@ describe("Wasm Module (https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) " chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; cw20ContractAddr = txResult.logs[0].events[1].attributes[0].value; expect(cw20ContractAddr).toBeDefined(); @@ -101,7 +102,8 @@ describe("Wasm Module (https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) " chainID: "test-1", }); result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; ics20ContractAddr = txResult.logs[0].events[1].attributes[0].value; expect(ics20ContractAddr).toBeDefined(); @@ -119,7 +121,7 @@ describe("Wasm Module (https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) " // Create the path const pathToRelayDir = path.join(__dirname, "/../../test-data/relayer"); execSync(`relayer tx link "test1-test2" --src-port="wasm.${ics20ContractAddr}" --dst-port="transfer" --version="ics20-1" --home="${pathToRelayDir}"`, { stdio: "ignore" }) - await blockInclusion(); + await LCD.blockInclusionChain1(); // Start the relayer again const relayerStart = exec(`relayer start "test1-test2" -p="events" -b=100 --flush-interval="1s" --time-threshold="1s" --home="${pathToRelayDir}" > ${pathToRelayDir}/relayer.log 2>&1`) @@ -162,7 +164,8 @@ describe("Wasm Module (https://github.com/CosmWasm/wasmd/releases/tag/v0.45.0) " chainID: "test-1", }); let result = await LCD.chain1.tx.broadcastSync(tx, "test-1"); - await blockInclusion(); + await LCD.blockInclusionChain1(); + let txResult = await LCD.chain1.tx.txInfo(result.txhash, "test-1") as any; let events = txResult.logs[0].events; From 96439ca3841d7aefc26d69688d896d392ec39269 Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Tue, 9 Jan 2024 12:40:54 +0800 Subject: [PATCH 09/14] fix: lcd queries --- app/app.go | 5 +- app/fast_query/db/snappy/snappy_db.go | 3 +- app/fast_query/fast_query_service.go | 7 ++- app/fast_query/store/dbadapter.go | 1 + app/fast_query/store/store.go | 12 ++-- .../src/setup/init-test-framework.sh | 56 +++++++++---------- 6 files changed, 47 insertions(+), 37 deletions(-) diff --git a/app/app.go b/app/app.go index 1af0d10e..469c3906 100644 --- a/app/app.go +++ b/app/app.go @@ -262,7 +262,10 @@ func NewTerraApp( // TODO: move checking if streaming service is enabled to a helper function streamers := cast.ToStringSlice(appOpts.Get("store.streamers")) if slices.Contains(streamers, "fastquery") { - app.SetupFastQueryDB(appOpts, homePath) + err := app.SetupFastQueryDB(appOpts, homePath) + if err != nil { + panic(err) + } } // register upgrade diff --git a/app/fast_query/db/snappy/snappy_db.go b/app/fast_query/db/snappy/snappy_db.go index 222a07e3..aae4b84f 100644 --- a/app/fast_query/db/snappy/snappy_db.go +++ b/app/fast_query/db/snappy/snappy_db.go @@ -2,10 +2,11 @@ package snappy import ( "encoding/json" + "sync" + tmdb "github.com/cometbft/cometbft-db" "github.com/golang/snappy" "github.com/pkg/errors" - "sync" ) const ( diff --git 
a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index 47b93603..d099d822 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -4,11 +4,12 @@ import ( "fmt" log "github.com/cometbft/cometbft/libs/log" - "github.com/cosmos/cosmos-sdk/store/types" - storetypes "github.com/cosmos/cosmos-sdk/store/types" "github.com/terra-money/core/v2/app/fast_query/db/driver" "github.com/terra-money/core/v2/app/fast_query/db/height_driver" "github.com/terra-money/core/v2/app/fast_query/store" + + "github.com/cosmos/cosmos-sdk/store/types" + storetypes "github.com/cosmos/cosmos-sdk/store/types" ) type FastQueryService struct { @@ -26,7 +27,7 @@ func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string } // Create HeightDB Driver that implements optimization for reading - // and writing data in the database in paralell. + // and writing data in the database in parallel. fastQueryDb := height_driver.NewHeightDB( fastQueryDbDriver, &height_driver.HeightDBConfig{ diff --git a/app/fast_query/store/dbadapter.go b/app/fast_query/store/dbadapter.go index 3e501f1e..d260744f 100644 --- a/app/fast_query/store/dbadapter.go +++ b/app/fast_query/store/dbadapter.go @@ -2,6 +2,7 @@ package store import ( dbm "github.com/cometbft/cometbft-db" + "github.com/cosmos/cosmos-sdk/store/dbadapter" pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types" "github.com/cosmos/cosmos-sdk/store/types" diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go index 4837812d..1e18f137 100644 --- a/app/fast_query/store/store.go +++ b/app/fast_query/store/store.go @@ -93,7 +93,8 @@ func NewStore(db dbm.DB, hldb *height_driver.HeightDB, logger log.Logger, storeK } for _, storeKeyValue := range storeKeys { - store.MountStoreWithDB(storeKeyValue, types.StoreTypeDB, db) + // nil is set for DB so that when we loadCommitStoreFromParams, we load the db as a prefix db + store.MountStoreWithDB(storeKeyValue, types.StoreTypeDB, nil) } if err := store.LoadLatestVersion(); err != nil { @@ -1122,8 +1123,9 @@ func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) { panic(err) } - cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version) - batch.Set([]byte(cInfoKey), bz) + if err = batch.Set([]byte(cInfoKey), bz); err != nil { + panic(err) + } } func flushLatestVersion(batch dbm.Batch, version int64) { @@ -1132,5 +1134,7 @@ func flushLatestVersion(batch dbm.Batch, version int64) { panic(err) } - batch.Set([]byte(latestVersionKey), bz) + if err = batch.Set([]byte(latestVersionKey), bz); err != nil { + panic(err) + } } diff --git a/integration-tests/src/setup/init-test-framework.sh b/integration-tests/src/setup/init-test-framework.sh index 90460a5e..84e31497 100755 --- a/integration-tests/src/setup/init-test-framework.sh +++ b/integration-tests/src/setup/init-test-framework.sh @@ -178,36 +178,36 @@ $BINARY genesis collect-gentxs --home $CHAIN_DIR/$CHAINID_1 &> /dev/null $BINARY genesis collect-gentxs --home $CHAIN_DIR/$CHAINID_2 &> /dev/null echo "Changing defaults and ports in app.toml and config.toml files..." 
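A note on the store.go hunk above: mounting every store with a nil DB means that, when the multistore loads its commit stores, each one falls back to the shared fast-query database wrapped with a per-store key prefix instead of a dedicated DB. A minimal sketch of that prefixing behavior, written directly against cometbft-db (the store names and the "s/k:" prefix format here are illustrative assumptions, not the exact multistore internals):

package main

import (
	"fmt"

	dbm "github.com/cometbft/cometbft-db"
)

func main() {
	// One shared database, as in the fast-query store.
	shared := dbm.NewMemDB()

	// Two logical stores behind distinct prefixes, which is roughly what
	// the multistore arranges for a StoreTypeDB store mounted with nil.
	bank := dbm.NewPrefixDB(shared, []byte("s/k:bank/"))
	staking := dbm.NewPrefixDB(shared, []byte("s/k:staking/"))

	if err := bank.Set([]byte("balance"), []byte("100")); err != nil {
		panic(err)
	}
	if err := staking.Set([]byte("balance"), []byte("42")); err != nil {
		panic(err)
	}

	// The prefixes keep each store's keyspace disjoint in the single DB.
	v, err := bank.Get([]byte("balance"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v)) // prints 100, not 42
}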
-sed -i -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/config.toml -sed -i -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/config.toml -sed -i -e 's#"tcp://localhost:26657"#"tcp://localhost:'"$RPCPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/client.toml -sed -i -e 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAINID_1/config/config.toml -sed -i -e 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAINID_1/config/config.toml -sed -i -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID_1/config/config.toml -sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml -sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml -sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml -sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml -sed -i '/# Enable defines if the Rosetta API server should be enabled\./ {N; s/enable = true/enable = false/}' $CHAIN_DIR/$CHAINID_1/config/app.toml - - -sed -i -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml -sed -i -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml -sed -i -e 's#"tcp://localhost:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/client.toml -sed -i -e 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAINID_2/config/config.toml -sed -i -e 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAINID_2/config/config.toml -sed -i -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID_2/config/config.toml -sed -i -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_2/config/app.toml -sed -i '/# Enable defines if the Rosetta API server should be enabled\./ {N; s/enable = true/enable = false/}' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '' -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/config.toml +sed -i '' -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/config.toml +sed -i '' -e 's#"tcp://localhost:26657"#"tcp://localhost:'"$RPCPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/client.toml +sed -i '' -e 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAINID_1/config/config.toml +sed -i '' -e 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAINID_1/config/config.toml +sed -i '' -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID_1/config/config.toml +sed -i '' -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i '' -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i '' -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_1"'"#g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i '' -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_1/config/app.toml +sed -i '' -e '/\[rosetta\]/,/enable = true/ s/enable = true/enable = false/' $CHAIN_DIR/$CHAINID_1/config/app.toml + 
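The streamers = ["fastquery"] substitution above is what activates the feature: NewTerraApp reads store.streamers from app.toml through the app options and only wires the fast-query service when "fastquery" is listed. A minimal sketch of that gate, assuming a map-backed stand-in for servertypes.AppOptions:

package main

import (
	"fmt"
	"slices"

	"github.com/spf13/cast"
)

// appOptions is a hypothetical stand-in for servertypes.AppOptions,
// which exposes app.toml values through Get.
type appOptions map[string]interface{}

func (o appOptions) Get(key string) interface{} { return o[key] }

func main() {
	opts := appOptions{"store.streamers": []string{"fastquery"}}

	// Mirrors the check in NewTerraApp: setup only runs when the
	// "fastquery" streamer is enabled under [store] in app.toml.
	streamers := cast.ToStringSlice(opts.Get("store.streamers"))
	if slices.Contains(streamers, "fastquery") {
		fmt.Println("fastquery streamer enabled; SetupFastQueryDB would run")
	}
}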
+ +sed -i '' -e 's#"tcp://0.0.0.0:26656"#"tcp://localhost:'"$P2PPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml +sed -i '' -e 's#"tcp://127.0.0.1:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/config.toml +sed -i '' -e 's#"tcp://localhost:26657"#"tcp://localhost:'"$RPCPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/client.toml +sed -i '' -e 's/timeout_commit = "5s"/timeout_commit = "1s"/g' $CHAIN_DIR/$CHAINID_2/config/config.toml +sed -i '' -e 's/timeout_propose = "3s"/timeout_propose = "1s"/g' $CHAIN_DIR/$CHAINID_2/config/config.toml +sed -i '' -e 's/index_all_keys = false/index_all_keys = true/g' $CHAIN_DIR/$CHAINID_2/config/config.toml +sed -i '' -e 's/enable = false/enable = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '' -e 's/swagger = false/swagger = true/g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '' -e 's#"tcp://localhost:1317"#"tcp://localhost:'"$RESTPORT_2"'"#g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '' -e 's/streamers = \[\]/streamers = \["fastquery"\]/g' $CHAIN_DIR/$CHAINID_2/config/app.toml +sed -i '' -e '/\[rosetta\]/,/enable = true/ s/enable = true/enable = false/' $CHAIN_DIR/$CHAINID_2/config/app.toml echo "Changing genesis.json..." -sed -i -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json -sed -i -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json -sed -i -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json -sed -i -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json +sed -i '' -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json +sed -i '' -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json +sed -i '' -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json +sed -i '' -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json exit From 42d87ad88c790392ae5016f4866162e409f609c7 Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Tue, 9 Jan 2024 12:49:26 +0800 Subject: [PATCH 10/14] fix: accidental delete --- app/fast_query/store/store.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/fast_query/store/store.go b/app/fast_query/store/store.go index 1e18f137..afa3dc98 100644 --- a/app/fast_query/store/store.go +++ b/app/fast_query/store/store.go @@ -1123,7 +1123,8 @@ func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) { panic(err) } - if err = batch.Set([]byte(cInfoKey), bz); err != nil { + cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version) + if err = batch.Set([]byte(cInfoKey), bz); err != nil { panic(err) } } From 2ffbea9ae71840e5557019424c8e74ecb53c72ba Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Mon, 15 Jan 2024 11:23:39 +0800 Subject: [PATCH 11/14] fix: lint + remove log --- app/app.go | 5 +++++ app/encoding_test.go | 7 ++++--- app/fast_query/db/driver/batch.go | 1 - app/fast_query/fast_query_service.go | 8 +++++--- app/genesis_test.go | 4 +++- app/post/mocks/post_mock.go | 3 ++- app/simulation_test.go | 1 + cmd/terrad/config.go | 1 + x/feeshare/post/post_test.go | 22 +++++++++++----------- 9 files changed, 32 insertions(+), 20 deletions(-) diff --git a/app/app.go b/app/app.go index 469c3906..3412bde0 100644 --- a/app/app.go +++ b/app/app.go @@ -609,5 +609,10 @@ func (app *TerraApp)
SetupFastQueryDB(appOpts servertypes.AppOptions, homePath s app.SetStreamingService(streamingservice) app.SetQueryMultiStore(fastQueryService.Store) + realBlockHeight := app.LastBlockHeight() + if fastQueryService.Store.LatestVersion() != realBlockHeight { + panic(fmt.Sprintf("fastquerydb version is not equal to the real block height. fastquerydb version: %d, real block height: %d", fastQueryService.Store.LatestVersion(), realBlockHeight)) + } + return nil } diff --git a/app/encoding_test.go b/app/encoding_test.go index 092ff63e..0ebd85dc 100644 --- a/app/encoding_test.go +++ b/app/encoding_test.go @@ -1,11 +1,12 @@ package app_test import ( + "github.com/terra-money/core/v2/app" + "github.com/terra-money/core/v2/app/test_helpers" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/terra-money/core/v2/app" - "github.com/terra-money/core/v2/app/test_helpers" ) type AppCodecsTestSuite struct { @@ -16,7 +17,7 @@ func (acts *AppCodecsTestSuite) TestCodecs() { // Setting up the app acts.Setup() - // generating the encoding config to assert agains + // generating the encoding config to assert against encCfg := app.MakeEncodingConfig() // Validate the encoding config has been configured as expected for the App diff --git a/app/fast_query/db/driver/batch.go b/app/fast_query/db/driver/batch.go index e422f57c..02be9a80 100644 --- a/app/fast_query/db/driver/batch.go +++ b/app/fast_query/db/driver/batch.go @@ -72,7 +72,6 @@ func (b *DriverBatch) Close() error { } func (b *DriverBatch) RollbackBatch() tmdb.Batch { - b.Metric() return b.batch.RollbackBatch } diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index d099d822..e2369128 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -51,15 +51,17 @@ func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string } func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.StoreKVPair) error { - fqs.logger.Info("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet) - + fqs.logger.Debug("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet) + if blockHeight-fqs.Store.LatestVersion() != 1 { + fmt.Println(fmt.Sprintf("invalid block height: %s vs %s", blockHeight, fqs.Store.LatestVersion())) + panic("") + } fqs.fastQueryDb.SetWriteHeight(blockHeight) fqs.safeBatchDBCloser.Open() for _, change := range changeSet { storeKey := storetypes.NewKVStoreKey(change.StoreKey) commitKVStore := fqs.Store.GetStoreByName(storeKey.Name()).(types.CommitKVStore) - fmt.Print(commitKVStore) if change.Delete { commitKVStore.Delete(change.Key) } else { diff --git a/app/genesis_test.go b/app/genesis_test.go index 9ec8bc3a..33c245ad 100644 --- a/app/genesis_test.go +++ b/app/genesis_test.go @@ -10,9 +10,10 @@ import ( abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" + "github.com/terra-money/core/v2/app/test_helpers" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" - "github.com/terra-money/core/v2/app/test_helpers" ibcfee "github.com/cosmos/ibc-go/v7/modules/apps/29-fee" "github.com/golang/mock/gomock" @@ -55,6 +56,7 @@ import ( "github.com/CosmWasm/wasmd/x/wasm" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" tmtypes "github.com/cometbft/cometbft/types" + authtypes
"github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" diff --git a/app/post/mocks/post_mock.go b/app/post/mocks/post_mock.go index c2de3e16..1c817f56 100644 --- a/app/post/mocks/post_mock.go +++ b/app/post/mocks/post_mock.go @@ -11,8 +11,9 @@ package mocks import ( reflect "reflect" - types "github.com/cosmos/cosmos-sdk/types" gomock "go.uber.org/mock/gomock" + + types "github.com/cosmos/cosmos-sdk/types" ) // MockAnteDecorator is a mock of AnteDecorator interface. diff --git a/app/simulation_test.go b/app/simulation_test.go index 0a1906d2..8f9e6e9a 100644 --- a/app/simulation_test.go +++ b/app/simulation_test.go @@ -12,6 +12,7 @@ import ( "github.com/terra-money/core/v2/app/keepers" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" simulationtypes "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/cosmos/cosmos-sdk/x/simulation" diff --git a/cmd/terrad/config.go b/cmd/terrad/config.go index 3f1a6a22..422d5d7d 100644 --- a/cmd/terrad/config.go +++ b/cmd/terrad/config.go @@ -2,6 +2,7 @@ package main import ( wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + serverconfig "github.com/cosmos/cosmos-sdk/server/config" ) diff --git a/x/feeshare/post/post_test.go b/x/feeshare/post/post_test.go index 065c457e..c15c58c7 100644 --- a/x/feeshare/post/post_test.go +++ b/x/feeshare/post/post_test.go @@ -200,7 +200,7 @@ func (suite *AnteTestSuite) TestCalculateFee() { func (suite *AnteTestSuite) TestPostHandler() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) @@ -234,7 +234,7 @@ func (suite *AnteTestSuite) TestPostHandler() { // Remove all events from the context to assert the events being added correctly. suite.Ctx = suite.Ctx.WithEventManager(sdk.NewEventManager()) - // Assert the next hanlder is called once + // Assert the next handler is called once mockedPostDecorator. EXPECT(). PostHandle(gomock.Any(), gomock.Any(), false, true, gomock.Any()). @@ -294,7 +294,7 @@ func (suite *AnteTestSuite) TestPostHandler() { func (suite *AnteTestSuite) TestDisabledPostHandle() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) @@ -319,7 +319,7 @@ func (suite *AnteTestSuite) TestDisabledPostHandle() { suite.App.Keepers.WasmKeeper, ) - // Assert the next hanlder is called once + // Assert the next handler is called once mockedPostDecorator. EXPECT(). PostHandle(gomock.Any(), gomock.Any(), false, true, gomock.Any()). @@ -341,7 +341,7 @@ func (suite *AnteTestSuite) TestDisabledPostHandle() { func (suite *AnteTestSuite) TestWithZeroFeesPostHandle() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) @@ -355,7 +355,7 @@ func (suite *AnteTestSuite) TestWithZeroFeesPostHandle() { suite.App.Keepers.WasmKeeper, ) - // Assert the next hanlder is called once + // Assert the next handler is called once mockedPostDecorator. 
EXPECT(). PostHandle(gomock.Any(), gomock.Any(), false, true, gomock.Any()). @@ -377,7 +377,7 @@ func (suite *AnteTestSuite) TestWithZeroFeesPostHandle() { func (suite *AnteTestSuite) TestPostHandlerWithEmptySmartContractStore() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) @@ -405,7 +405,7 @@ func (suite *AnteTestSuite) TestPostHandlerWithEmptySmartContractStore() { suite.App.Keepers.WasmKeeper, ) - // Assert the next hanlder is called once + // Assert the next handler is called once mockedPostDecorator. EXPECT(). PostHandle(gomock.Any(), gomock.Any(), false, true, gomock.Any()). @@ -427,7 +427,7 @@ func (suite *AnteTestSuite) TestPostHandlerWithEmptySmartContractStore() { func (suite *AnteTestSuite) TestPostHandlerNoSmartContractExecuted() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) @@ -459,7 +459,7 @@ func (suite *AnteTestSuite) TestPostHandlerNoSmartContractExecuted() { suite.App.Keepers.WasmKeeper, ) - // Assert the next hanlder is called once + // Assert the next handler is called once mockedPostDecorator. EXPECT(). PostHandle(gomock.Any(), gomock.Any(), false, true, gomock.Any()). @@ -481,7 +481,7 @@ func (suite *AnteTestSuite) TestPostHandlerNoSmartContractExecuted() { func (suite *AnteTestSuite) TestPostHandlerWithInvalidContractAddrOnExecution() { suite.Setup() - // Create a mocked next post hanlder to assert the function being called. + // Create a mocked next post handler to assert the function being called. ctrl := gomock.NewController(suite.T()) mockedPostDecorator := mocks.NewMockPostDecorator(ctrl) From 4d466e9ff506d44fdebbc47f24dda631c9b98a90 Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Mon, 15 Jan 2024 12:07:26 +0800 Subject: [PATCH 12/14] fix: tests --- integration-tests/src/setup/init-test-framework.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration-tests/src/setup/init-test-framework.sh b/integration-tests/src/setup/init-test-framework.sh index 84e31497..3aa45922 100755 --- a/integration-tests/src/setup/init-test-framework.sh +++ b/integration-tests/src/setup/init-test-framework.sh @@ -209,8 +209,6 @@ sed -i '' -e 's/"voting_period": "172800s"/"voting_period": "2s"/g' $CHAIN_DIR/$ sed -i '' -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_1/config/genesis.json sed -i '' -e 's/"reward_delay_time": "604800s"/"reward_delay_time": "0s"/g' $CHAIN_DIR/$CHAINID_2/config/genesis.json -exit - echo "Starting $CHAINID_1 in $CHAIN_DIR..." 
echo "Creating log file at $CHAIN_DIR/$CHAINID_1.log" $BINARY start --log_level trace --log_format json --home $CHAIN_DIR/$CHAINID_1 --pruning=nothing --grpc.address="0.0.0.0:$GRPCPORT_1" --grpc-web.address="0.0.0.0:$GRPCWEB_1" > $CHAIN_DIR/$CHAINID_1.log 2>&1 & From 18fab94951dbd113b21fe58132b363fd41860d90 Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Mon, 15 Jan 2024 12:19:00 +0800 Subject: [PATCH 13/14] fix: lint --- app/app.go | 1 + app/fast_query/fast_query_service.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/app/app.go b/app/app.go index 2f14db3f..7f342cf1 100644 --- a/app/app.go +++ b/app/app.go @@ -2,6 +2,7 @@ package app import ( "encoding/json" + "fmt" "io" "net/http" "os" diff --git a/app/fast_query/fast_query_service.go b/app/fast_query/fast_query_service.go index e2369128..e4afe34c 100644 --- a/app/fast_query/fast_query_service.go +++ b/app/fast_query/fast_query_service.go @@ -53,7 +53,7 @@ func NewFastQueryService(homedir string, logger log.Logger, storeKeys map[string func (fqs *FastQueryService) CommitChanges(blockHeight int64, changeSet []types.StoreKVPair) error { fqs.logger.Debug("CommitChanges", "blockHeight", blockHeight, "changeSet", changeSet) if blockHeight-fqs.Store.LatestVersion() != 1 { - fmt.Println(fmt.Sprintf("invalid block height: %s vs %s", blockHeight, fqs.Store.LatestVersion())) + fmt.Println(fmt.Sprintf("invalid block height: %d vs %d", blockHeight, fqs.Store.LatestVersion())) panic("") } fqs.fastQueryDb.SetWriteHeight(blockHeight) From 6599920a867cc8fc1bf16dda567537fdc7f7e4ad Mon Sep 17 00:00:00 2001 From: javiersuweijie Date: Tue, 16 Jan 2024 12:20:42 +0800 Subject: [PATCH 14/14] refactor: removed unused snappy DB --- app/fast_query/db/snappy/snappy_batch.go | 38 ------- app/fast_query/db/snappy/snappy_db.go | 116 --------------------- app/fast_query/db/snappy/snappy_db_test.go | 93 ----------------- 3 files changed, 247 deletions(-) delete mode 100644 app/fast_query/db/snappy/snappy_batch.go delete mode 100644 app/fast_query/db/snappy/snappy_db.go delete mode 100644 app/fast_query/db/snappy/snappy_db_test.go diff --git a/app/fast_query/db/snappy/snappy_batch.go b/app/fast_query/db/snappy/snappy_batch.go deleted file mode 100644 index a7bbf25f..00000000 --- a/app/fast_query/db/snappy/snappy_batch.go +++ /dev/null @@ -1,38 +0,0 @@ -package snappy - -import ( - mdb "github.com/cometbft/cometbft-db" - "github.com/golang/snappy" -) - -var _ mdb.Batch = (*SnappyBatch)(nil) - -type SnappyBatch struct { - batch mdb.Batch -} - -func NewSnappyBatch(batch mdb.Batch) *SnappyBatch { - return &SnappyBatch{ - batch: batch, - } -} - -func (s *SnappyBatch) Set(key, value []byte) error { - return s.batch.Set(key, snappy.Encode(nil, value)) -} - -func (s *SnappyBatch) Delete(key []byte) error { - return s.batch.Delete(key) -} - -func (s *SnappyBatch) Write() error { - return s.batch.Write() -} - -func (s *SnappyBatch) WriteSync() error { - return s.batch.WriteSync() -} - -func (s *SnappyBatch) Close() error { - return s.batch.Close() -} diff --git a/app/fast_query/db/snappy/snappy_db.go b/app/fast_query/db/snappy/snappy_db.go deleted file mode 100644 index aae4b84f..00000000 --- a/app/fast_query/db/snappy/snappy_db.go +++ /dev/null @@ -1,116 +0,0 @@ -package snappy - -import ( - "encoding/json" - "sync" - - tmdb "github.com/cometbft/cometbft-db" - "github.com/golang/snappy" - "github.com/pkg/errors" -) - -const ( - CompatModeEnabled = iota - CompatModeDisabled -) - -var ( - errIteratorNotSupported = errors.New("iterator unsupported") - 
errUnknownData = errors.New("unknown format") -) - -var _ tmdb.DB = (*SnappyDB)(nil) - -// SnappyDB implements a tmdb.DB overlay with snappy compression/decompression -// Iterator is NOT supported -- main purpose of this library is to support indexer.db, -// which never makes use of iterators anyway -// NOTE: implement when needed -// NOTE2: monitor mem pressure, optimize by pre-allocating dst buf when there is bottleneck -type SnappyDB struct { - db tmdb.DB - mtx *sync.Mutex - compatMode int -} - -func NewSnappyDB(db tmdb.DB, compatMode int) *SnappyDB { - return &SnappyDB{ - mtx: new(sync.Mutex), - db: db, - compatMode: compatMode, - } -} - -func (s *SnappyDB) Get(key []byte) ([]byte, error) { - if item, err := s.db.Get(key); err != nil { - return nil, err - } else if item == nil && err == nil { - return nil, nil - } else { - decoded, decodeErr := snappy.Decode(nil, item) - - // if snappy decode fails, try to replace the underlying - // only recover & replace when the blob is a valid json - if s.compatMode == CompatModeEnabled { - if decodeErr != nil { - if json.Valid(item) { - s.mtx.Lock() - // run item by Set() to encode & replace - _ = s.db.Set(key, item) - defer s.mtx.Unlock() - - return item, nil - } else { - return nil, errUnknownData - } - } else { - return decoded, nil - } - } - - return decoded, decodeErr - } -} - -func (s *SnappyDB) Has(key []byte) (bool, error) { - return s.db.Has(key) -} - -func (s *SnappyDB) Set(key []byte, value []byte) error { - return s.db.Set(key, snappy.Encode(nil, value)) -} - -func (s *SnappyDB) SetSync(key []byte, value []byte) error { - return s.Set(key, value) -} - -func (s *SnappyDB) Delete(key []byte) error { - return s.db.Delete(key) -} - -func (s *SnappyDB) DeleteSync(key []byte) error { - return s.Delete(key) -} - -func (s *SnappyDB) Iterator(start, end []byte) (tmdb.Iterator, error) { - return nil, errIteratorNotSupported -} - -func (s *SnappyDB) ReverseIterator(start, end []byte) (tmdb.Iterator, error) { - return nil, errIteratorNotSupported -} - -func (s *SnappyDB) Close() error { - return s.db.Close() -} - -func (s *SnappyDB) NewBatch() tmdb.Batch { - return NewSnappyBatch(s.db.NewBatch()) -} - -func (s *SnappyDB) Print() error { - return s.db.Print() -} - -func (s *SnappyDB) Stats() map[string]string { - return s.db.Stats() -} diff --git a/app/fast_query/db/snappy/snappy_db_test.go b/app/fast_query/db/snappy/snappy_db_test.go deleted file mode 100644 index 4d5a7a36..00000000 --- a/app/fast_query/db/snappy/snappy_db_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package snappy - -import ( - "io/ioutil" - "os" - "testing" - - db "github.com/cometbft/cometbft-db" - tmjson "github.com/cometbft/cometbft/libs/json" - cometbfttypes "github.com/cometbft/cometbft/types" - "github.com/stretchr/testify/assert" -) - -func TestSnappyDB(t *testing.T) { - snappy := NewSnappyDB(db.NewMemDB(), CompatModeEnabled) - - assert.Nil(t, snappy.Set([]byte("test"), []byte("testValue"))) - - var v []byte - var err error - - // nil buffer test - v, err = snappy.Get([]byte("non-existing")) - assert.Nil(t, v) - assert.Nil(t, err) - - v, err = snappy.Get([]byte("test")) - assert.Nil(t, err) - assert.Equal(t, []byte("testValue"), v) - - assert.Nil(t, snappy.Delete([]byte("test"))) - v, err = snappy.Get([]byte("test")) - assert.Nil(t, v) - assert.Nil(t, err) - - // iterator is not supported - var it db.Iterator - it, err = snappy.Iterator([]byte("start"), []byte("end")) - assert.Nil(t, it) - assert.Equal(t, errIteratorNotSupported, err) - - it, err = 
snappy.ReverseIterator([]byte("start"), []byte("end")) - assert.Nil(t, it) - assert.Equal(t, errIteratorNotSupported, err) - - // batched store is compressed as well - var batch db.Batch - batch = snappy.NewBatch() - - assert.Nil(t, batch.Set([]byte("key"), []byte("batchedValue"))) - assert.Nil(t, batch.Write()) - assert.Nil(t, batch.Close()) - - v, err = snappy.Get([]byte("key")) - assert.Equal(t, []byte("batchedValue"), v) - - batch = snappy.NewBatch() - assert.Nil(t, batch.Delete([]byte("key"))) - assert.Nil(t, batch.Write()) - assert.Nil(t, batch.Close()) - - v, err = snappy.Get([]byte("key")) - assert.Nil(t, v) - assert.Nil(t, err) -} - -func TestSnappyDBCompat(t *testing.T) { - mdb := db.NewMemDB() - testKey := []byte("testKey") - - nocompat := NewSnappyDB(mdb, CompatModeDisabled) - indexSampleTx(nocompat, testKey) - - nocompatResult, _ := nocompat.Get(testKey) - - compat := NewSnappyDB(mdb, CompatModeEnabled) - compatResult, _ := compat.Get(testKey) - assert.Equal(t, nocompatResult, compatResult) - - nocompatResult2, _ := nocompat.Get(testKey) - assert.Equal(t, compatResult, nocompatResult2) -} - -func indexSampleTx(mdb db.DB, key []byte) { - block := &cometbfttypes.Block{} - blockFile, _ := os.Open("../../indexer/fixtures/block_4814775.json") - blockJSON, _ := ioutil.ReadAll(blockFile) - if err := tmjson.Unmarshal(blockJSON, block); err != nil { - panic(err) - } - - _ = mdb.Set(key, blockJSON) -}
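For reference, the overlay removed here did nothing more exotic than a snappy round-trip on every value: Set stored values snappy-encoded and Get decoded them on the way out. A minimal standalone sketch of that behavior, using github.com/golang/snappy just as the deleted code did (the sample value is arbitrary):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// What SnappyDB.Set wrote and SnappyDB.Get recovered for each value.
	value := []byte(`{"height": "4814775", "txs": []}`)

	encoded := snappy.Encode(nil, value)
	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}

	fmt.Printf("original %d bytes, compressed %d bytes, round-trip ok: %v\n",
		len(value), len(encoded), bytes.Equal(value, decoded))
}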