feat(op-batcher): altda->ethda failover #13
base: feat--multiframe-altda-channel
@@ -0,0 +1,74 @@
package altda

import (
	"math/big"
	"testing"

	op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-batcher/flags"
	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
	"github.com/ethereum-optimism/optimism/op-e2e/system/e2esys"
	"github.com/stretchr/testify/require"
)
// TestBatcher_FailoverToEthDA_FallbackToAltDA tests that the batcher fails over to ethDA
// if the da-server returns 503, and then falls back to altDA once altDA is available again
// (i.e. the da-server no longer returns 503).

Review comment: we always try altda first for every dispersal, then fall back to non-altDA after sufficient retries. The wording seems odd.
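For context, here is a minimal sketch of the per-channel dispersal flow that the comment above describes. It is not op-batcher code: disperseChannel and putToAltDA are hypothetical stand-ins, and the only details taken from this PR are the version bytes the test below checks (1 for an altDA commitment, 0 for raw ethDA frame data, i.e. derive.DerivationVersion0).

package altdasketch

import "errors"

var errDAServerUnavailable = errors.New("da-server returned 503")

// putToAltDA is a hypothetical stand-in for the altDA client's put request.
func putToAltDA(frameData []byte) ([]byte, error) {
	return nil, errDAServerUnavailable // simulate the outage under discussion
}

// disperseChannel sketches the altDA-first flow: every new channel is offered
// to altDA first; only if the put fails does this one channel fail over to
// ethDA, and the next channel starts over with altDA.
func disperseChannel(frameData []byte) []byte {
	if commitment, err := putToAltDA(frameData); err == nil {
		return append([]byte{1}, commitment...) // altDA commitment tx (version byte 1)
	}
	return append([]byte{0}, frameData...) // ethDA failover: raw frames (version byte 0)
}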
func TestBatcher_FailoverToEthDA_FallbackToAltDA(t *testing.T) {
	op_e2e.InitParallel(t)

	nChannelsFailover := uint64(2)

	cfg := e2esys.DefaultSystemConfig(t, e2esys.WithLogLevel(log.LevelCrit))
	cfg.DeployConfig.UseAltDA = true
	// With these settings, the batcher will post a single commitment per L1 block,
	// so it's easy to trigger failover and observe the commitment changing on the next L1 block.
	cfg.BatcherMaxPendingTransactions = 1 // only one tx in flight at a time
	cfg.BatcherMaxConcurrentDARequest = 1
	cfg.BatcherBatchType = 0
	// We make channels as small as possible, such that they contain a single commitment.
	// This is because failover to ethDA happens on a per-channel basis (each new channel is sent to altDA first).
	// Hence, we can quickly observe the failover (to ethda) and fallback (to altda) behavior.
	// cfg.BatcherMaxL1TxSizeBytes = 1200
	// Currently, altda commitments can only be sent as calldata.
	cfg.DataAvailabilityType = flags.CalldataType

	sys, err := cfg.Start(t)
	require.NoError(t, err, "Error starting up system")
	defer sys.Close()
	l1Client := sys.NodeClient("l1")

	startBlockL1, err := geth.WaitForBlockWithTxFromSender(cfg.DeployConfig.BatchSenderAddress, l1Client, 10)
	require.NoError(t, err)

	// Simulate the altda server returning 503 for the next nChannelsFailover put requests.
	sys.FakeAltDAServer.SetPutFailoverForNRequests(nChannelsFailover)

	countEthDACommitment := uint64(0)

	// Most likely, the sequence of batcher txs will be: altDA, ethDA, ethDA, altDA, altDA, altDA.
	// The first tx is altDA because the failover counter is only armed after startBlockL1 is found
	// (that block already carries an altDA commitment); the two ethDA txs correspond to the
	// nChannelsFailover=2 failed puts; altDA resumes once the counter is drained. "Most likely"
	// because the exact block-to-tx alignment depends on batcher/L1 timing.
Review comment: why "most likely"? And why two ethDA in a row — because we set failoverCount=2? But why altDA at the beginning?
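To make that question concrete, here is a toy sketch (not part of the PR) of the pattern the comment above predicts, assuming the failover counter is armed right after the first altDA block and drains by one per channel. It is purely illustrative; the real test's timing is less deterministic.

// expectedTxTypes returns the predicted per-block batcher tx types for the
// six blocks the test inspects. expectedTxTypes(2) yields
// ["altda" "ethda" "ethda" "altda" "altda" "altda"].
func expectedTxTypes(nFailover int) []string {
	types := []string{"altda"} // startBlockL1 already carries an altDA commitment
	for i := 0; i < nFailover; i++ {
		types = append(types, "ethda") // one ethDA channel per failed altDA put
	}
	// Once the counter reaches zero, altDA puts succeed again.
	return append(types, "altda", "altda", "altda")
}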
	for blockNumL1 := startBlockL1.NumberU64(); blockNumL1 < startBlockL1.NumberU64()+6; blockNumL1++ {
		blockL1, err := geth.WaitForBlock(big.NewInt(0).SetUint64(blockNumL1), l1Client)
		require.NoError(t, err)
		batcherTxs, err := transactions.TransactionsBySender(blockL1, cfg.DeployConfig.BatchSenderAddress)
		require.NoError(t, err)
		require.Equal(t, 1, len(batcherTxs)) // sanity check: ensure BatcherMaxPendingTransactions=1 is working
		batcherTx := batcherTxs[0]
		if batcherTx.Data()[0] == 1 {
			t.Log("blockL1", blockNumL1, "batcherTxType", "altda")
		} else if batcherTx.Data()[0] == 0 {
			t.Log("blockL1", blockNumL1, "batcherTxType", "ethda")
		} else {
			t.Fatalf("unexpected batcherTxType: %v", batcherTx.Data()[0])
		}
		if batcherTx.Data()[0] == byte(derive.DerivationVersion0) {
			countEthDACommitment++
		}
	}
	require.Equal(t, nChannelsFailover, countEthDACommitment, "Expected %v ethDA commitments, got %v", nChannelsFailover, countEthDACommitment)
}
Review comment: what is the point of decrementing failoverCount and then still actually handling the put?
Review comment: Is it just to simplify testing?
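For readers without the fake server's source at hand, here is a rough reconstruction of the behavior this thread questions. The struct, constructor, and handler names are guesses; only the decrement-then-still-handle shape and the SetPutFailoverForNRequests method name (which the test calls) are taken from this PR.

package altdamocks

import (
	"crypto/sha256"
	"io"
	"net/http"
	"sync"
)

// FakeAltDAServerSketch mimics (by assumption) the PR's fake da-server: while
// failoverCount > 0, each put decrements the counter and responds 503, but the
// payload is still stored as a normal put would be.
type FakeAltDAServerSketch struct {
	mu            sync.Mutex
	failoverCount uint64
	store         map[string][]byte
}

func NewFakeAltDAServerSketch() *FakeAltDAServerSketch {
	return &FakeAltDAServerSketch{store: map[string][]byte{}}
}

// SetPutFailoverForNRequests mirrors the method the test calls on the real fake server.
func (s *FakeAltDAServerSketch) SetPutFailoverForNRequests(n uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.failoverCount = n
}

func (s *FakeAltDAServerSketch) HandlePut(w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	failover := s.failoverCount > 0
	if failover {
		s.failoverCount-- // count down the simulated outage
	}
	sum := sha256.Sum256(body)
	s.store[string(sum[:])] = body // the put is still handled either way
	if failover {
		w.WriteHeader(http.StatusServiceUnavailable) // but the batcher sees a 503
		return
	}
	w.WriteHeader(http.StatusOK)
}

If the intent is just test simplicity, an alternative would be to reject the put outright during failover; storing the payload anyway keeps the Nth-request accounting unambiguous, but which behavior the PR actually implements is exactly what the reviewer is asking.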